content (large_string, lengths 0 to 6.46M) | path (large_string, lengths 3 to 331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5 to 125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4 to 6.46M) | extension (large_string, 75 classes) | text (string, lengths 0 to 6.46M) |
---|---|---|---|---|---|---|---|---|---|
context("sf_polygon")
test_that("various objects converted to sf_polygon",{
m <- matrix(1:4, ncol = 2)
res <- sfheaders:::rcpp_sf_polygon(m, c(0L,1L), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,3,4,5), ncol = 2)
res <- sfheaders:::rcpp_sf_polygon(m, c(0L,1L), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c(0L,1L), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,3,4,5), ncol = 2)
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:4, ncol = 2)
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
res <- sfheaders:::rcpp_sf_polygon(m, c(0L,1L), 2L, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1L,1L,2L,2L))
res <- sfheaders:::rcpp_sf_polygon(m, c(0L,1L), 2L, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
res <- sfheaders:::rcpp_sf_polygon(m, c(0L,1L), 2L, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
res <- sfheaders:::rcpp_sf_polygon(m, c(0L,1L), 2L, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c(0L,1L), 2L, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1L,1L,2L,2L) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(m, c("V1","V2"), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(m, c("V1","V2"), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,2:8), ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), NULL, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(m, c("V1","V2"), "V3", NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1L,1L,2L,2L) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(m, c("V1","V2"), "V3", "V3", xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2) )
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(m, c("V1","V2"), "V3", "V3", xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(c(1.2,2:8), ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), "V3", NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:8, ncol = 2)
m <- cbind(m, c(1,1,2,2))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), c("V3"), NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), c("V3"), NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c(0L,1L), 2L, NULL, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1L))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c(0L,1L), NULL, 2L, xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
df <- as.data.frame( m )
res <- sfheaders:::rcpp_sf_polygon(df, c("V1","V2"), NULL, "V3", xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
m <- matrix(1:2, ncol = 2)
m <- cbind(m, c(1))
df <- as.data.frame( m )
m <- as.matrix( df )
res <- sfheaders:::rcpp_sf_polygon(m, c("V1","V2"), NULL, "V3", xyzm = "", keep = TRUE, close = FALSE )
expect_true( all( attr(res, "class") == c("sf", "data.frame") ) )
})
test_that("polygons are closed and list-column properties follow the same structure as geometries, and the first row is repeated where closed",{
df <- data.frame(
multi_id = c(1,1,1,1, 1,1,1,1,1, 1,1,1,1)
, poly_id = c(1,1,1,1, 1,1,1,1,1, 2,2,2,2)
, line_id = c(1,1,1,1, 2,2,2,2,2, 1,1,1,1)
, x = c(0,0,1,1, 2,2,3,3,2, 10,10,12,12)
, y = c(0,1,1,0, 2,3,3,2,2, 10,12,12,10)
, z = c(1,2,2,2, 2,3,3,3,2, 3,2,2,3)
, prop = letters[1:13]
)
## In this test:
## - polygon 1 has two rings, the first needs closing, the 2nd is OK.
## - polygon 2 has one ring, it needs closing
sf_poly <- sfheaders::sf_polygon(
obj = df
, x = "x"
, y = "y"
, z = "z"
, polygon_id = "poly_id"
, linestring_id = "line_id"
, list_columns = "prop"
, keep = T
, close = TRUE
)
expect_true( attr(sf_poly[1, ]$geometry[[1]][[1]], "closed" ) == "has_been_closed" )
expect_true( is.null( attr(sf_poly[1, ]$geometry[[1]][[2]], "closed" ) ) )
expect_true( attr(sf_poly[2, ]$geometry[[1]][[1]], "closed" ) == "has_been_closed" )
expect_equal( sf_poly[1, ]$prop[[1]][[1]], c( letters[1:4], letters[1] ) )
expect_equal( sf_poly[1, ]$prop[[1]][[2]], letters[5:9] )
expect_equal( sf_poly[2, ]$prop[[1]][[1]], c( letters[10:13], letters[10] ) )
## In this test we're not closing the polygon
sf_poly <- sfheaders::sf_polygon(
obj = df
, x = "x"
, y = "y"
, z = "z"
, polygon_id = "poly_id"
, linestring_id = "line_id"
, list_columns = "prop"
, keep = T
, close = FALSE
)
expect_true( is.null( attr(sf_poly[1, ]$geometry[[1]][[1]], "closed" ) ) )
expect_true( is.null( attr(sf_poly[1, ]$geometry[[1]][[2]], "closed" ) ) )
expect_true( is.null( attr(sf_poly[2, ]$geometry[[1]][[1]], "closed" ) ) )
expect_equal( sf_poly[1, ]$prop[[1]][[1]], letters[1:4] )
expect_equal( sf_poly[1, ]$prop[[1]][[2]], letters[5:9] )
expect_equal( sf_poly[2, ]$prop[[1]][[1]], letters[10:13] )
## In this test the rings of each polygon are the same as for sf_poly
## but they are all contained inside a MULTIPOLYGON
sf_multi <- sfheaders::sf_multipolygon(
obj = df
, x = "x"
, y = "y"
, z = "z"
, multipolygon_id = "multi_id"
, polygon_id = "poly_id"
, linestring_id = "line_id"
, list_columns = "prop"
, keep = T
, close = TRUE
)
expect_true( attr(sf_multi[1, ]$geometry[[1]][[1]][[1]], "closed" ) == "has_been_closed" )
expect_true( is.null( attr(sf_multi[1, ]$geometry[[1]][[1]][[2]], "closed" ) ) )
expect_true( attr(sf_multi[1, ]$geometry[[1]][[2]][[1]], "closed" ) == "has_been_closed" )
expect_equal( sf_multi[1, ]$prop[[1]][[1]][[1]], c( letters[1:4], letters[1] ) )
expect_equal( sf_multi[1, ]$prop[[1]][[1]][[2]], letters[5:9] )
expect_equal( sf_multi[1, ]$prop[[1]][[2]][[1]], c( letters[10:13], letters[10] ) )
})
| /tests/testthat/test-sf_polygon.R | permissive | dcooley/sfheaders | R | false | false | 9,005 | r |
rm(list=ls())
cat("\014")
library(ALL)
data(ALL)
cat("\014")
ist = grep("T", as.character(ALL$BT))
length(ist)
T = ALL[, ist]
cat("\014")
samplesCR = c('01003',
'01007',
'04018',
'09002',
'10005',
'11002',
'15006',
'16002',
'16007',
'19002',
'19014',
'19017',
'20005',
'24006',
'26009',
'28008',
'28009',
'37001',
'43015',
'44001',
'49004',
'56007',
'65003',
'83001')
CR = T[,samplesCR]
CR
pdata = pData(CR)
summary(pdata)
cat("\014")
T22 = CR[,-c(2,4)]
pdata = pData(T22)
summary(pdata)
dfdata = as.data.frame(pData(T22))
cat("\014")
setClass = lapply(T22$f.u, function(t) (if ( ("REL" %in% t) ) { return(1) } else { return(0) } ))
dfClass = as.data.frame(setClass)
names(dfClass)=sampleNames(T22)
table(unlist(dfClass))
dfdata['class'] = t(dfClass)
cat("\014")
nGene = 12625L
genes <- featureNames(T22)
head(genes)
matT22exprs = t(exprs(T22)[,])
library(iterativeBMA)
genesel <- BssWssFast(matT22exprs, t(dfClass), 2)
geneidx = genesel$ix[1:nGene]
genes <- featureNames(T22)[geneidx]
head(genes)
generatio = genesel$x[1:nGene]
head(generatio)
tail(generatio)
round(generatio[1:25],2)
cat("\014")
library(hgu95av2.db)
?hgu95av2ENTREZID
D = T22[genes,]
symbolsD = select(hgu95av2.db,
featureNames(D),
c("SYMBOL","ENTREZID", "GENENAME"))
cat("\014")
genesize = 25
D25 = D[c(1:genesize),]
featureNames(D25)
symbolsD25 = select(hgu95av2.db,
featureNames(D25),
c("SYMBOL","ENTREZID", "GENENAME"))
cat("\014")
genesize = 3
D3 = D25[c(1,10,19),]
featureNames(D3)
symbolsD3 = select(hgu95av2.db,
featureNames(D3),
c("SYMBOL","ENTREZID", "GENENAME"))
expdata = cbind(as.data.frame(t(round(exprs(D3),digits=2))),as.numeric(dfClass))
colnames(expdata)[genesize+1] = "class"
cat("\014")
t21pdata <- pData(T22)[-c(12),] # 24006
t21pdata
library(lubridate)
datelastseen = mdy(t21pdata$`date last seen`)
datecr = mdy(t21pdata$date.cr)
datediff = datelastseen - datecr
datediff
imputedate = mean(datediff)
imputedate
imputecr = mdy(pData(T22)[12,21])-imputedate
imputecr
months(imputecr)
pData(T22)[12,8] =
as.character.Date(imputecr, "%m/%d/%Y")
pData(T22)[12,]
datelastseen =
mdy(pData(T22)$`date last seen`)
datecr = mdy(pData(T22)$date.cr)
datediff = as.numeric(datelastseen - datecr)
datediff
dfdata['fu_time'] = datediff
cat("\014")
idx=19
gene19 = round(exprs(D25)[idx,],2)
dfdata['gene19'] = gene19
View(dfdata)
fu_time <- dfdata[,"fu_time"] # continuous variable (numeric)
relapse <- dfdata[,"class"] # binary variable (numeric)
PFKFB3 <- unlist(dfdata[,"gene19"])
medPFKFB3 = round(median(PFKFB3),2)
overPFKFB3 <- ifelse(gene19>=medPFKFB3,1,0) # dichotomise pfkfb3
overPFKFB3
table(overPFKFB3, exclude = NULL) # inspect the numbers
table(PFKFB3, overPFKFB3, exclude = NULL) # check the dichotomisation
library(survival) # Surv(), survdiff() and survfit() come from the survival package
survdiff(Surv(fu_time, relapse) ~ overPFKFB3, rho=0) # log-rank test on the dichotomised expression group
km_fit <- survfit(Surv(fu_time, relapse) ~ overPFKFB3, data = dfdata)
library(survminer)
ggsurvplot(
fit = km_fit,
legend.title = "PFKFB3 over-expression",
palette = c("blue", "red"),
pval = TRUE,
xlab = "Time (Days)",
ylab = "CCR probability")
| /pfkfb3.r | no_license | vuslan/sage | R | false | false | 3,439 | r |
library(soil.spec)
library(readr)
#Read OFRA 2 MIR data
#set working directory
setwd("~/Models/aFSIS/data")
#set name of new spectra
new <- "VS"
afs<-read_csv("~/Dropbox/AfSIS_reporting_data/Seperated_datasets/Calibration_Htsxt_MIR.csv")
new<-read_csv("~/Dropbox/AfSIS_MASTER_FILES/VS/VS_combined/data/VS_MIR_spectra.csv")
#rename new colnames
#new spectra starts which column?
hd.0<-as.vector(colnames(new))
k0<-menu(hd.0,graphics=TRUE,title="Select where column names with spectra begins")
wave.0 <- paste0("m",round(as.numeric(substr(colnames(new[,2:length(hd.0)]),2,19)),1))
colnames(new) <-c("SSN",wave.0)
#Getting matching names in the two tables and rbind
p.0<-which(colnames(new)%in%colnames(afs))
afsn.0<-rbind(afs,new[,p.0])
#Store the file
write.table(afsn.0,file="afsis plus new spectra.csv",sep=",",row.names=FALSE)
z<-ncol(afsn.0)
afsn<-afsn.0[,-c(1,z)]
colnames(afsn)<-as.numeric(substr(colnames(afsn),2,16))
#Obtain derivatives
afsn<-as.matrix(afsn)
de.s<-trans(afsn,tr="derivative",order=1,gap=21)
der.s<-as.matrix(de.s$trans)
#########Run PCA#################################
pc<-prcomp(der.s)
imp<-summary(pc)$importance
pcs<-pc$x[,1:10]
pcs.ssn<-cbind(as.vector(afsn.0[[1]]),pcs) # SSN comes from afsn.0; the SSN column was dropped from afsn
colnames(pcs.ssn)<-c("SSN",colnames(pcs))
ypc<-as.data.frame(pcs.ssn)
write.table(ypc,file="afsis pc scores plus new spectra.csv",sep=",",row.names=FALSE)
ypc<-read.csv("afsis pc scores plus new spectra.csv")
png(file=paste0("~/Dropbox/AfSIS_MASTER_FILES/VS/VS_combined/figs/AfSIS and", new,".png"),width=600,height=600)
par(mfrow=c(1,1))
plot(ypc[,2:3],col="blue",pch="",main="PCA scores for afsis and new soil spectra",cex.lab=1.2,xlab=paste0("PC1 explains ", round(imp[2,1],3)*100, " % total variation"),ylab=paste0("PC2 explains ", round(imp[2,2],3)*100, " % total variation"))
k<-nrow(afs)
points(ypc[1:k,2:3],col="blue",pch=19)
points(ypc[-c(1:k),2:3],col="red",pch=19)
legend("bottomright",pch=19,col=c("blue","red"),c("AfSIS",new),bty="n",text.col=c("blue","red"))
dev.off()
| /afsis PCA models vs new spectra.R | no_license | asila/spectral_data_code | R | false | false | 1,986 | r |
plot4 <- function () {
par(mfrow=c(2,2))
plot(data$time,data$Global_active_power, type="l", ylab="Global Active Power", xlab="")
plot(data$time,data$Voltage, type="l",ylab="Voltage", xlab="datetime")
plot(data$time,data$Sub_metering_1, type="l",ylab="Energy sub metering", xlab="")
lines(data$time,data$Sub_metering_2,col="red")
lines(data$time,data$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1),cex = 0.75)
plot(data$time,data$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
}
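# --- Hypothetical data-preparation sketch (not part of the original file) ---
# plot4() above assumes a data frame named `data` is already loaded. Assuming the
# course's "household_power_consumption.txt" file (semicolon-separated, "?" for
# missing values, dates formatted d/m/Y), it could be prepared roughly as follows:
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", stringsAsFactors = FALSE)
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]   # keep the two plotted days
data$time <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")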
| /plot4.R | no_license | HGCobos/ExData_Plotting1 | R | false | false | 727 | r |
library(shiny)
library(chron)
library(ggplot2)
precio <- read.csv("Precio.csv", header = T)
precio$Fecha <- as.Date(precio$Fecha, "%d/%m/%Y")
precio <- precio[order(precio[, 3], precio[, 2]), ]
precio$Variacion <- c(0,diff(precio$Precio))
attach(precio)
shinyServer(function(input, output) {
PrecioF <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2] &
Region == input$Region, ])
PrecioG <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2], ])
Ant <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2] &
Region == levels(Region)[1], c(1,4)])
Bog <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2] &
Region == levels(Region)[2], 4])
Caribe <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2] &
Region == levels(Region)[3], 4])
Eje <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2] &
Region == levels(Region)[4], 4])
Valle <- reactive(precio [Fecha >= input$FFecha[1] & Fecha <= input$FFecha[2] &
Region == levels(Region)[5], 4])
output$General <- renderTable(data.frame(PrecioF() [,c(1, 4, 5)]))
output$GrafxReg <- renderPlot({plot(PrecioF()[,2], PrecioF()[,4], pch = 16,
main = paste("Precio Semanal", input$Region),
xlab = "Fecha", col = "darkblue",
ylab = "Precio Kg en Pie ($)", lwd = 2,
panel.first = grid(), type = "b")})
output$Var <- renderPlot({barplot(PrecioF()[,5],
main = "Variación del Precio Respecto a la Semana Anterior",
xlab = "Fecha", ylab = "Variación Precio ($)",
                                    col = ifelse(PrecioF()[,5] < 0,
                                                 "darkred", "darkgreen"))
  })
  #output$Variacion <- renderTable({data.frame(Semana = PrecioF()[-1,1],
  #                                            Variación = diff(PrecioF()[,4]))})
output$Resumen <- renderTable(data.frame(Ant(), Bog(), Caribe(), Eje(), Valle())
)
output$GrafREs <- renderPlot(qplot(Fecha, Precio, data=PrecioG(), shape = Region,
color=Region, geom = c("point", "line"),
main = "Precio Semanal", xlab = "Fecha",
ylab = "Precio Kg en Pie") +
theme(legend.position="bottom"))
output$GrafVar <- renderPlot(qplot(Fecha, Variacion, data=PrecioG(), shape = Region,
color=Region, geom = c("point", "line"),
main = "Variación del Precio Semanal",
xlab = "Fecha",
ylab = "Variación Precio Kg en Pie") +
theme(legend.position="bottom"))
})
| /server.R | no_license | jclopez59/Precio-Cerdo | R | false | false | 3,307 | r |
# This code gets us a WDI dataset using the `WDI` package. There are two main
# outputs here:
#
# - `wdi_data`, which can take a while to load, so I suggest saving it through
# the `saveRDS(wdi_data, "wdi_data.rds")` function.
#
# - `wdi_varlist` provides the whole set of variables which I thought were most
# relevant. This one is important because otherwise there's no easy way of
# knowing what, for example, `BN.KLT.DINV.DRS.GDP.ZS` means.
library(tidyverse)
library(WDI)
library(countrycode)
## First step: find relevant variable codes and store them as a string.
#WDIsearch("...TAX", field = "indicator") %>% View()
code_regex <- c("DINV", "FRM.COR", "^NY.GDP", "^RL", "^GV", "...TAX")
wdi_varlist <- map(code_regex, WDIsearch, field = "indicator")
indicator_names <- map(wdi_varlist, ~.[ , 1]) %>% unlist() ## many indicators
## Second step: load data (this might take a while). Also, only 87 indicators survive the process.
wdi_data <- WDI(country = "all", indicator = indicator_names,
start = 2000, end = 2017, extra = FALSE) %>% as_data_frame()
## Third step: get country and continent names
wdi_data$country <- countrycode(wdi_data$iso2c, origin = "iso2c",
destination = "country.name")
wdi_data$continent <- countrycode(wdi_data$country, origin = "country.name",
destination = "continent")
## Fourth step: Clean up and organize
wdi_data <- wdi_data %>%
filter(!is.na(country)) %>%
select(country, continent, year, everything())
## Fifth step: make wdi_varlist into a useful format
wdi_varlist <- data_frame(regex = code_regex, info = wdi_varlist) %>%
mutate(info = map(info, as_data_frame)) %>%
unnest()
## Sixth step: save data
readr::write_rds(wdi_data, "wdi_data.RDS")
readr::write_rds(wdi_varlist, "wdi_varlist.RDS")
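## Usage sketch (not part of the original script): `wdi_varlist` can now be used to
## decode an indicator code such as the one mentioned in the header comment.
## The column names "indicator" and "name" are assumed from the WDIsearch() output.
wdi_varlist %>% filter(indicator == "BN.KLT.DINV.DRS.GDP.ZS") %>% pull(name)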
| /wdi_data/wdi.R | no_license | acastroaraujo/TaxHavens | R | false | false | 1,745 | r |
#' Plot single-sample network
#'
#' This function plots a specified number of edges in a specified sample network.
#' It should be used following \code{\link{runLioness}}.
#' LIONESS estimates single-sample networks from an aggregate network generated by PANDA.
#'
#' @param col Numerical Vector indicating the index (0-based) of the specified sample in a population. Defaults to 0.
#' @param top Numeric Vector indicating the number of edges to plot, selected in decreasing order of edge weight. Defaults to 100.
#' @param file Character String indicating the name of output .png file. Defaults to 'lioness_top_100.png'.
#'
#' @return A message showing the path of output plot.
#' @examples
#' # refer to the input datasets files of control in inst/extdat as example
#' control_expression_file_path <- system.file("extdata", "expr10.txt", package = "netZoo", mustWork = TRUE)
#' motif_file_path <- system.file("extdata", "chip.txt", package = "netZoo", mustWork = TRUE)
#' ppi_file_path <- system.file("extdata", "ppi.txt", package = "netZoo", mustWork = TRUE)
#'
#' # Run PANDA algorithm
#' control_lioness_result <- runLioness(e = control_expression_file_path, m = motif_file_path, ppi = ppi_file_path, rm_missing = TRUE )
#'
#' # plot the top 100 edges in the first sample network.
#' plotLioness(col = 0, top = 100, file = "top100_sample1.png")
#'
#' @import reticulate
#' @export
plotLioness <- function(col = 0, top = 100, file = 'lioness_top_100.png'){
# source analyze_lioness.py in pypanda from GitHub
reticulate::source_python("https://raw.githubusercontent.com/twangxxx/pypanda/master/pypanda/analyze_lioness.py",convert = TRUE)
# run py code to create an instance named "plot" of AnalyzeLioness class.
py_run_string(paste("plot = AnalyzeLioness(l)"))
# invoke the method "top_network_plot" to plot the network.
py_run_string(paste("plot.top_network_plot(column=", col,",", "top=", top, ",", "file='", file,"\')",sep=""))
# print out a message to indicate the path of output .png file.
message(paste("The plot of top ", top, " edges in sample ", col+1, " is located in ", getwd(), "/", file, sep = ""))
}
| /R/plotLioness.R | no_license | xhyuo/netZoo | R | false | false | 2,169 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm.R
\name{ggDiagnose.lm}
\alias{ggDiagnose.lm}
\title{Diagnostic plot for lm and glm objects (ggplot based)}
\usage{
ggDiagnose.lm(x, which = c(1L:3L, 5L),
caption = list("Residuals vs Fitted", "Normal Q-Q", "Scale-Location",
"Cook's distance", "Residuals vs Leverage",
expression("Cook's dist vs Leverage " * h[ii]/(1 - h[ii]))),
sub.caption = NULL, main = NULL, ..., id.n = 3,
labels.id = factor(names(stats::residuals(x)), levels =
names(stats::residuals(x))), qqline = TRUE, cook.levels = c(0.5, 1),
show.plot = TRUE, return = FALSE, shape = 1, nrow = min(2,
length(which)), smooth.color = "blue", dashed.color = c("red",
"blue", NA, NA, "red", "black"))
}
\arguments{
\item{x}{\code{lm} or \code{glm} object}
\item{which}{which plots you'd like to create}
\item{caption}{title per plot}
\item{sub.caption}{caption for bottom of multiple plot visual (defaults
to the formula of the model.)}
\item{main}{title for the multiple plot visual}
\item{...}{extra attributes (currently not used)}
\item{id.n}{id the n observations with largest residuals}
\item{labels.id}{labels for all observations}
\item{qqline}{logic for whether a qqline should be drawn (a line between the
25 and 75 quantiles in the Q-Q plot)}
\item{cook.levels}{levels of cooks distance to visualize in the leverage vs
standardized residual graphic}
\item{show.plot}{logic to display the graphics (group of graphics in this
case)}
\item{return}{logic to return list of graphics and the data frame to make
the majority of graphics}
\item{shape}{shape of points (the default is 1, an open circle)}
\item{nrow}{number of rows in the displayed graphic}
\item{smooth.color}{color for smoothing lines}
\item{dashed.color}{color for dashed line (a vector of length 6 is expected)}
}
\value{
depending on \code{show.plot} and \code{return} it
will return the visualization of the graphics and/or a list
of both the data frame used to make the majority of the graphic and
a list of each individual graphic.
}
\description{
This function leverages code from the \code{\link[stats]{plot.lm}} function
from the \pkg{stats} library. It allows for the same input (except when
related directly to `par` functionality, which makes no sense to include).
We also include "newer" functionality.
}
\details{
This function can deal with \code{\link[stats]{lm}} and
\code{\link[stats]{glm}} objects.
}
\examples{
lm.object <- lm(Sepal.Length ~., data = iris)
par(mfrow = c(2,3))
plot(lm.object, which = 1:6)
ggDiagnose.lm(lm.object, which = 1:6)
}
\seealso{
see \code{\link{dfCompile.lm}} for data creation.
}
| /man/ggDiagnose.lm.Rd | permissive | benjaminleroy/ggDiagnose | R | false | true | 2,674 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_metrics.R
\name{pct_dom1_group}
\alias{pct_dom1_group}
\title{The percent of the most dominant group}
\usage{
pct_dom1_group(long.df, master.df, Group, taxa.rank)
}
\arguments{
\item{long.df}{Long data frame format of taxonomic counts.}
\item{master.df}{Taxonomic attributes table.}
\item{Group}{The taxonomic group to be assessed}
\item{taxa.rank}{The taxonomic rank used during the assessment.}
}
\value{
The percentage of taxa represented by the most dominant (abundant)
group. Typically, this function is used to assess the functional feeding
groups and habits.
}
\description{
The percent of the most dominant group
}
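% Hypothetical usage sketch (added for illustration, not from the package); the
% objects and argument values below are placeholders.
\examples{
\dontrun{
pct_dom1_group(long.df = taxa_counts, master.df = taxa_attributes,
               Group = "FFG", taxa.rank = "FAMILY")
}
}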
| /man/pct_dom1_group.Rd | no_license | esocid/Benthos | R | false | true | 716 | rd |
# Define server logic required to draw a histogram
server <- function(input, output, session) {
temp <- reactiveVal(fromJSON(urlWeather)$main$temp)
# Refresh date and week every hour
today <- reactive({
invalidateLater(3600000)
Sys.Date()
})
weekNr <- reactive({
invalidateLater(3600000)
week(Sys.Date())
})
# Where to put this? One file reader per session or one shared with all sessions.
#backup_file = "H:/Dokument/Dashboard/Dashboard/calendar_backup.csv"
backup_file = "C:/Users/kottd/Documents/Dashboard/Dashboard/calendar_backup.csv"
calendarRV <- reactiveFileReader(1000, session, backup_file, read.csv, stringsAsFactors = FALSE)
# Update calendar when the csv is changed
combinedData <- reactive({
calendar <- calendarRV()
calendarData <- data.frame(
id = seq(startID, startID + length(calendar[, 1]) - 1, 1),
start = as.character(calendar$start),
end = as.character(calendar$end),
content = calendar$content,
className = calendar$content,
group = calendar$group
)
combinedData = rbind(weekData, calendarData)
return(combinedData)
})
# Schedule plot
output$schema <- renderTimevis({
timevis(
combinedData(),
showZoom = FALSE,
fit = FALSE,
groups = groups,
options = list(
editable = c(remove = TRUE),
zoomable = FALSE,
timeAxis = c(scale = "weekday", step = 1),
start = floor_date(today(), unit = "week") + days(1),
end = ceiling_date(today(), unit = "week") + days(1),
stack = FALSE
)
)
})
# Add events directly to csv file
observeEvent(input$addEvent, {
appendVec <-
c(
as.character(input$startDate),
as.character(input$endDate),
input$eventName,
input$person
)
appendLine <- paste(appendVec, collapse = ",")
write(appendLine, file = backup_file, append = TRUE)
})
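  # Note (inferred from the reader and writer above; example values are hypothetical):
  # calendar_backup.csv is assumed to be a plain CSV with the header
  # "start,end,content,group" and one row per event, e.g.
  # 2019-01-07,2019-01-11,Semester,Anna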
# Save csv backup of calendar when something is removed via the UI
observeEvent(input$schema_data, {
newData <- input$schema_data
newData <- newData[!(newData$group == "Week"), ]
oldData <- calendarRV()
if (length(newData[, 1]) < length(oldData[, 1])) {
backupTable <- newData
backupTable$id = NULL
write.csv(backupTable, file = backup_file, row.names = FALSE)
}
})
# Focus on selected item
observeEvent(input$schema_selected, {
centerItem("schema", input$schema_selected)
})
fika <- reactiveVal(1)
observeEvent(weekNr(), {
fika(fikaStep(1))
})
observeEvent(input$fikaNext, {
fika(fikaStep(1))
})
observeEvent(input$fikaBack, {
fika(fikaStep(-1))
})
# output$weekNr <- renderValueBox({
# valueBox(
# paste("Vecka", weekNr()),
# today(),
# icon = icon("calendar"),
# color = "blue"
# )
# })
output$fika <- renderValueBox({
valueBox(Names[fika()],
"Fika Ansvarig",
icon = icon("coffee"),
color = "green")
})
output$fikaNext <- renderValueBox({
valueBox(
Names[fikaStep(1)],
"Nästa Fika Ansvarig",
icon = icon("angle-double-right"),
color = "yellow"
)
})
output$temp <- renderValueBox({
valueBox(
paste(temp(), "°C"),
ifelse(temp() <= 0, "Brrrr...", "Nice..."),
icon = icon("thermometer-half"),
color = ifelse(temp() <= 0, "aqua", ifelse(temp() <= 10, "green", "yellow"))
)
})
# Get the index of the next person in the fika list
fikaStep <- function(step) {
if (fika() >= length(Names) & step > 0) {
return(1)
}
else if (fika() == 1 & step < 0) {
return(length(Names))
}
else {
return(fika() + step)
}
}
}
| /Dashboard/server.R | no_license | emos8710/Dashboard | R | false | false | 3,794 | r |
#' Posterior predictive intervals
#'
#' Posterior predictive intervals of the model.
#'
#' @template args-methods
#' @template args-prob
#' @template args-dots-ignored
#'
#' @details
#'
#' Reports for each row of the input data set the predictive interval
#' according to the fitted model.
#'
#' @return Matrix with as many rows as the input data set and two
#' columns which contain the lower and upper quantile
#' corresponding to the central probability mass \code{prob} for
#' the number of responses of the predictive distribution.
#'
#' @template start-example
#' @examples
#' example_model("single_agent", silent=TRUE)
#'
#' predictive_interval(blrmfit)
#'
#' @template stop-example
#'
#' @method predictive_interval blrmfit
#' @aliases predictive_interval
#' @export
predictive_interval.blrmfit <- function(object, prob=0.95, newdata, ...) {
##yrep <- posterior_predict(object, newdata=newdata)
##rstantools::predictive_interval(yrep, prob=prob)
assert_number(prob, lower=0, upper=1, finite=TRUE)
s <- summary(object, newdata=newdata, prob=prob, predictive=TRUE, transform=FALSE)
cp <- c(0.5- prob/2, 0.5+ prob/2)
as.matrix(s[,c(3,5)])
}
#' @method predictive_interval blrm_trial
#' @export
predictive_interval.blrm_trial <- function(object, prob=0.95, newdata, ...) {
.assert_is_blrm_trial_and_prior_is_set(object)
if(missing(newdata)) {
return(predictive_interval.blrmfit(object$blrmfit, prob=prob, newdata=object$data, ...))
} else {
return(predictive_interval.blrmfit(object$blrmfit, prob=prob, newdata=newdata, ...))
}
}
| /R/predictive_interval.R | no_license | cran/OncoBayes2 | R | false | false | 1,604 | r |
#' Multilevel Exponential-Family Random Graph Models
#'
#' This function estimates an exponential-family random graph model for multilevel network data. At present, \code{mlergm} covers network data where the set of nodes is nested within known blocks (see, e.g., Schweinberger and Handcock, 2015). An example is groups of students nested within classrooms, which is covered in the \code{\link{classes}} data set. It is assumed that the node membership, that is, the block to which each node belongs, is known (or has been previously estimated).
#'
#' The estimation procedure performs Monte-Carlo maximum likelihood for the specified ERGM using a version of the Fisher scoring method detailed by Hunter and Handcock (2006). Settings governing the MCMC procedure (such as \code{burnin}, \code{interval}, and \code{sample_size}) as well as more general settings for the estimation procedure can be adjusted through \code{\link{set_options}}. The estimation procedure uses the stepping algorithm of Hummel et al. (2012) for added stability.
#'
#' @param form Formula of the form: \code{network ~ term1 + term2 + ...}; allowable model terms are a subset of those in R package ergm,
#' see \code{\link{ergm.terms}}.
#' @param node_memb Vector (length equal to the number of nodes in the network) indicating to which block or group the nodes belong.
#' If the network provided in \code{form} is an object of class \code{mlnet},
#'              then \code{node_memb} can be extracted directly from the network and need not be provided.
#' @param parameterization Parameterization options include 'standard', 'offset', or 'size'.
#' \itemize{
#' \item 'standard' : Does not adjust the individual block parameters for size.
#' \item 'offset' : The offset parameterization uses edge and mutual offsets along the lines of Krivitsky, Handcock, and Morris (2011) and Krivitsky and Kolaczyk (2015). The edge parameter is offset by \eqn{-log n(k)} and the mutual parameter is offset by \eqn{+log n(k)}, where \eqn{n(k)} is the size of the kth block.
#' \item 'size' : Multiplies the block parameters by \eqn{log n(k)}, where \eqn{n(k)} is the size of the kth block.
#' }
#' @param options See \code{\link{set_options}} for details.
#' @param theta_init Parameter vector of initial estimates for theta to be used.
#' @param verbose Controls the level of output. A value of \code{0} corresponds to no output, except for warnings; a value of \code{1} corresponds to minimal output, and a value of \code{2} corresponds to full output.
#' @param eval_loglik (Logical \code{TRUE} or \code{FALSE}) If set to \code{TRUE}, the bridge estimation procedure of Hunter and Handcock (2006) is used to estimate the loglikelihood for BIC calculations, otherwise the loglikelihood and therefore the BIC is not estimated.
#' @param seed For reproducibility, an integer-valued seed may be specified.
#' @return
#' \code{\link{mlergm}} returns an object of class \code{\link{mlergm}} which is a list containing:
#' \item{theta}{Estimated parameter vector of the exponential-family random graph model.}
#' \item{between_theta}{Estimated parameter vector of the between group model.}
#' \item{se}{Standard error vector for theta.}
#' \item{between_se}{Standard error vector for between_theta.}
#' \item{pvalue}{A vector of p-values for the estimated parameter vector.}
#' \item{between_pvalue}{A vector of p-values for the estimated parameter vector.}
#' \item{logLikval}{The loglikelihood at the estimated MLE.}
#' \item{bic}{The BIC for the estimated model.}
#' \item{mcmc_chain}{The MCMC sample used in the final estimation step, which can be used to diagnose non-convergence.}
#' \item{estimation_status}{Indicator of whether the estimation procedure ended in \code{success} or \code{failed}.}
#' \item{parameterization}{The model parameterization (either \code{standard} or \code{offset}).}
#' \item{formula}{The model formula.}
#' \item{network}{The network for which the model is estimated.}
#' \item{node_memb}{Vector indicating to which group or block the nodes belong.}
#' \item{size_quantiles}{The quantiles of the block sizes.}
#'
#' @references
#'
#' Schweinberger, M. and Stewart, J. (2019)
#' Concentration and consistency results for canonical and curved exponential-family random graphs.
#' The Annals of Statistics, to appear.
#'
#'
#' Schweinberger, M. and Handcock, M. S. (2015).
#' Local dependence in random graph models: characterization, properties and statistical inference.
#' Journal of the Royal Statistical Society: Series B (Statistical Methodology), 77(3), 647-676.
#'
#' Hunter, D. R., and Handcock, M. S. (2006).
#' Inference in curved exponential family models for networks.
#' Journal of Computational and Graphical Statistics, 15(3), 565-583.
#'
#' Hummel, R. M., Hunter, D. R., and Handcock, M. S. (2012).
#' Improving simulation-based algorithms for fitting ERGMs.
#' Journal of Computational and Graphical Statistics, 21(4), 920-939.
#'
#' Krivitsky, P. N., Handcock, M. S., & Morris, M. (2011).
#' Adjusting for network size and composition effects in exponential-family random graph models.
#' Statistical methodology, 8(4), 319-339.
#'
#' Krivitsky, P.N, and Kolaczyk, E. D. (2015).
#' On the question of effective sample size in network modeling: An asymptotic inquiry.
#' Statistical science: a review journal of the Institute of Mathematical Statistics, 30(2), 184.
#'
#' Hunter D., Handcock M., Butts C., Goodreau S., and Morris M. (2008).
#' ergm: A Package to Fit, Simulate and Diagnose Exponential-Family Models for Networks.
#' Journal of Statistical Software, 24(3), 1-29.
#'
#' Butts, C. (2016).
#' sna: Tools for Social Network Analysis.
#' R package version 2.4. \url{https://CRAN.R-project.org/package=sna}.
#'
#' Butts, C. (2008).
#' network: a Package for Managing Relational Data in R.
#' Journal of Statistical Software, 24(2). \url{http://www.jstatsoft.org/v24/i02/paper}.
#'
#' Stewart, J., Schweinberger, M., Bojanowski, M., and M. Morris (2019).
#' Multilevel network data facilitate statistical inference for curved {ERGM}s with geometrically weighted terms.
#' Social Networks, to appear.
#'
#' Schweinberger, M., Krivitsky, P. N., Butts, C.T. and J. Stewart (2018).
#' Exponential-family models of random graphs: Inference in finite-, super-, and infinite-population scenarios.
#' https://arxiv.org/abs/1707.04800
#'
#' @seealso \code{\link{gof.mlergm}}, \code{\link{mlnet}}
#' @keywords estimation
#' @export
#' @importFrom stats median sd as.formula update simulate update.formula pnorm quantile
#' @importFrom parallel stopCluster mclapply makeCluster clusterEvalQ clusterApply parLapply
#' @importFrom Matrix bdiag
#' @importFrom stringr str_match str_split str_trim str_replace_all
#' @importFrom cowplot plot_grid
#' @importFrom reshape2 melt
#' @importFrom plyr is.formula
#' @importFrom methods is
#' @importFrom graphics plot
#' @import ergm
#' @import network
#' @examples
#' \donttest{
#' ### Load the school classes data-set
#' data(classes)
#'
#' # Estimate a curved multilevel ergm model with offset parameter
#' # Approximate run time (2 cores): 1.2m, Run time (3 cores): 55s
#' model_est <- mlergm(classes ~ edges + mutual + nodematch("sex") + gwesp(fixed = FALSE),
#' seed = 123,
#' options = set_options(number_cores = 2))
#'
#' # To access a summary of the fitted model, call the 'summary' function
#' summary(model_est)
#'
#' # Goodness-of-fit can be run by calling the 'gof.mlergm' method
#' # Approximate run time (2 cores): 48s, Run time (3 cores): 34s
#' gof_res <- gof(model_est, options = set_options(number_cores = 2))
#' plot(gof_res, cutoff = 15)
#' }
mlergm <- function(form,
node_memb,
parameterization = "standard",
options = set_options(),
theta_init = NULL,
verbose = 0,
eval_loglik = TRUE,
seed = NULL) {
# Check that required arguments are provided
if (missing(form)) {
stop("\nArgument 'form' not provided. A formula object must be provided.\n", call. = FALSE)
} else {
check_formula(form)
}
if (missing(node_memb)) {
# Check if network provided is of class 'mlnet'
net <- get_network_from_formula(form)
if (inherits(net, "mlnet")) {
node_memb <- get.vertex.attribute(net, "node_memb")
} else {
stop("\nArgument 'node_memb' not provided. The node memberships must be provided.\n", call. = FALSE)
}
}
# Check that the formula and terms requested are valid
check_terms(form, K = length(unique(node_memb)))
# If a seed is provided, set it
if (!is.null(seed)) {
check_integer(seed, "seed")
set.seed(seed, "L'Ecuyer")
}
if (verbose > 0) {
cat("\nBeginning estimation procedure for:")
cat(paste0("\n ", deparse(form)))
}
# Adjust formula if necessary
form <- adjust_formula(form)
# Parse formula to get network and model
net <- get_network_from_formula(form)
check_net(net)
terms <- get_terms_from_formula(form, net)
# Since net_list was not provided, we need to make it
memb_list <- check_and_convert_memb(node_memb)
memb_labels <- memb_list$memb_labels
memb_internal <- memb_list$memb_internal
rm(memb_list); clean_mem()
net_list <- make_net_list(net, memb_internal)
# Determine the parameterization type and compute necessary quantities
model_temp <- ergm_model(form, net)
param_list <- check_parameterization_type(net_list, terms, parameterization, model_temp)
statistic_names <- param_list$statistic_names
which_largest <- param_list$which_largest
# Initialize object
obj <- initialize_object(net_list = net_list,
net = net,
block_memb = memb_internal,
theta_init = theta_init,
param_list = param_list,
sim_param = options$sim_param,
est_param = options$est_param,
verbose = verbose,
parameterization = parameterization)
# Remove objects that are no longer needed
rm(param_list)
rm(options)
clean_mem()
# Initialize estimate if an initial estimate is not provided
cd_flag <- TRUE
if (is.null(obj$est$theta)) {
if (verbose > 0) {
cat("\n\nComputing initial estimate.")
}
if (!is.curved(obj$net$model)) {
obj$est$theta <- numeric(length(obj$net$model$coef.names))
obj <- compute_initial_estimate(obj)
} else {
obj$est$theta <- numeric(length(obj$net$model$coef.names))
obj <- compute_initial_estimate(obj)
}
if (verbose > 0) {
cat("\n Initial estimate:")
cat(paste("\n ", obj$net$theta_names, " = ", formatC(obj$est$theta, digits = 4, format = "f")))
}
} else {
obj$est$theta <- theta_init
}
# Call MCMLE to perform estimation
if (verbose > 0) {
cat("\n\n\nBegining Monte-Carlo maximum likelihood estimation\n")
cat("===================================================")
cat("\n")
}
obj <- MCMLE(obj)
# Estimate the between block model (if possible)
obj <- estimate_between_block(obj)
  # Evaluate results of MCMLE procedure
if (verbose > 0 & !obj$est$ML_status_fail) {
cat(paste0("\n\nComputing approximate loglikelihood at estimate using ",
obj$est$bridge_num, " bridges."))
cat("\n\n")
} else if (verbose > 0 & obj$est$ML_status_fail) {
cat("\n\nEstimation procedure stopping. Estimation unsuccesful.\n\n", call. = FALSE)
}
# Compute quantiles of block sizes
quantiles_of_sizes <- quantile(obj$net$clust_sizes)
# Make structure to be returned
if (!obj$est$ML_status_fail) {
if (obj$est$inCH_counter > 0) {
obj <- compute_pvalue(obj)
if (eval_loglik) {
obj$likval <- lik_fun(form = form, memb = node_memb, theta = obj$est$theta,
bridge_num = obj$est$bridge_num, ncores = obj$est$par_n_cores,
offset = obj$est$parameterization == "offset",
burnin = obj$sim$bridge_burnin,
interval = obj$sim$bridge_interval,
sample_size = obj$sim$bridge_sample_size,
size = obj$est$parameterization == "size")
obj$bic <- compute_bic(obj)
} else {
obj$likval <- NULL
obj$bic <- NULL
}
mcmc_path <- Reduce("+", obj$sim$stats)
colnames(mcmc_path) <- statistic_names
obj$est$theta <- as.numeric(obj$est$theta)
names(obj$est$theta) <- get_coef_names(obj$net$model, FALSE)
names(obj$se) <- names(obj$est$theta)
names(obj$pvalue) <- names(obj$est$theta)
estimates <- list(theta = obj$est$theta,
between_theta = obj$est$between_theta,
between_se = obj$est$between_se,
se = obj$se,
pvalue = obj$pvalue,
between_pvalue = obj$est$between_pvalue,
bic = obj$bic,
logLikval = obj$likval,
mcmc_chain = mcmc_path,
estimation_status = "success",
parameterization = obj$est$parameterization,
formula = form,
network = net,
node_memb = node_memb,
size_quantiles = quantiles_of_sizes)
class(estimates) <- "mlergm"
rm(mcmc_path); clean_mem()
} else if (obj$est$inCH_counter == 0) {
cat("\n\nWarning: Maximum number of iterations reached without the observation lying in the")
cat(" interior of the simulated convex hull. Parameters not estimated.\n\n")
estimates <- list(theta = NA,
se = NA,
formula = form,
network = net,
node_memb = node_memb,
size_quantiles = quantiles_of_sizes,
mcmc_chain = NULL,
estimation_status = "failed")
class(estimates) <- "mlergm"
}
} else {
estimates <- list(theta = NA,
se = NA,
formula = form,
network = net,
node_memb = node_memb,
size_quantiles = quantiles_of_sizes,
mcmc_chain = NULL,
estimation_status = "failed")
class(estimates) <- "mlergm"
}
if (verbose > 0) {
summary(estimates)
}
return(estimates)
}
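# Illustrative sketch (not part of mlergm itself): the per-block quantities used by
# the 'offset' parameterization documented above, where the edge parameter is offset
# by -log n(k) and the mutual parameter by +log n(k); `node_memb` is assumed to be a
# membership vector of the kind passed to mlergm().
# block_sizes   <- table(node_memb)               # n(k) for each block k
# edge_offset   <- -log(as.numeric(block_sizes))  # edge offsets, one per block
# mutual_offset <-  log(as.numeric(block_sizes))  # mutual offsets, one per block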
|
/R/mlergm.R
|
no_license
|
meleangelo/mlergm
|
R
| false | false | 14,816 |
r
|
/run_analysis.R
|
no_license
|
mtgds/Getting-and-Cleaning-Data-Project
|
R
| false | false | 3,210 |
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_get_toggleswitch.R
\name{get_toggleswitch}
\alias{get_toggleswitch}
\title{Toggle switch model}
\usage{
get_toggleswitch()
}
\value{
The list contains rprior, dprior (generate and evaluate the density of prior distribution),
generate_randomness (generate data-generating variables), robservation (create synthetic
data sets), parameter_names (useful for plotting), thetadim (dimension of parameter),
ydim (dimension of observations), parameters (list of hyperparameters,
to be passed to rprior,dprior,robservation)
}
\description{
This function returns a list representing the toggle switch model
of Bonassi, F. V., West, M., et al. (2015).
Sequential Monte Carlo with adaptive weights for approximate Bayesian computation. Bayesian Analysis, 10(1):171–187.
}
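\examples{
## Illustrative sketch (not in the original help page); component names follow the
## \value section above, and the exact argument conventions are assumed.
# model <- get_toggleswitch()
# model$thetadim                               # dimension of the parameter
# model$ydim                                   # dimension of the observations
# theta <- model$rprior(1, model$parameters)   # one draw from the prior
}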
|
/man/get_toggleswitch.Rd
|
no_license
|
alexanderwhatley/winference
|
R
| false | true | 847 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_get_toggleswitch.R
\name{get_toggleswitch}
\alias{get_toggleswitch}
\title{Toggle switch model}
\usage{
get_toggleswitch()
}
\value{
The list contains rprior, dprior (generate and evaluate the density of prior distribution),
generate_randomness (generate data-generating variables), robservation (create synthetic
data sets), parameter_names (useful for plotting), thetadim (dimension of parameter),
ydim (dimension of observations), parameters (list of hyperparameters,
to be passed to rprior,dprior,robservation)
}
\description{
This function returns a list representing the toggle switch model
of Bonassi, F. V., West, M., et al. (2015).
Sequential Monte Carlo with adaptive weights for approximate Bayesian computation. Bayesian Analysis, 10(1):171–187.
}
|
library(elliptic)
### Name: near.match
### Title: Are two vectors close to one another?
### Aliases: near.match
### Keywords: math
### ** Examples
x <- rep(1,6)
near.match(x, x+rnorm(6)/1e10)
|
/data/genthat_extracted_code/elliptic/examples/near.match.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 199 |
r
|
library(elliptic)
### Name: near.match
### Title: Are two vectors close to one another?
### Aliases: near.match
### Keywords: math
### ** Examples
x <- rep(1,6)
near.match(x, x+rnorm(6)/1e10)
|
n<-40
p<-0.99
result<-0
sum<-0
for(k in 0:n){
if(k%%2==0){
sum<-sum+choose(n-1,k)*(p^(n-1-k))*((1-p)^k)
}
}
result<-sum*100
print(result)
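# Equivalent vectorized check (illustrative addition): the loop above sums the
# binomial pmf dbinom(k, n - 1, 1 - p) over even k only, so the same value can
# be obtained without the loop.
k_even <- seq(0, n - 1, by = 2)
result_vec <- sum(dbinom(k_even, size = n - 1, prob = 1 - p)) * 100
print(result_vec) # should match `result`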
|
/R/teste.r
|
no_license
|
raphaelfilene/Portfolio
|
R
| false | false | 145 |
r
|
n<-40
p<-0.99
result<-0
sum<-0
for(k in 0:n){
if(k%%2==0){
sum<-sum+choose(n-1,k)*(p^(n-1-k))*((1-p)^k)
}
}
result<-sum*100
print(result)
|
rm(list=ls())
library('gee')
library('data.table')
library('MuMIn')
data=data.frame(read.table('full_df.tsv',header=TRUE,sep='\t'))
#remove any rows with "NA" -- no device value recorded
data=na.omit(data)
data$Error=abs(data$Error)
data$Activity=factor(data$Activity,levels=c("sit","walk","run","bike","max"))
hr=data[which(data$Metric=="hr"),]
en=data[which(data$Metric=="en"),]
en$Device=factor(en$Device,levels=c("Apple","Basis","Fitbit","Microsoft","PulseOn"))
#GLM model
#hr_glm=glm(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=hr)
#en_glm=glm(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=en)
#Fit Generalized estimation equation (GEE) with independent correlation structure
#hr_gee_ind=gee(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=hr,id=Subject,corstr="independence")
#en_gee_ind=gee(Error~Sex+Age+Height+Weight+BMI+Skin+Fitzpatrick+Wrist+VO2max+Activity+Intensity+Device,data=en,id=Subject,corstr="independence")
#Fit Generalized estimation equation (GEE) with exchangeable correlation structure
hr_gee_exch=gee(Error~
Sex+
Age+
Sex:Age+
Height+
Weight+
BMI+
Skin+
Fitzpatrick+
Wrist+
VO2max+
Activity+
Intensity+
Device+
Activity:Device+
Intensity:Device
,data=hr,id=Subject,corstr="exchangeable")
en_gee_exch=gee(Error~
Sex+
Age+
Sex:Age+
Height+
Weight+
BMI+
Skin+
Fitzpatrick+
Wrist+
VO2max+
Activity+
Intensity+
Device+
Activity:Device+
Intensity:Device
,data=en,id=Subject,corstr="exchangeable")
en_gee_exch_pval=2 * pnorm(abs(coef(summary(en_gee_exch))[,5]), lower.tail = FALSE)
en_results=data.frame(summary(en_gee_exch)$coefficients,en_gee_exch_pval)
#p-values and coefficient table for the heart-rate model (mirrors the energy model above)
hr_gee_exch_pval=2 * pnorm(abs(coef(summary(hr_gee_exch))[,5]), lower.tail = FALSE)
hr_results=data.frame(summary(hr_gee_exch)$coefficients,hr_gee_exch_pval)
hr_results=hr_results[order(hr_gee_exch_pval),]
en_results=en_results[order(en_gee_exch_pval),]
dd=pdredge(hr_gee_exch)
# Model average models with delta AICc < 4
model.avg(dd, subset = delta < 4)
#or as a 95% confidence set:
model.avg(dd, subset = cumsum(weight) <= .95) # get averaged coefficients
#'Best' model
hr_best=summary(get.models(dd, 1)[[1]])
hr_gee_exch_pval=2 * pnorm(abs(coef(hr_best)[,5]), lower.tail = FALSE)
hr_results=data.frame(hr_best$coefficients,hr_gee_exch_pval)
par(mar = c(3,5,6,4))
plot(dd, labAsExpr = TRUE)
dd=pdredge(en_gee_exch)
# Model average models with delta AICc < 4
model.avg(dd, subset = delta < 4)
#or as a 95% confidence set:
model.avg(dd, subset = cumsum(weight) <= .95) # get averaged coefficients
#'Best' model
en_best=summary(get.models(dd, 1)[[1]])
en_gee_exch_pval=2 * pnorm(abs(coef(en_best)[,5]), lower.tail = FALSE)
en_results=data.frame(en_best$coefficients,en_gee_exch_pval)
par(mar = c(3,5,6,4))
plot(dd, labAsExpr = TRUE)
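# Illustrative follow-up sketch (assumed MuMIn workflow, not in the original script):
# keep the averaging object to inspect the model-averaged coefficients directly.
# avg_en <- model.avg(dd, subset = delta < 4)
# summary(avg_en)   # averaged coefficients with standard errors
# coef(avg_en)      # averaged coefficient vector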
|
/gee/multivar_mixed_model.R
|
no_license
|
AshleyLab/device_validation
|
R
| false | false | 3,277 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.PMCMR.R
\name{summaryGroup}
\alias{summaryGroup}
\title{Grouped Summary of a PMCMR Object}
\usage{
summaryGroup(x, alpha = 0.05, ...)
}
\arguments{
\item{x}{an object of class \code{"PMCMR"}.}
\item{alpha}{the selected alpha-level. Defaults to 0.05.}
\item{\dots}{further arguments. Currently ignored.}
}
\value{
Provides summary statistics for each factor level
and a letter symbol, where different letters indicate
significant differences between factor levels based on the
selected level of alpha.
}
\description{
Performs a grouped summary on a PMCMR object.
}
\seealso{
\code{\link{summary.PMCMR}}
}
\keyword{models}
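\examples{
## Illustrative sketch (not in the original help page): run a pairwise test from
## PMCMRplus and summarise the grouping letters; function and data names are assumed.
# ans <- kwAllPairsNemenyiTest(count ~ spray, data = InsectSprays)
# summaryGroup(ans, alpha = 0.05)
}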
|
/man/summaryGroup.Rd
|
no_license
|
pociuteagne/PMCMRplus
|
R
| false | true | 713 |
rd
|
\name{formatDistData}
\alias{formatDistData}
\title{Bin distance data}
\usage{formatDistData(distData, distCol, transectNameCol, dist.breaks,
occasionCol, effortMatrix)}
\description{Convert individual-level distance data to the transect-level
format required by \code{\link{distsamp}} or \code{\link{gdistsamp}}}
\arguments{
\item{distData}{data.frame where each row is a detected individual.
Must have at least 2 columns. One for distances and the other for
transect names.}
\item{distCol}{character, name of the column in distData that contains
the distances. The distances should be numeric.}
\item{transectNameCol}{character, column name containing transect
names. The transect column should be a factor.}
\item{dist.breaks}{numeric vector of distance interval cutpoints. Length
must equal J+1.}
\item{occasionCol}{optional character. If transects were visited more
than once, this can be used to format data for \code{gdistsamp}. It is
the name of the column in distData that contains the occasion
numbers. The occasion column should be a factor.}
\item{effortMatrix}{optional matrix of 1 and 0s that is M * J in size and will allow for the insertion of NAs where the matrix = 0, indicating that a survey was not completed. When not supplied a matrix of all 1s is created since it is assumed all surveys were completed.}
}
\details{This function creates a site (M) by distance interval (J) response
matrix from a data.frame containing the detection distances for each
individual and the transect names. Alternatively, if each transect was
surveyed T times, the resulting matrix is M x JT, which is the format
  required by \code{\link{gdistsamp}}, see \code{\link{unmarkedFrameGDS}}.}
\value{An M x J or M x JT matrix containing the binned distance
data. Transect names will become rownames and colnames will describe
the distance intervals.}
\note{It is important that the factor containing transect names includes
levels for all the transects surveyed, not just those with >=1
detection. Likewise, if transects were visited more than once, the
factor containing the occasion numbers should include levels for all
occasions. See the example for how to add levels to a factor.}
\seealso{\code{\link{distsamp}}, \code{\link{unmarkedFrame}}}
\examples{
# Create a data.frame containing distances of animals detected
# along 4 transects.
dat <- data.frame(transect=gl(4,5, labels=letters[1:4]),
distance=rpois(20, 10))
dat
# Look at your transect names.
levels(dat$transect)
# Suppose that you also surveyed a transect named "e" where no animals were
# detected. You must add it to the levels of dat$transect
levels(dat$transect) <- c(levels(dat$transect), "e")
levels(dat$transect)
# Distance cut points defining distance intervals
cp <- c(0, 8, 10, 12, 14, 18)
# Create formatted response matrix
yDat <- formatDistData(dat, "distance", "transect", cp)
yDat
# Now you could merge yDat with transect-level covariates and
# then use unmarkedFrameDS to prepare data for distsamp
## Example for data from multiple occasions
dat2 <- data.frame(distance=1:100, site=gl(5, 20),
visit=factor(rep(1:4, each=5)))
cutpt <- seq(0, 100, by=25)
y2 <- formatDistData(dat2, "distance", "site", cutpt, "visit")
umf <- unmarkedFrameGDS(y=y2, numPrimary=4, survey="point",
dist.breaks=cutpt, unitsIn="m")
## Example for data from multiple occasions with effortMatrix
dat3 <- data.frame(distance=1:100, site=gl(5, 20), visit=factor(rep(1:4, each=5)))
cutpt <- seq(0, 100, by=25)
effortMatrix <- matrix(ncol=4, nrow=5, rbinom(20,1,0.8))
y3 <- formatDistData(dat3, "distance", "site", cutpt, "visit", effortMatrix)
}
|
/fuzzedpackages/unmarked/man/formatDistData.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 3,719 |
rd
|
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = numeric()){
inversion <- NULL;
set <- function(value){
x <<- value
inversion <<- NULL;
}
get <- function() x
setInversion <- function(value) inversion <<- value
getInversion <- function() inversion
list(set = set, get = get,
setInversion = setInversion,
getInversion = getInversion)
}
## This function computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` will retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
inv <- x$getInversion()
if(!is.null(inv)){
message('retrieve cached inversion.')
return(inv)
}
data <- x$get()
x$setInversion(solve(data))
## Return a matrix that is the inverse of 'x'
x$getInversion()
}
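## Illustrative usage sketch (assumes a square, invertible input matrix):
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(cm)   # first call computes the inverse and caches it
# cacheSolve(cm)   # second call prints 'retrieve cached inversion.' and reuses the cache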
|
/cachematrix.R
|
no_license
|
Changdao/ProgrammingAssignment2
|
R
| false | false | 1,008 |
r
|
"factor.stats" <-
function(r=NULL,f,phi=NULL,n.obs=NA,np.obs=NULL,alpha=.1,fm=NULL,smooth=TRUE) {
fa.stats(r=r,f=f,phi=phi,n.obs=n.obs,np.obs=np.obs,alpha=alpha,fm=fm,smooth=smooth)}
"fa.stats" <-
function(r=NULL,f,phi=NULL,n.obs=NA,np.obs=NULL,alpha=.05,fm=NULL,smooth=TRUE) {
#revised June 21, 2010 to add RMSEA etc.
#revised August 25, 2011 to add cor.smooth for smoothing
#revised November 10, 2012 to add stats for the minchi option of factoring
#revised February 28, 2014 to emphasize empirical chi 2 and report empirical BIC
#revised March 9, 2015 to report NA if RMSEA values are not in the confidence intervals
cl <- match.call()
conf.level <- alpha
if((!is.matrix(f)) && (!is.data.frame(f))) {#do a number of things that use f as list
if(is.null(r) && (!is.null(f$r)) ) r <- f$r #we found the correlation while factoring
#if(is.na(n.obs) && (!is.null(f$np.obs))) {np.obs <- f$np.obs}
f <- as.matrix(f$loadings)} else {f <- as.matrix(f)}
n <- dim(r)[2] #number of variables
if(dim(r)[1] !=n ) {n.obs = dim(r)[1]
r <- cor(r,use="pairwise")
}
if(is.data.frame(r)) r <- as.matrix(r)
nfactors <- dim(f)[2] # number of factors
if(is.null(phi)) {model <- f %*% t(f)} else {model <- f %*% phi %*% t(f)}
residual<- r - model
r2 <- sum(r*r)
rstar2 <- sum(residual*residual)
result <- list(residual = residual)
result$dof <- dof <- n * (n-1)/2 - n * nfactors + (nfactors *(nfactors-1)/2)
#r2.off <- r
#diag(r2.off) <- 0
# r2.off <- sum(r2.off^2)
r2.off <- r2 - tr(r)
diag(residual) <- 0
if(is.null(np.obs)) {rstar.off <- sum(residual^2)
result$ENull <- r2.off * n.obs #the empirical null model
result$chi <- rstar.off * n.obs #this is the empirical chi square
result$rms <- sqrt(rstar.off/(n*(n-1))) #this is the empirical rmsea
result$nh <- n.obs
if (result$dof > 0) {result$EPVAL <- pchisq(result$chi, result$dof, lower.tail = FALSE)
result$crms <- sqrt(rstar.off/(2*result$dof) )
result$EBIC <- result$chi - result$dof * log(n.obs)
result$ESABIC <- result$chi - result$dof * log((n.obs+2)/24) } else {result$EPVAL <- NA
result$crms <- NA
result$EBIC <- NA
result$ESABIC <- NA}
} else {
rstar.off <- sum(residual^2 * np.obs) #weight the residuals by their sample size
r2.off <-(r*r * np.obs) #weight the original by sample size
r2.off <- sum(r2.off) -tr(r2.off)
result$chi <- rstar.off #this is the sample size weighted chi square
result$nh <- harmonic.mean(as.vector(np.obs)) #this is the sample weighted cell size
result$rms <- sqrt(rstar.off/(result$nh*n*(n-1))) #this is the sample size weighted square root average squared residual
if (result$dof > 0) {result$EPVAL <- pchisq(result$chi, result$dof, lower.tail = FALSE)
result$crms <- sqrt(rstar.off/(2*result$nh*result$dof) )
result$EBIC <- result$chi - result$dof * log(result$nh)
result$ESABIC <- result$chi - result$dof * log((result$nh+2)/24) } else { #added 2/28/2014
result$EPVAL <- NA
result$crms <- NA
result$EBIC <- NA
result$ESABIC <- NA
}
}
result$fit <-1-rstar2/r2
result$fit.off <- 1-rstar.off/r2.off
result$sd <- sd(as.vector(residual)) #this is the none sample size weighted root mean square residual
result$factors <- nfactors
result$complexity <- (apply(f,1,function(x) sum(x^2)))^2/apply(f,1,function(x)sum(x^4))
diag(model) <- diag(r)
model <- cor.smooth(model) #this replaces the next few lines with a slightly cleaner approach
if(smooth) {r <- cor.smooth(r) } #this makes sure that the correlation is positive semi-definite
#although it would seem that the model should always be positive semidefinite so this is probably not necessary
#cor.smooth approach added August 25,2011
# }
m.inv.r <- try(solve(model,r),silent=TRUE) #modified Oct 30, 2009 to perhaps increase precision -- #modified 2015/1/2 to use try
if(inherits(m.inv.r,"try-error")) {warning("the model inverse times the r matrix is singular, replaced with Identity matrix which means fits are wrong")
m.inv.r <- diag(1,n,n)}
if(is.na(n.obs)) {result$n.obs=NA
result$PVAL=NA} else {result$n.obs=n.obs}
result$dof <- n * (n-1)/2 - n * nfactors + (nfactors *(nfactors-1)/2)
result$objective <- sum(diag((m.inv.r))) - log(det(m.inv.r)) -n #this is what Tucker Lewis call F
if(is.infinite(result$objective)) {result$objective <- rstar2
message("The determinant of the smoothed correlation was zero.\nThis means the objective function is not defined.\nChi square is based upon observed residuals.")}
result$criteria <- c("objective"=result$objective,NA,NA)
if (!is.na(n.obs)) {result$STATISTIC <- chisq <- result$objective * ((n.obs-1) -(2 * n + 5)/6 -(2*nfactors)/3) #from Tucker and from factanal
# if (!is.na(n.obs)) {result$STATISTIC <- chisq <- result$objective * ((n.obs-1)) #from Fox and sem
if(!is.nan(result$STATISTIC)) if (result$STATISTIC <0) {result$STATISTIC <- 0}
if (result$dof > 0) {result$PVAL <- pchisq(result$STATISTIC, result$dof, lower.tail = FALSE)} else {result$PVAL <- NA}
}
result$Call <- cl
#find the Tucker Lewis Index of reliability
#Also known as the NNFI which is expressed in terms of Chisq
#NNFI <- (chisqNull/dfNull - chisq/df)/(chisqNull/dfNull - 1)
#first find the null model
F0 <- sum(diag((r))) - log(det(r)) -n
if(is.infinite(F0)) {F0 <- r2
message("The determinant of the smoothed correlation was zero.\nThis means the objective function is not defined for the null model either.\nThe Chi square is thus based upon observed correlations.")}
Fm <- result$objective #objective function of model
Mm <- Fm/( n * (n-1)/2 - n * nfactors + (nfactors *(nfactors-1)/2))
M0 <- F0* 2 /(n*(n-1))
nm <- ((n.obs-1) -(2 * n + 5)/6 -(2*nfactors)/3) #
result$null.model <- F0
result$null.dof <- n * (n-1) /2
if (!is.na(n.obs)) {result$null.chisq <- F0 * ((n.obs-1) -(2 * n + 5)/6 )
result$TLI <- (M0 - Mm)/(M0 - 1/nm) #NNFI in Fox's sem
if(is.numeric(result$TLI) & !is.nan(result$TLI) & (result$TLI >1)) result$F0 <-1
#The estimatation of RMSEA and the upper and lower bounds are taken from John Fox's summary.sem with minor modifications
if(!is.null(result$objective) && (result$dof >0) &&(!is.na(result$objective))) {
# RMSEA <- sqrt(max(result$objective/result$dof - 1/(n.obs-1), 0)) #this is x2/(df*N ) - 1/(N-1) #put back 4/21/17
#however, this is not quite right and should be
RMSEA <- sqrt(max(chisq/(result$dof* n.obs) - 1/(n.obs-1), 0)) #this is x2/(df*N ) - 1/(N-1) #fixed 4/5/19
#note that the result$objective is not actually the chi square unless we adjust it ala Tucker
#thus, the RMSEA was slightly off. This was fixed October 29, 2016 to be
# RMSEA <- sqrt(max( (chisq/(result$dof * (n.obs))-1/(n.obs)),0)) #changed to this from above October 29, 2016 and then changed to N February 28, 2017
#Seem to have dropped the sqrt part of this at some point
tail <- conf.level/2 #this had been incorrectly listed as (1-conf.level)/2 which gave extraordinarily narrow confidence boundaries, fixed August 25, 2011
N <- max <- n.obs
df <- result$dof
#chi.sq.statistic <- RMSEA^2 * df * (N - 1) + df
#why isn't this just chi.sq?
chi.sq.statistic <- chisq
max <- max(n.obs,chi.sq.statistic) +2* n.obs
#the alternative to this is to use the uniroot technique of Yves Rosseel in lavaan
#### from Hao Wu
# LB<-function(T){
# + if (pchisq(df=df,q=T)<=0.95) return(0) else
# + sqrt(uniroot(function(x) {pchisq(df=df,ncp=x,q=T)-0.95},c(0,10000))$root/nstar/df)
# + }
#
# > UB<-function(T){
# + if (pchisq(df=df,q=T)<=0.05) return(0) else
# + sqrt(uniroot(function(x) {pchisq(df=df,ncp=x,q=T)-0.05},c(0,10000))$root/nstar/df)
# + }
##
#Finally implement February 2017
# upperlambda <- function(lam) {tail - pchisq(chi.sq.statistic, df, ncp=lam)^2 }
RMSEA.U <- 0 #in case we can not find it
if(pchisq(df=result$dof,q=result$STATISTIC) > tail){ RMSEA.U <- try( sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)- tail},c(0,max))$root/(n.obs-1)/result$dof),silent=TRUE)
if(inherits( RMSEA.U,"try-error")) {if(RMSEA <= 0 ) {RMSEA.U <- 0} else {message("In factor.stats, I could not find the RMSEA upper bound . Sorry about that")
#if the fit is super good, then the chisq is too small to get an upper bound. Report it as 0.
RMSEA.U <- NA}}
}
# lam.U <- NA} else {lam.U <- res}
# # if (is.null(res) || is.na(res$objective) || res$objective < 0){
# # max <- 0
# # warning("cannot find upper bound of RMSEA")
# # break
# # }
#
# lowerlambda <- function(lam) {1- tail - pchisq(chi.sq.statistic, df, ncp=lam)^2 }
RMSEA.L <- 0 #in case we can not find it
if(pchisq(df=result$dof,q=result$STATISTIC) > (1-tail)) { RMSEA.L <- try( sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)-1 + tail},c(0,max))$root/(n.obs-1)/result$dof) ,silent=TRUE)
if(inherits(RMSEA.L,"try-error")) {#message("In factor.stats, I could not find the RMSEA lower bound . Sorry about that")
RMSEA.L <- NA}
} else {RMSEA.L <- 0}
# lam.L <- 0} else {lam.L <- res}
# # if (is.null(res) || is.na(res$objective) || res$objective < 0){
# # max <- 0
# # warning("cannot find lower bound of RMSEA")
# # break
# # }
#However, this was giving the wrong results and so I implemented the following
#suggested by Hao Wu April, 2017
#RMSEA.U <- sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)- alpha},c(0,10000))$root/(n.obs-1)/result$dof)
#RMSEA.L <- sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)-1 + alpha},c(0,10000))$root/(n.obs-1)/result$dof)
# while (max > 1){
# res <- try(optimize(function(lam) (tail - pchisq(chi.sq.statistic, df, ncp=lam))^2, interval=c(0, max)),silent=TRUE)
# if(class(res)=="try-error") {message("In factor.stats, I could not find the RMSEA upper bound . Sorry about that")
# res <- NULL}
# if (is.null(res) || is.na(res$objective) || res$objective < 0){
# max <- 0
# warning("cannot find upper bound of RMSEA")
# break
# }
# if (sqrt(res$objective) < tail/100) break
# max <- max/2
# }
# lam.U <- if (max <= 1) NA else res$minimum
# # max <- max(max,lam.U)
# max <- lam.U
# if(is.na(max)) max <- N
# while (max > 1){# this just iterates in to get a value
# res <- try(optimize(function(lam) (1 - tail - pchisq(chi.sq.statistic, df, ncp=lam))^2, interval=c(0, max)),silent=TRUE)
# if(class(res)=="try-error") {message("In factor.stats, I could not find the RMSEA lower bound. Sorry about that")
# res <- NULL}
# if (is.null(res)) {break}
# if (sqrt(res$objective) < tail/100) break
# max <- max/2
# if (is.na(res$objective) || res$objective < 0){
# max <- 0
# warning("cannot find lower bound of RMSEA")
# break
# }
# }
#
#
# lam.L <- if (max <= 1) NA else res$minimum #lam is the ncp
# this RMSEA calculation is probably not right because it will sometimes (but rarely) give cis that don't include the estimate
# RMSEA.U <- sqrt(lam.U/((N)*df) ) #lavaan uses sqrt(lam.U/((N)*df) ) sem uses sqrt(lam.U/((N-1)*df) )
# RMSEA.L <- min(sqrt(lam.L/((N)*df) ),RMSEA)
if(!is.na(RMSEA.U) && RMSEA.U < RMSEA) RMSEA.U <- NA
if(!is.na(RMSEA.L) && RMSEA.L > RMSEA) RMSEA.L <- NA
result$RMSEA <- c(RMSEA, RMSEA.L, RMSEA.U, 1-conf.level)
names(result$RMSEA) <- c("RMSEA","lower","upper","confidence")
result$BIC <- chisq - df * log(N)
result$SABIC <- chisq - df * log((N+2)/24) # added 1/27/2014
}
}
#now, find the correlations of the factor scores, even if not estimated, with the factors
#this repeats what was done in factor.scores and does not take into account the options in factor.scores
if(!is.null(phi)) f <- f %*% phi #convert the pattern to structure coefficients
if(smooth) {r <- cor.smooth(r)}
# w <- try(solve(r,f) ,silent=TRUE) #these are the regression factor weights
w <- Pinv(r)%*%f #use the Pseudo inverse #added 4/22/23
if(inherits(w,"try-error")) {message("In factor.stats, the correlation matrix is singular, an approximation is used")
ev <- eigen(r)
if(is.complex(ev$values)) {warning("complex eigen values detected by factor stats, results are suspect")
} else {
ev$values[ev$values < .Machine$double.eps] <- 100 * .Machine$double.eps
r <- ev$vectors %*% diag(ev$values) %*% t(ev$vectors)
diag(r) <- 1
# w <- try(solve(r,f) ,silent=TRUE)
w <- Pinv(r)%*% f #use the Pseudo inverse #these are the factor weights
if(inherits(w,"try-error")) {warning("In factor.stats, the correlation matrix is singular, and we could not calculate the beta weights for factor score estimates")
w <- diag(1,dim(r)[1])
} #these are the beta weights
}}
R2 <- diag(t(w) %*% f) #but, we actually already found this in factor scores -- these are the Thurstone values
if(is.null(fm)) {
if(prod(R2,na.rm=TRUE) < 0 ) {message("In factor.stats: The factor scoring weights matrix is probably singular -- Factor score estimate results are likely incorrect.\n Try a different factor score estimation method\n")
R2[abs(R2) > 1] <- NA
R2[R2 <= 0] <- NA
}
if ((max(R2,na.rm=TRUE) > (1 + .Machine$double.eps)) ) {warning("The estimated weights for the factor scores are probably incorrect. Try a different factor score estimation method.")}
}
r.scores <- cov2cor(t(w) %*% r %*% w)
result$r.scores <- r.scores
result$R2 <- R2 #this is the multiple R2 of the scores with the factors
# result$R2.corrected <- factor.indeterm(r,f)
# result$R2.total <- R2.cor$R2
# result$beta.total <- R2.cor$beta.total
#coarse coding
keys <- factor2cluster(f)
covar <- t(keys) %*% r %*% keys
if((nfactors >1) && (dim(covar)[2] >1 )) {
sd.inv <- diag(1/sqrt(diag(covar)))
cluster.correl <- sd.inv %*% covar %*% sd.inv #this is just cov2cor(covar)
valid <- t(f) %*% keys %*% sd.inv
result$valid <- diag(valid)
if(NCOL(cluster.correl) < NCOL(r.scores)) {#we need to pad out the cluster.correl matrix so that fa.organize does not choke 9/2/20
temp <- cluster.correl
n.temp <- NROW(temp)
cluster.correl <- matrix(NA,ncol(r.scores),nrow(r.scores))
cluster.correl[1:n.temp,1:n.temp] <- temp
}
result$score.cor <- cluster.correl} else {sd.inv <- 1/sqrt(covar)
if(dim(sd.inv)[1] == 1) sd.inv <- diag(sd.inv)
valid <- try(t(f) %*% keys * sd.inv)
result$valid <- valid}
result$weights <- w #the beta weights for factor scores
class(result) <- c("psych","stats")
return(result)
}
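#------------------------------------------------------------------------------
# Illustrative sketch (not part of the psych package): the RMSEA confidence
# bounds above are found by solving for the non-centrality parameter (ncp) of
# the chi-square distribution with uniroot() and then converting that ncp to an
# RMSEA via sqrt(ncp/((N - 1) * df)).  The function name, default search
# interval, and the example values below are assumptions for illustration only.
"rmsea.ci.sketch" <- function(chisq, df, n.obs, alpha = .05, max.ncp = 1e4) {
   #upper bound: ncp such that P(chi^2 <= chisq | df, ncp) = alpha/2
   up <- try(uniroot(function(x) {pchisq(chisq, df, ncp = x) - alpha/2},
                     c(0, max.ncp))$root, silent = TRUE)
   #lower bound: ncp such that P(chi^2 <= chisq | df, ncp) = 1 - alpha/2
   lo <- try(uniroot(function(x) {pchisq(chisq, df, ncp = x) - 1 + alpha/2},
                     c(0, max.ncp))$root, silent = TRUE)
   c(lower = if(inherits(lo, "try-error")) NA else sqrt(lo/((n.obs - 1) * df)),
     upper = if(inherits(up, "try-error")) NA else sqrt(up/((n.obs - 1) * df)))
   }
#e.g. rmsea.ci.sketch(chisq = 85, df = 50, n.obs = 200) gives a 95% interval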
|
/R/factor.stats.R
|
no_license
|
cran/psych
|
R
| false | false | 16,993 |
r
|
"factor.stats" <-
function(r=NULL,f,phi=NULL,n.obs=NA,np.obs=NULL,alpha=.1,fm=NULL,smooth=TRUE) {
fa.stats(r=r,f=f,phi=phi,n.obs=n.obs,np.obs=np.obs,alpha=alpha,fm=fm,smooth=smooth)}
"fa.stats" <-
function(r=NULL,f,phi=NULL,n.obs=NA,np.obs=NULL,alpha=.05,fm=NULL,smooth=TRUE) {
#revised June 21, 2010 to add RMSEA etc.
#revised August 25, 2011 to add cor.smooth for smoothing
#revised November 10, 2012 to add stats for the minchi option of factoring
#revised February 28, 2014 to emphasize empirical chi 2 and report empirical BIC
#revised March 9, 2015 to report NA if RMSEA values are not in the confidence intervals
cl <- match.call()
conf.level <- alpha
if((!is.matrix(f)) && (!is.data.frame(f))) {#do a number of things that use f as list
if(is.null(r) && (!is.null(f$r)) ) r <- f$r #we found the correlation while factoring
#if(is.na(n.obs) && (!is.null(f$np.obs))) {np.obs <- f$np.obs}
f <- as.matrix(f$loadings)} else {f <- as.matrix(f)}
n <- dim(r)[2] #number of variables
if(dim(r)[1] !=n ) {n.obs = dim(r)[1]
r <- cor(r,use="pairwise")
}
if(is.data.frame(r)) r <- as.matrix(r)
nfactors <- dim(f)[2] # number of factors
if(is.null(phi)) {model <- f %*% t(f)} else {model <- f %*% phi %*% t(f)}
residual<- r - model
r2 <- sum(r*r)
rstar2 <- sum(residual*residual)
result <- list(residual = residual)
result$dof <- dof <- n * (n-1)/2 - n * nfactors + (nfactors *(nfactors-1)/2)
#r2.off <- r
#diag(r2.off) <- 0
# r2.off <- sum(r2.off^2)
r2.off <- r2 - tr(r)
diag(residual) <- 0
if(is.null(np.obs)) {rstar.off <- sum(residual^2)
result$ENull <- r2.off * n.obs #the empirical null model
result$chi <- rstar.off * n.obs #this is the empirical chi square
result$rms <- sqrt(rstar.off/(n*(n-1))) #this is the empirical rmsea
result$nh <- n.obs
if (result$dof > 0) {result$EPVAL <- pchisq(result$chi, result$dof, lower.tail = FALSE)
result$crms <- sqrt(rstar.off/(2*result$dof) )
result$EBIC <- result$chi - result$dof * log(n.obs)
result$ESABIC <- result$chi - result$dof * log((n.obs+2)/24) } else {result$EPVAL <- NA
result$crms <- NA
result$EBIC <- NA
result$ESABIC <- NA}
} else {
rstar.off <- sum(residual^2 * np.obs) #weight the residuals by their sample size
r2.off <-(r*r * np.obs) #weight the original by sample size
r2.off <- sum(r2.off) -tr(r2.off)
result$chi <- rstar.off #this is the sample size weighted chi square
result$nh <- harmonic.mean(as.vector(np.obs)) #this is the sample weighted cell size
result$rms <- sqrt(rstar.off/(result$nh*n*(n-1))) #this is the sample size weighted square root average squared residual
if (result$dof > 0) {result$EPVAL <- pchisq(result$chi, result$dof, lower.tail = FALSE)
result$crms <- sqrt(rstar.off/(2*result$nh*result$dof) )
result$EBIC <- result$chi - result$dof * log(result$nh)
result$ESABIC <- result$chi - result$dof * log((result$nh+2)/24) } else { #added 2/28/2014
result$EPVAL <- NA
result$crms <- NA
result$EBIC <- NA
result$ESABIC <- NA
}
}
result$fit <-1-rstar2/r2
result$fit.off <- 1-rstar.off/r2.off
result$sd <- sd(as.vector(residual)) #this is the none sample size weighted root mean square residual
result$factors <- nfactors
result$complexity <- (apply(f,1,function(x) sum(x^2)))^2/apply(f,1,function(x)sum(x^4))
diag(model) <- diag(r)
model <- cor.smooth(model) #this replaces the next few lines with a slightly cleaner approach
if(smooth) {r <- cor.smooth(r) } #this makes sure that the correlation is positive semi-definite
#although it would seem that the model should always be positive semidefinite so this is probably not necessary
#cor.smooth approach added August 25,2011
# }
m.inv.r <- try(solve(model,r),silent=TRUE) #modified Oct 30, 2009 to perhaps increase precision -- #modified 2015/1/2 to use try
if(inherits(m.inv.r,"try-error")) {warning("the model inverse times the r matrix is singular, replaced with Identity matrix which means fits are wrong")
m.inv.r <- diag(1,n,n)}
if(is.na(n.obs)) {result$n.obs=NA
result$PVAL=NA} else {result$n.obs=n.obs}
result$dof <- n * (n-1)/2 - n * nfactors + (nfactors *(nfactors-1)/2)
result$objective <- sum(diag((m.inv.r))) - log(det(m.inv.r)) -n #this is what Tucker Lewis call F
if(is.infinite(result$objective)) {result$objective <- rstar2
message("The determinant of the smoothed correlation was zero.\nThis means the objective function is not defined.\nChi square is based upon observed residuals.")}
result$criteria <- c("objective"=result$objective,NA,NA)
if (!is.na(n.obs)) {result$STATISTIC <- chisq <- result$objective * ((n.obs-1) -(2 * n + 5)/6 -(2*nfactors)/3) #from Tucker and from factanal
# if (!is.na(n.obs)) {result$STATISTIC <- chisq <- result$objective * ((n.obs-1)) #from Fox and sem
if(!is.nan(result$STATISTIC)) if (result$STATISTIC <0) {result$STATISTIC <- 0}
if (result$dof > 0) {result$PVAL <- pchisq(result$STATISTIC, result$dof, lower.tail = FALSE)} else {result$PVAL <- NA}
}
result$Call <- cl
#find the Tucker Lewis Index of reliability
#Also known as the NNFI which is expressed in terms of Chisq
#NNFI <- (chisqNull/dfNull - chisq/df)/(chisqNull/dfNull - 1)
#first find the null model
F0 <- sum(diag((r))) - log(det(r)) -n
if(is.infinite(F0)) {F0 <- r2
message("The determinant of the smoothed correlation was zero.\nThis means the objective function is not defined for the null model either.\nThe Chi square is thus based upon observed correlations.")}
Fm <- result$objective #objective function of model
Mm <- Fm/( n * (n-1)/2 - n * nfactors + (nfactors *(nfactors-1)/2))
M0 <- F0* 2 /(n*(n-1))
nm <- ((n.obs-1) -(2 * n + 5)/6 -(2*nfactors)/3) #
result$null.model <- F0
result$null.dof <- n * (n-1) /2
if (!is.na(n.obs)) {result$null.chisq <- F0 * ((n.obs-1) -(2 * n + 5)/6 )
result$TLI <- (M0 - Mm)/(M0 - 1/nm) #NNFI in Fox's sem
if(is.numeric(result$TLI) & !is.nan(result$TLI) & (result$TLI >1)) result$F0 <-1
#The estimatation of RMSEA and the upper and lower bounds are taken from John Fox's summary.sem with minor modifications
if(!is.null(result$objective) && (result$dof >0) &&(!is.na(result$objective))) {
# RMSEA <- sqrt(max(result$objective/result$dof - 1/(n.obs-1), 0)) #this is x2/(df*N ) - 1/(N-1) #put back 4/21/17
#however, this is not quite right and should be
RMSEA <- sqrt(max(chisq/(result$dof* n.obs) - 1/(n.obs-1), 0)) #this is x2/(df*N ) - 1/(N-1) #fixed 4/5/19
#note that the result$objective is not actually the chi square unless we adjust it ala Tucker
#thus, the RMSEA was slightly off. This was fixed October 29, 2016 to be
# RMSEA <- sqrt(max( (chisq/(result$dof * (n.obs))-1/(n.obs)),0)) #changed to this from above October 29, 2016 and then changed to N February 28, 2017
#Seem to have dropped the sqrt part of this at some point
tail <- conf.level/2 #this had been incorrectly listed as (1-conf.level)/2 which gave extraordinarily narrow confidence boundaries, fixed August 25, 2011
N <- max <- n.obs
df <- result$dof
#chi.sq.statistic <- RMSEA^2 * df * (N - 1) + df
#why isn't this just chi.sq?
chi.sq.statistic <- chisq
max <- max(n.obs,chi.sq.statistic) +2* n.obs
#the alternative to this is to use the uniroot technique of Yves Rosseel in lavaan
#### from Hao Wu
# LB<-function(T){
# + if (pchisq(df=df,q=T)<=0.95) return(0) else
# + sqrt(uniroot(function(x) {pchisq(df=df,ncp=x,q=T)-0.95},c(0,10000))$root/nstar/df)
# + }
#
# > UB<-function(T){
# + if (pchisq(df=df,q=T)<=0.05) return(0) else
# + sqrt(uniroot(function(x) {pchisq(df=df,ncp=x,q=T)-0.05},c(0,10000))$root/nstar/df)
# + }
##
#Finally implement February 2017
# upperlambda <- function(lam) {tail - pchisq(chi.sq.statistic, df, ncp=lam)^2 }
RMSEA.U <- 0 #in case we can not find it
if(pchisq(df=result$dof,q=result$STATISTIC) > tail){ RMSEA.U <- try( sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)- tail},c(0,max))$root/(n.obs-1)/result$dof),silent=TRUE)
if(inherits( RMSEA.U,"try-error")) {if(RMSEA <= 0 ) {RMSEA.U <- 0} else {message("In factor.stats, I could not find the RMSEA upper bound . Sorry about that")
#if the fit is super good, then the chisq is too small to get an upper bound. Report it as 0.
RMSEA.U <- NA}}
}
# lam.U <- NA} else {lam.U <- res}
# # if (is.null(res) || is.na(res$objective) || res$objective < 0){
# # max <- 0
# # warning("cannot find upper bound of RMSEA")
# # break
# # }
#
# lowerlambda <- function(lam) {1- tail - pchisq(chi.sq.statistic, df, ncp=lam)^2 }
RMSEA.L <- 0 #in case we can not find it
if(pchisq(df=result$dof,q=result$STATISTIC) > (1-tail)) { RMSEA.L <- try( sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)-1 + tail},c(0,max))$root/(n.obs-1)/result$dof) ,silent=TRUE)
if(inherits(RMSEA.L,"try-error")) {#message("In factor.stats, I could not find the RMSEA lower bound . Sorry about that")
RMSEA.L <- NA}
} else {RMSEA.L <- 0}
# lam.L <- 0} else {lam.L <- res}
# # if (is.null(res) || is.na(res$objective) || res$objective < 0){
# # max <- 0
# # warning("cannot find lower bound of RMSEA")
# # break
# # }
#However, this was giving the wrong results and so I implemented the following
#suggested by Hao Wu April, 2017
#RMSEA.U <- sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)- alpha},c(0,10000))$root/(n.obs-1)/result$dof)
#RMSEA.L <- sqrt(uniroot(function(x) {pchisq(df=result$dof,ncp=x,q=result$STATISTIC)-1 + alpha},c(0,10000))$root/(n.obs-1)/result$dof)
# while (max > 1){
# res <- try(optimize(function(lam) (tail - pchisq(chi.sq.statistic, df, ncp=lam))^2, interval=c(0, max)),silent=TRUE)
# if(class(res)=="try-error") {message("In factor.stats, I could not find the RMSEA upper bound . Sorry about that")
# res <- NULL}
# if (is.null(res) || is.na(res$objective) || res$objective < 0){
# max <- 0
# warning("cannot find upper bound of RMSEA")
# break
# }
# if (sqrt(res$objective) < tail/100) break
# max <- max/2
# }
# lam.U <- if (max <= 1) NA else res$minimum
# # max <- max(max,lam.U)
# max <- lam.U
# if(is.na(max)) max <- N
# while (max > 1){# this just iterates in to get a value
# res <- try(optimize(function(lam) (1 - tail - pchisq(chi.sq.statistic, df, ncp=lam))^2, interval=c(0, max)),silent=TRUE)
# if(class(res)=="try-error") {message("In factor.stats, I could not find the RMSEA lower bound. Sorry about that")
# res <- NULL}
# if (is.null(res)) {break}
# if (sqrt(res$objective) < tail/100) break
# max <- max/2
# if (is.na(res$objective) || res$objective < 0){
# max <- 0
# warning("cannot find lower bound of RMSEA")
# break
# }
# }
#
#
# lam.L <- if (max <= 1) NA else res$minimum #lam is the ncp
# this RMSEA calculation is probably not right because it will sometimes (but rarely) give cis that don't include the estimate
# RMSEA.U <- sqrt(lam.U/((N)*df) ) #lavaan uses sqrt(lam.U/((N)*df) ) sem uses sqrt(lam.U/((N-1)*df) )
# RMSEA.L <- min(sqrt(lam.L/((N)*df) ),RMSEA)
if(!is.na(RMSEA.U) && RMSEA.U < RMSEA) RMSEA.U <- NA
if(!is.na(RMSEA.L) && RMSEA.L > RMSEA) RMSEA.L <- NA
result$RMSEA <- c(RMSEA, RMSEA.L, RMSEA.U, 1-conf.level)
names(result$RMSEA) <- c("RMSEA","lower","upper","confidence")
result$BIC <- chisq - df * log(N)
result$SABIC <- chisq - df * log((N+2)/24) # added 1/27/2014
}
}
#now, find the correlations of the factor scores, even if not estimated, with the factors
#this repeats what was done in factor.scores and does not take in\to account the options in factor.scores
if(!is.null(phi)) f <- f %*% phi #convert the pattern to structure coefficients
if(smooth) {r <- cor.smooth(r)}
# w <- try(solve(r,f) ,silent=TRUE) #these are the regression factor weights
w <- Pinv(r)%*%f #use the Pseudo inverse #added 4/22/23
if(inherits(w,"try-error")) {message("In factor.stats, the correlation matrix is singular, an approximation is used")
ev <- eigen(r)
if(is.complex(ev$values)) {warning("complex eigen values detected by factor stats, results are suspect")
} else {
ev$values[ev$values < .Machine$double.eps] <- 100 * .Machine$double.eps
r <- ev$vectors %*% diag(ev$values) %*% t(ev$vectors)
diag(r) <- 1
# w <- try(solve(r,f) ,silent=TRUE)
w <- Pinv(r)%*% r #use the Pseudo inverse #these are the factor weights
if(inherits(w,"try-error")) {warning("In factor.stats, the correlation matrix is singular, and we could not calculate the beta weights for factor score estimates")
w <- diag(1,dim(r)[1])
} #these are the beta weights
}}
R2 <- diag(t(w) %*% f) #but, we actually already found this in factor scores -- these are the Thurstone values
if(is.null(fm)) {
if(prod(R2,na.rm=TRUE) < 0 ) {message("In factor.stats: The factor scoring weights matrix is probably singular -- Factor score estimate results are likely incorrect.\n Try a different factor score estimation method\n")
R2[abs(R2) > 1] <- NA
R2[R2 <= 0] <- NA
}
if ((max(R2,na.rm=TRUE) > (1 + .Machine$double.eps)) ) {warning("The estimated weights for the factor scores are probably incorrect. Try a different factor score estimation method.")}
}
r.scores <- cov2cor(t(w) %*% r %*% w)
result$r.scores <- r.scores
result$R2 <- R2 #this is the multiple R2 of the scores with the factors
# result$R2.corrected <- factor.indeterm(r,f)
# result$R2.total <- R2.cor$R2
# result$beta.total <- R2.cor$beta.total
#coarse coding
keys <- factor2cluster(f)
covar <- t(keys) %*% r %*% keys
if((nfactors >1) && (dim(covar)[2] >1 )) {
sd.inv <- diag(1/sqrt(diag(covar)))
cluster.correl <- sd.inv %*% covar %*% sd.inv #this is just cov2cor(covar)
valid <- t(f) %*% keys %*% sd.inv
result$valid <- diag(valid)
if(NCOL(cluster.correl) < NCOL(r.scores)) {#we need to pad out the cluster.correl matrix so that fa.organize does not choke 9/2/20
temp <- cluster.correl
n.temp <- NROW(temp)
cluster.correl <- matrix(NA,ncol(r.scores),nrow(r.scores))
cluster.correl[1:n.temp,1:n.temp] <- temp
}
result$score.cor <- cluster.correl} else {sd.inv <- 1/sqrt(covar)
if(dim(sd.inv)[1] == 1) sd.inv <- diag(sd.inv)
valid <- try(t(f) %*% keys * sd.inv)
result$valid <- valid}
result$weights <- w #the beta weights for factor scores
class(result) <- c("psych","stats")
return(result)
}
|
context("test connect connection")
# should connect with env vars
test_conn_1 <- NULL
test_conn_2 <- NULL
test_that("connect works", {
test_conn_1 <<- connect(
host = Sys.getenv("TEST_1_SERVER"),
api_key = Sys.getenv("TEST_1_API_KEY")
)
expect_true(validate_R6_class(test_conn_1, "Connect"))
})
test_that("connect works with prefix only", {
test_conn_2 <<- connect(
prefix = "TEST_2"
)
expect_true(validate_R6_class(test_conn_2, "Connect"))
})
test_that("connect fails for nonexistent server", {
expect_error({
connect(host = "does-not-exist.rstudio.com", api_key = "bogus")
})
})
test_that("connect fails for good server, bad api key", {
expect_error({
connect(
host = Sys.getenv("TEST_1_SERVER"),
api_key = "bogus"
)
})
})
test_that("error if API key is empty", {
expect_error(
connect(host = Sys.getenv("TEST_1_SERVER"), api_key = ""),
"provide a valid API key"
)
expect_error(
connect(host = Sys.getenv("TEST_1_SERVER"), api_key = NA_character_),
"provide a valid API key"
)
expect_error(
connect(host = Sys.getenv("TEST_1_SERVER"), api_key = NULL),
"provide a valid API key"
)
})
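# illustrative setup (assumption, not part of the test suite): the tests above
# read the target server and API key from environment variables, e.g. defined
# in .Renviron or set before running the tests; connect(prefix = "TEST_2")
# presumably reads TEST_2_SERVER / TEST_2_API_KEY.
# Sys.setenv(
#   TEST_1_SERVER = "https://connect.example.com",
#   TEST_1_API_KEY = "<api key>",
#   TEST_2_SERVER = "https://connect2.example.com",
#   TEST_2_API_KEY = "<api key>"
# )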
|
/tests/integrated/test-connect.R
|
no_license
|
slodge/connectapi
|
R
| false | false | 1,185 |
r
|
context("test connect connection")
# should connect with env vars
test_conn_1 <- NULL
test_conn_2 <- NULL
test_that("connect works", {
test_conn_1 <<- connect(
host = Sys.getenv("TEST_1_SERVER"),
api_key = Sys.getenv("TEST_1_API_KEY")
)
expect_true(validate_R6_class(test_conn_1, "Connect"))
})
test_that("connect works with prefix only", {
test_conn_2 <<- connect(
prefix = "TEST_2"
)
expect_true(validate_R6_class(test_conn_2, "Connect"))
})
test_that("connect fails for nonexistent server", {
expect_error({
connect(host = "does-not-exist.rstudio.com", api_key = "bogus")
})
})
test_that("connect fails for good server, bad api key", {
expect_error({
connect(
host = Sys.getenv("TEST_1_SERVER"),
api_key = "bogus"
)
})
})
test_that("error if API key is empty", {
expect_error(
connect(host = Sys.getenv("TEST_1_SERVER"), api_key = ""),
"provide a valid API key"
)
expect_error(
connect(host = Sys.getenv("TEST_1_SERVER"), api_key = NA_character_),
"provide a valid API key"
)
expect_error(
connect(host = Sys.getenv("TEST_1_SERVER"), api_key = NULL),
"provide a valid API key"
)
})
|
basicPage("Hello, Shiny!")
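# Note (illustrative assumption): this ui.R only defines the page; a minimal
# server.R beside it, e.g. `function(input, output) {}`, would complete a
# runnable Shiny app.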
|
/ui.r
|
no_license
|
adriedl/shiny
|
R
| false | false | 27 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NlcOptim.R
\name{solnl}
\alias{solnl}
\title{Solve Optimization problem with Nonlinear Objective and Constraints}
\usage{
solnl(X = NULL, objfun = NULL, confun = NULL, A = NULL, B = NULL,
Aeq = NULL, Beq = NULL, lb = NULL, ub = NULL, tolX = 1e-05,
tolFun = 1e-06, tolCon = 1e-06, maxnFun = 1e+07, maxIter = 4000)
}
\arguments{
\item{X}{Starting vector of parameter values.}
\item{objfun}{Nonlinear objective function that is to be optimized.}
\item{confun}{Nonlinear constraint function. Returns a \code{ceq} vector
and a \code{c} vector as the nonlinear equality and inequality constraints, respectively.}
\item{A}{A in the linear inequality constraints.}
\item{B}{B in the linear inequality constraints.}
\item{Aeq}{Aeq in the linear equality constraints.}
\item{Beq}{Beq in the linear equality constraints.}
\item{lb}{Lower bounds of parameters.}
\item{ub}{Upper bounds of parameters.}
\item{tolX}{The tolerance in X.}
\item{tolFun}{The tolerance in the objective function.}
\item{tolCon}{The tolerance in the constraint function.}
\item{maxnFun}{Maximum number of objective function evaluations.}
\item{maxIter}{Maximum number of iterations.}
}
\value{
Return a list with the following components:
\item{par}{The optimum solution.}
\item{fn}{The value of the objective function at the optimal point.}
\item{counts}{Number of function evaluations, and number of gradient evaluations.}
\item{lambda}{Lagrangian multiplier.}
\item{grad}{The gradient of the objective function at the optimal point.}
\item{hessian}{Hessian of the objective function at the optimal point.}
}
\description{
The Sequential Quadratic
Programming (SQP) method is implemented to find the solution of a general nonlinear optimization problem
(with nonlinear objective and constraint functions). The SQP method is described in detail in Chapter 18 of
Jorge Nocedal and Stephen J. Wright's book.
Linear or nonlinear equality and inequality constraints are allowed.
It accepts the input parameters as a constrained matrix.
The function \code{solnl} is to solve generalized nonlinear optimization problem:
\deqn{min f(x)}
\deqn{s.t. ceq(x)=0}
\deqn{c(x)\le 0}
\deqn{Ax\le B}
\deqn{Aeq x = Beq}
\deqn{lb\le x \le ub}
}
\examples{
library(MASS)
###ex1
objfun=function(x){
return(exp(x[1]*x[2]*x[3]*x[4]*x[5]))
}
#constraint function
confun=function(x){
f=NULL
f=rbind(f,x[1]^2+x[2]^2+x[3]^2+x[4]^2+x[5]^2-10)
f=rbind(f,x[2]*x[3]-5*x[4]*x[5])
f=rbind(f,x[1]^3+x[2]^3+1)
return(list(ceq=f,c=NULL))
}
x0=c(-2,2,2,-1,-1)
solnl(x0,objfun=objfun,confun=confun)
####ex2
obj=function(x){
return((x[1]-1)^2+(x[1]-x[2])^2+(x[2]-x[3])^3+(x[3]-x[4])^4+(x[4]-x[5])^4)
}
#constraint function
con=function(x){
f=NULL
f=rbind(f,x[1]+x[2]^2+x[3]^3-2-3*sqrt(2))
f=rbind(f,x[2]-x[3]^2+x[4]+2-2*sqrt(2))
f=rbind(f,x[1]*x[5]-2)
return(list(ceq=f,c=NULL))
}
x0=c(1,1,1,1,1)
solnl(x0,objfun=obj,confun=con)
##########ex3
obj=function(x){
return((1-x[1])^2+(x[2]-x[1]^2)^2)
}
#constraint function
con=function(x){
f=NULL
f=rbind(f,x[1]^2+x[2]^2-1.5)
return(list(ceq=NULL,c=f))
}
x0=as.matrix(c(-1.9,2))
obj(x0)
con(x0)
solnl(x0,objfun=obj,confun=con)
##########ex4
objfun=function(x){
return(x[1]^2+x[2]^2)
}
#constraint function
confun=function(x){
f=NULL
f=rbind(f,-x[1] - x[2] + 1)
f=rbind(f,-x[1]^2 - x[2]^2 + 1)
f=rbind(f,-9*x[1]^2 - x[2]^2 + 9)
f=rbind(f,-x[1]^2 + x[2])
f=rbind(f,-x[2]^2 + x[1])
return(list(ceq=NULL,c=f))
}
x0=as.matrix(c(3,1))
solnl(x0,objfun=objfun,confun=confun)
##############ex5
rosbkext.f <- function(x){
n <- length(x)
sum (100*(x[1:(n-1)]^2 - x[2:n])^2 + (x[1:(n-1)] - 1)^2)
}
n <- 2
set.seed(54321)
p0 <- rnorm(n)
Aeq <- matrix(rep(1, n), nrow=1)
Beq <- 1
lb <- c(rep(-Inf, n-1), 0)
solnl(X=p0,objfun=rosbkext.f, lb=lb, Aeq=Aeq, Beq=Beq)
ub <- rep(1, n)
solnl(X=p0,objfun=rosbkext.f, lb=lb, ub=ub, Aeq=Aeq, Beq=Beq)
##############ex6
nh <- vector("numeric", length = 5)
Nh <- c(6221,11738,4333,22809,5467)
ch <- c(120, 80, 80, 90, 150)
mh.rev <- c(85, 11, 23, 17, 126)
Sh.rev <- c(170.0, 8.8, 23.0, 25.5, 315.0)
mh.emp <- c(511, 21, 70, 32, 157)
Sh.emp <- c(255.50, 5.25, 35.00, 32.00, 471.00)
ph.rsch <- c(0.8, 0.2, 0.5, 0.3, 0.9)
ph.offsh <- c(0.06, 0.03, 0.03, 0.21, 0.77)
budget = 300000
n.min <- 100
relvar.rev <- function(nh){
rv <- sum(Nh * (Nh/nh - 1)*Sh.rev^2)
tot <- sum(Nh * mh.rev)
rv/tot^2
}
relvar.emp <- function(nh){
rv <- sum(Nh * (Nh/nh - 1)*Sh.emp^2)
tot <- sum(Nh * mh.emp)
rv/tot^2
}
relvar.rsch <- function(nh){
rv <- sum( Nh * (Nh/nh - 1)*ph.rsch*(1-ph.rsch)*Nh/(Nh-1) )
tot <- sum(Nh * ph.rsch)
rv/tot^2
}
relvar.offsh <- function(nh){
rv <- sum( Nh * (Nh/nh - 1)*ph.offsh*(1-ph.offsh)*Nh/(Nh-1) )
tot <- sum(Nh * ph.offsh)
rv/tot^2
}
nlc.constraints <- function(nh){
h <- rep(NA, 13)
h[1:length(nh)] <- (Nh + 0.01) - nh
h[(length(nh)+1) : (2*length(nh)) ] <- (nh + 0.01) - n.min
h[2*length(nh) + 1] <- 0.05^2 - relvar.emp(nh)
h[2*length(nh) + 2] <- 0.03^2 - relvar.rsch(nh)
h[2*length(nh) + 3] <- 0.03^2 - relvar.offsh(nh)
return(list(ceq=NULL, c=-h))
}
nlc <- function(nh){
h <- rep(NA, 3)
h[ 1] <- 0.05^2 - relvar.emp(nh)
h[ 2] <- 0.03^2 - relvar.rsch(nh)
h[3] <- 0.03^2 - relvar.offsh(nh)
return(list(ceq=NULL, c=-h))
}
Aeq <- matrix(ch/budget, nrow=1)
Beq <- 1
A=rbind(diag(-1,5,5),diag(1,5,5))
B=c(-Nh-0.01,rep(n.min-0.01,5))
solnl(X=rep(100,5),objfun=relvar.rev,confun=nlc.constraints, Aeq=Aeq, Beq=Beq)
solnl(X=rep(100,5),objfun=relvar.rev,confun=nlc, Aeq=Aeq, Beq=Beq, A=-A, B=-B)
}
\references{
Nocedal, Jorge, and Stephen Wright. Numerical optimization. Springer Science & Business Media, 2006.
}
\author{
Xianyan Chen, Xiangrong Yin
}
|
/man/solnl.Rd
|
no_license
|
octaviodeliberato/NlcOptim
|
R
| false | true | 5,715 |
rd
|
|
################################################################################
# module_load_cppModel_UI
################################################################################
# A module's UI function should be given a name that is
# suffixed with Input, Output, or UI;
module_load_cppModel_UI <- function(id, label = "") {
# Create a namespace function using the provided id
ns <- NS(id)
tagList(
fluidRow(column(12,uiOutput(ns("cppModel_source_selector")))),
fluidRow(column(6, uiOutput(ns("load_external_cppModel_container")))),
fluidRow(column(6, uiOutput(ns("load_internal_cppModel_container")))),
fluidRow(column(6, uiOutput(ns("load_session_cppModel_container")))),
fluidRow(column(12,uiOutput(ns("update_cppModel_container"))))
)
}
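################################################################################
# Illustrative usage sketch (assumption, not part of this module): the UI above
# is typically wired to the server function below with callModule(), e.g.
# ui <- fluidPage(module_load_cppModel_UI("cpp1"))
# server <- function(input, output, session) {
#   ALL <- reactiveValues(cppModel = list())
#   ALL <- callModule(module_load_cppModel, "cpp1", ALL = ALL, cppModel_name = "TEST")
# }
# shinyApp(ui, server)
################################################################################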
################################################################################
# main function: module_load_cppModel
################################################################################
module_load_cppModel <- function(input, output, session,
ALL, cppModel_name="TEST") {
ns <- session$ns
values <- reactiveValues()
#--------------------------------------
# cppModel_source_selector
#--------------------------------------
output$cppModel_source_selector <- renderUI({
validate(need(globalVars$login$status, message=FALSE))
fluidRow(
column(6,
radioButtons(ns("cppModel_source"),
label="Select cpp model from:",
choices=c("internal library",
"within session",
"external file"),
inline=TRUE,
width="100%",
selected="internal library"))
)
})
#--------------------------------------
# load_external_cppModel_container
#--------------------------------------
output$load_external_cppModel_container <- renderUI({
validate(need(globalVars$login$status, message=FALSE),
need(input$cppModel_source=="external file", message=FALSE))
fileInput(ns("which_external_cppModel"),
label = "load external cppModel",
width="100%"
) # h5
# accept=c('text/csv/sas7bdat',
# 'text/comma-separated-values,text/plain',
# '.xlsx',
# '.xls',
# '.csv',
# '.sas7bdat',
# '.RData'))
})
#--------------------------------------
# load_internal_cppModel_container
#--------------------------------------
output$load_internal_cppModel_container <- renderUI({
validate(need(globalVars$login$status, message=FALSE),
need(input$cppModel_source=="internal library", message=FALSE))
dirs_lst=list.files(path = paste0(WORKING_HOME, "/cpp"),
full.names = FALSE,
recursive = FALSE,
pattern=".cpp",
include.dirs=FALSE)
dirs_lst = c("", dirs_lst)
selectizeInput(ns("which_internal_cppModel"),
label = "load internal cppModel",
choices = dirs_lst,
multiple = FALSE,
width = "100%",
selected = dirs_lst[1]
)
})
#--------------------------------------
# load_session_cppModel_container
#--------------------------------------
output$load_session_cppModel_container <- renderUI({
validate(need(globalVars$login$status, message=FALSE),
need(input$cppModel_source=="within session", message=FALSE))
name_lst <- names(isolate({ALL$cppModel}))
only_for_internal_use <- name_lst[which(substr(name_lst, 1, 6)=="mYtEsT")]
dirs_lst = c("", setdiff(name_lst, only_for_internal_use))
selectizeInput(ns("which_session_cppModel"),
label = "load session cppModel",
choices = dirs_lst,
multiple = FALSE,
width = "100%",
selected = dirs_lst[1])
})
#--------------------------------------
# update_cppModel_container
#--------------------------------------
output$update_cppModel_container <- renderUI({
validate(need(globalVars$login$status, message=FALSE),
need(values$cppModel_content, message=FALSE)
)
tagList(
fluidRow(
column(width=12,
HTML(colFmt("You may modify the loaded model and then re-assign a name for it.",
color="gray")))
),
fluidRow(
column(width=4, #status = "primary", #class = 'rightAlign', #background ="aqua",
textInput(ns("model_name"),
value=NULL,
placeholder ="cppModel-name",
label=NULL,
width="100%")
),
column(2,
actionButton(ns("save_model"),
label="Save model",
style=actionButton_style )
)
),
fluidRow(
column(12,
aceEditor(ns("cppModel_content"),
mode="c_cpp",
value=paste0(values$cppModel_content, collapse="\n"),
theme = "crimson_editor", # chrome
autoComplete = "enabled",
height = "1000px",
fontSize = 15
)
)
)
) # tagList
})
#---------------------------------------------------
# observeEvent of input$which_internal_cppModel
#---------------------------------------------------
#observeEvent({input$which_internal_cppModel}, {
load_internal_cppModel <- reactive({
validate(need(input$cppModel_source=="internal library", message=FALSE),
need(input$which_internal_cppModel, message=FALSE)
)
# readLines cppModel
cppModel_file <- paste0(WORKING_HOME, '/cpp/', input$which_internal_cppModel)
values$cppModel_content <- readLines(cppModel_file)
# create a progress object
progress <- shiny::Progress$new()
on.exit(progress$close()) # Make sure it closes when we exit this reactive, even if there's an error
progress$set(message = "mread cppModel...please Wait", value = 0)
# mread cppModel
environment(try_eval) <- environment() # basename
text="cppModel=mread(model='cppModel', project=paste0(WORKING_HOME, '/cpp/'), quiet=TRUE, file=basename(input$which_internal_cppModel))"
env = try_eval(text)
if ("cppModel" %in% ls(env)) {
cppModel = get("cppModel", env)
# "default, "message", "warning", "error"
showNotification("mread cppModel...done.", type="message") # "default, "message", "warning", "error"
}else{
cppModel = NULL
error_message = get("message", env)
# "default, "message", "warning", "error"
showNotification(paste0(error_message, collapse="\n"), type="error")
}
cppModel
})
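  #---------------------------------------------------
  # Note (assumption): try_eval() is a project helper defined elsewhere; the
  # calls above suggest it evaluates the string `text` and returns an
  # environment holding either the created object or a `message` with the
  # error text.  A minimal sketch of that idea (names are illustrative only):
  # try_eval_sketch <- function(text) {
  #   env <- new.env(parent = parent.frame())
  #   res <- try(eval(parse(text = text), envir = env), silent = TRUE)
  #   if (inherits(res, "try-error")) assign("message", as.character(res), envir = env)
  #   env
  # }
  #---------------------------------------------------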
#---------------------------------------------------
# observeEvent of input$which_external_cppModel
#---------------------------------------------------
#observeEvent({input$which_external_cppModel}, {
load_external_cppModel <- reactive({
validate(need(input$cppModel_source=="external file", message=FALSE) )
inFile = input$which_external_cppModel
# print(inFile) # readLines cppModel
# name size type datapath
# 1 cpp.model.cpp 6369 /tmp/RtmprQR1xU/1eb54214311d1970e61c917f/0.cpp
#
ext <- tools::file_ext(inFile$name)
file.rename(inFile$datapath,
paste(inFile$datapath, ext, sep="."))
cppModel_file = paste(inFile$datapath, ext, sep=".")
values$cppModel_content <- readLines(cppModel_file)
# create a progress object
progress <- shiny::Progress$new()
on.exit(progress$close()) # Make sure it closes when we exit this reactive, even if there's an error
progress$set(message = "mread cppModel...please Wait", value = 0)
# mread cppModel
environment(try_eval) <- environment()
text="cppModel=mread(model='cppModel',project=dirname(inFile$datapath),quiet=TRUE,file=basename(cppModel_file))"
env = try_eval(text)
if ("cppModel" %in% ls(env)) {
cppModel = get("cppModel", env)
# "default, "message", "warning", "error"
showNotification("mread cppModel...done.", type="message") # "default, "message", "warning", "error"
}else{
cppModel = NULL
error_message = get("message", env)
# "default, "message", "warning", "error"
showNotification(paste0(error_message, collapse="\n"), type="error")
}
cppModel
})
#---------------------------------------------------
# observeEvent of input$which_session_cppModel
#---------------------------------------------------
#observeEvent({input$which_session_cppModel}, {
load_session_cppModel <- reactive({
validate(need(input$cppModel_source=="within session", message=FALSE),
need(input$which_session_cppModel, message=FALSE)
)
cppModel = ALL$cppModel[[input$which_session_cppModel]]
validate(need(cppModel, message=FALSE))
values$cppModel_content = see(cppModel, raw=TRUE)
cppModel
})
#---------------------------------------------------
# observeEvent of input$YesNoIIV, not used
#---------------------------------------------------
observeEvent(input$YesNoIIV, {
cppModel = values$cppModel
validate(need(cppModel, message=FALSE))
if (input$YesNoIIV=="No") {
cppModel = cppModel %>% zero_re
values$cppModel = cppModel
message = "Simulation with no inter-individual variability (IIV)"
# "default, "message", "warning", "error"
showNotification(message, type="message")
}
if (input$YesNoIIV=="Yes") {
message = "Simulation with inter-individual variability (IIV)"
# "default, "message", "warning", "error"
showNotification(message, type="message")
}
})
#--------------------------------------
# observeEvent
#--------------------------------------
# https://groups.google.com/forum/#!topic/shiny-discuss/vd_nB-BH8sw
# event for values$cppModel
# default, save the loaded cppModel to
# ALL$cppModel[[cppModel_name]]
# observeEvent({values$cppModel}, {
# validate(need(values$cppModel, message=FALSE))
#
# ALL$cppModel[[cppModel_name]] = values$cppModel
# })
observeEvent({input$which_internal_cppModel}, {
validate(need(input$cppModel_source=="internal library", message=FALSE) )
ALL$cppModel[[cppModel_name]] = load_internal_cppModel()
})
observeEvent({input$which_external_cppModel}, {
validate(need(input$cppModel_source=="external file", message=FALSE) )
ALL$cppModel[[cppModel_name]] = load_external_cppModel()
})
observeEvent({input$which_session_cppModel}, {
validate(need(input$cppModel_source=="within session", message=FALSE) )
ALL$cppModel[[cppModel_name]] = load_session_cppModel()
})
#-----------------------
# event for save_model
#-----------------------
# or, user manually save the modified cppModel to
# ALL$cppModel[[cppModel_name]]
observeEvent({input$save_model}, {
if (is.null(input$cppModel_content) || input$cppModel_content=="") {
message = "There is nothing to save"
# "default, "message", "warning", "error"
showNotification(message, type="warning")
}
if (is.null(input$model_name) || input$model_name=="") {
message = "Please specify model name"
# "default, "message", "warning", "error"
showNotification(message, type="warning")
}
validate(need(input$cppModel_content, message="No cppModel loaded..."),
need(input$model_name, message="Please specify model name")
)
# create a progress object
progress <- shiny::Progress$new()
on.exit(progress$close()) # Make sure it closes when we exit this reactive, even if there's an error
progress$set(message = "mread cppModel...please Wait", value = 0)
# mread cppModel
environment(try_eval) <- environment()
text="cppModel=mread('cppModel', tempdir(), input$cppModel_content, quiet=TRUE)"
env = try_eval(text)
if ("cppModel" %in% ls(env)) {
cppModel = get("cppModel", env)
values$cppModel = cppModel
ALL$cppModel[[input$model_name]] = cppModel # visible
# "default, "message", "warning", "error"
showNotification("mread cppModel...done.", type="message")
}else{
values$cppModel = NULL
error_message = get("message", env)
# "default, "message", "warning", "error"
showNotification(paste0(error_message, collapse="\n"), type="error")
}
})
return(ALL)
}
|
/module/module_load_cppModel.R
|
no_license
|
fyang72/handbook
|
R
| false | false | 12,564 |
r
|
|
# ---------------------------------------------------------------------------------------------
# Script to:
# * Fill gaps in AGB map by interpolating by land cover class
# Follows (run after):
# * (possibly postprocess_AGB_map.R)
# * regression_AGB-g0.R - creates AGB map
# * calculate_AGB.R - calculates AGB by plot from field data
# * process_ALOS_tiles.R
# Requires:
# * AGB map
# * LULC map
# ---------------------------------------------------------------------------------------------
library(sf)
library(terra)
library(tidyverse)
library(tmap)
tmap_mode('view')
results_dir <- 'data/results'
lc_fps <- c("data/raw/landcover/Lemoiner/Haiti2017_Clip.tif",
"data/raw/landcover/Lemoiner/DR_2017_clip.tif")
lc_fp_out <- "data/LULC/Hisp_2017_resALOS_terra.tif"
lc_out <- "data/LULC/Hisp_2017_resALOS_mskLand.tif"
agb_masked_fp <- file.path(results_dir, 'tifs_by_R',
'agb18_v3_l1_mask_Ap3WUw25_u20_hti_qLee.tif')
msk_lnd_fp <- file.path(results_dir, 'masks',
'hti18_maskLand.tif')
agb_from_lc_fp <- file.path(results_dir, 'tifs_by_R',
'LCpatches_agb18_v3l1_Ap3WUw25u20_hti.tif')
agb_filled_fp <- file.path(results_dir, 'tifs_by_R',
'agb18_v3_l3_Ap3WUw25u20_hti_filled_LCpatches.tif')
agb_capped_fp <- file.path(results_dir, 'tifs_by_R',
'agb18_v3_l2_nomask_cap310.tif')
lc_pols_agb_fp <- str_glue("data/LULC/Haiti2017_Clip_polys_meanAGB.geojson")
lc_pols_agb_fp <- str_glue("data/LULC/Haiti2017_polys_AGBzonal.gpkg")
lc_stat <- 'median'
agb_filled_fp <- file.path(results_dir, 'tifs_by_R', str_glue('agb18_v3_l3_nomask_cap310_hti_LC{lc_stat}.tif'))
agb_from_lc_fp <- file.path(results_dir, 'tifs_by_R', str_glue('LCpatches_agb18_v3l2_nomask_cap310_{lc_stat}.tif'))
agb_from_lc_sd_fp <- file.path(results_dir, 'tifs_by_R', str_glue('LCpatches_agb18_v3l2_nomask_cap310_hti_{lc_stat}_sd.tif'))
agb_filled_sd_fp <- file.path(results_dir, 'tifs_by_R', str_glue('agb18_v3_l3_nomask_cap310_hti_LC{lc_stat}_sd.tif'))
mask_code <- 'mAWUw25u20'
cap_code <- 'cap310'
agb_masked_fp <- file.path(results_dir, 'tifs_by_R',
str_glue('agb18_v3_l2_{mask_code}_{cap_code}.tif'))
# lc_pols_agb_fp <- file.path('data/LULC',
# str_glue("Haiti2017_Clip_polys_{lc_stat}AGB_v3l2_{mask_code}_{cap_code}.gpkg"))
agb_filled_fp <- file.path(results_dir, 'tifs_by_R',
str_glue('agb18_v3_l3_{mask_code}_{cap_code}_hti_LC{lc_stat}.tif'))
agb_from_lc_fp <- file.path(results_dir, 'tifs_by_R',
str_glue('LCpatches_agb18_v3l2_{mask_code}_{cap_code}_{lc_stat}.tif'))
agb_from_lc_sd_fp <- file.path(results_dir, 'tifs_by_R',
str_glue('LCpatches_agb18_v3l2_{mask_code}_{cap_code}_hti_{lc_stat}_sd.tif'))
agb_filled_sd_fp <- file.path(results_dir, 'tifs_by_R',
str_glue('agb18_v3_l3_{mask_code}_{cap_code}_hti_LC{lc_stat}_sd.tif'))
# Load AGB ================================================================
agb_ras <- terra::rast(agb_capped_fp)
# ~ AGB mean by LC patch =========================================================
# Get mean AGB for each land cover patch (extract)
# Polygonize LULC raster ----
# output filenames
lc_multipols_fp <- "data/LULC/Haiti2017_Clip_multipolys"
lc_pols_fp <- "data/LULC/Haiti2017_Clip_polys.geojson"
if(!file.exists(lc_pols_fp)) {
# Polygonize
lc_fps[[1]] %>%
terra::rast() %>%
terra::as.polygons() %>%
terra::writeVector(lc_multipols_fp)
# Un-dissolve: Convert multipolygons to polygons
lc_sf <- lc_multipols_fp %>%
sf::st_read() %>%
rename('LC' = 1) %>%
sf::st_cast('POLYGON')
# Save
lc_sf %>% st_write(lc_pols_fp, delete_dsn=T)
}
# Extract AGB values ----
if(!file.exists(lc_pols_agb_fp)){
# Load polygons
lc_vect <- terra::vect(lc_pols_fp)
# Don't use Water and Urban class
lc_vect2 <- terra::subset(lc_vect, lc_vect$LC > 2)
agb_ex <- terra::extract(agb_ras, lc_vect2)
names(agb_ex) <- c('ID', 'agb')
# Convert matrix to tibble and get mean and count for each polygon
agb <- agb_ex %>%
as_tibble() %>%
group_by(ID) %>%
summarise(median = median(agb, na.rm=T),
mean = mean(agb, na.rm=T),
ct = sum(!is.na(agb)),
sd = sd(agb, na.rm = T))
# Append columns to SF polys
lc_sf <- sf::st_as_sf(as.data.frame(lc_vect2, geom=TRUE),
wkt="geometry",
crs=crs(lc_vect2))
lc_sf_all <- agb %>%
select(-ID) %>%
cbind(lc_sf, .) %>%
filter(ct > 1)
# Save
lc_sf_all %>% st_write(lc_pols_agb_fp, delete_dsn = T)
}
# Look
agb_raster <- raster::raster(agb_masked_fp)
lc_sf_all <- st_read(lc_pols_agb_fp)
tm_shape(lc_sf_all) + tm_fill(col='mean') +
tm_shape(agb_raster) + tm_raster()
# Fill missing AGB with patch means --------------------------------------------
# agb_from_lc_sd_fp <- file.path(results_dir, 'tifs_by_R/LCpatches_agb18_v3l1_Ap3WUw25u20_hti_sd.tif')
# agb_filled_fp <- file.path(results_dir, 'tifs_by_R/agb18_v3_l3_Ap3WUw25u20_hti_filled_LCpatches.tif')
# agb_filled_sd_fp <- file.path(results_dir, 'tifs_by_R/agb18_v3_l3_Ap3WUw25u20_hti_filled_LCpatches_sd.tif')
# SF to SpatVector
lc_all_vect <- terra::vect(lc_pols_agb_fp)
if(!file.exists(agb_filled_fp)) {
# Rasterize means
agb_by_lcpatch <- lc_all_vect %>%
terra::rasterize(agb_ras, field = lc_stat,
filename=agb_from_lc_fp, overwrite=T,
wopt=list(datatype='FLT4S', gdal='COMPRESS=LZW'))
# Fill gaps and save
agb_ras <- terra::rast(agb_masked_fp)
agb_filled <- terra::cover(agb_ras, agb_by_lcpatch,
filename=agb_filled_fp, overwrite=T,
wopt=list(datatype='FLT4S', gdal='COMPRESS=LZW'))
} else {
agb_filled <- terra::rast(agb_filled_fp)  # file already exists: load the previously filled raster
}
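# Minimal illustration (toy rasters, not project data) of the gap filling above:
# terra::cover() keeps the first raster's values and takes the second raster's values
# only where the first is NA.
local({
  r_primary  <- terra::rast(nrows = 2, ncols = 2, vals = c(1, NA, 3, NA))
  r_fallback <- terra::rast(nrows = 2, ncols = 2, vals = rep(9, 4))
  print(terra::values(terra::cover(r_primary, r_fallback))[, 1])  # 1 9 3 9
})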
# Create uncertainty layer for agb_filled ----
agb_err <- agb_ras <- terra::rast(agb_masked_fp)
agb_err[agb_err > 0] <- 23 # cross-validation RMSE
if(!file.exists(agb_filled_sd_fp)) {
# Rasterize SDs
agb_by_lcpatch_sd <- lc_all_vect %>%
terra::rasterize(agb_err, field = 'sd', filename=agb_from_lc_sd_fp, overwrite=T,
wopt=list(datatype='FLT4S', gdal='COMPRESS=LZW'))
agb_filled_err <- terra::cover(agb_err, agb_by_lcpatch_sd,
filename=agb_filled_sd_fp, overwrite=T,
wopt=list(datatype='FLT4S', gdal='COMPRESS=LZW'))
}
agb_by_lcpatch_sd <- terra::rast(agb_from_lc_sd_fp)
plot(agb_by_lcpatch_sd)
# Pre-process land cover -------------------------------------------------------
if(!file.exists(lc_fp_out)){
# Resample Haiti and DR land cover to AGB res
rs <- sapply(lc_fps,
function(fp) {
r <- terra::rast(fp)
r <- terra::resample(r, agb_ras, method='near')
return(r)}
)
# Merge, i.e. cover (index by position: the list is named by the full lc_fps paths,
# so the shorter data/LULC/... names used previously would not match)
lc2 <- terra::cover(rs[[1]], rs[[2]],
filename=lc_fp_out, overwrite=T,
wopt=list(datatype='FLT4S', gdal='COMPRESS=LZW'))
}
# ~ AGB mean by LC (6 values) ----------------------------------------------------
agb_filled_fp <- file.path(results_dir, 'tifs_by_R/agb18_v3_l3_Ap3WUw25u20_hti_filled_6zones.tif')
# Create AGB surface from mean AGB for each LC class (6 values)
# Load and crop LC
lc <- terra::rast(lc_fp_out) %>%
terra::crop(agb_ras)
# Mask LC to land pixels
lc <- lc * terra::rast(msk_lnd_fp)
# Compute mean AGB for each LC (zonal statistics)
zonal_stats <- terra::zonal(agb_ras, lc, 'mean', na.rm=T)
# Reclass LC to AGB values -> AGB by LC surface
agb_by_lc <- terra::classify(lc, zonal_stats)
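# Minimal sketch (toy rasters, not project data) of the zonal-then-classify pattern above:
# terra::zonal() returns a two-column table (zone value, mean), which terra::classify()
# then applies as an "is -> becomes" reclassification of the zone raster.
local({
  z  <- terra::rast(nrows = 2, ncols = 2, vals = c(1, 1, 2, 2))     # zones
  v  <- terra::rast(nrows = 2, ncols = 2, vals = c(10, 20, 30, 50)) # values
  zs <- terra::zonal(v, z, 'mean', na.rm = TRUE)                    # zone 1 -> 15, zone 2 -> 40
  print(terra::classify(z, zs))
})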
# Fill gaps in AGB with AGB by LC ----------------------------------------------
agb_filled <- terra::cover(agb_ras, agb_by_lc, filename=agb_filled_fp, overwrite=T,
wopt=list(datatype='FLT4S', gdal='COMPRESS=LZW'))
if(file.exists(agb_filled_fp)) agb_filled <- terra::rast(agb_filled_fp)
plot(agb_filled)
plot(agb_ras)
|
/src/R/2018/05_post_fill_AGB_gaps.R
|
no_license
|
emilysturdivant/biomass-espanola
|
R
| false | false | 8,269 |
r
|
|
library(shiny)
library(ggplot2)
# Create populations from 10 to 10000, using a log10 scale
pop <- round(10 ^ seq(1, 4, by = 0.03))
base <- 5
# Functions for base 5 flooring and ceiling
floor_ <- function(num, base) { floor(num / base) * base }
ceil_ <- function(num, base) { ceiling(num / base) * base }
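# Worked example (illustration only; numbers assumed, not used by the app):
# floor_(12, 5) returns 10 and ceil_(12, 5) returns 15, so a raw count of 12 is bounded
# by the base-5 interval [10, 15] that the "worst case" lines below are built from.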
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$sim <- renderPlot({
denum <- pop
num <- (input$prev / 100) * denum
prev <- num / denum
# Classical rounding
    # Random
# Controlled
    worst_lo <- floor_(num, base) / ceil_(denum, base)
    worst_hi <- ceil_(num, base) / floor_(denum, base)
ggplot(data.frame(num, denum, prev)) +
aes(x=denum) +
geom_point(aes(y = prev), colour = 'red') +
geom_point(aes(y = worst_lo), colour = 'blue') +
geom_point(aes(y = worst_hi), colour = 'green') +
scale_x_log10() +
xlab("Population size (from 1 to 10000)") +
ylab("Prevalence in the population")
})
})
|
/sup-sim/server.R
|
permissive
|
malavv/sup-sim
|
R
| false | false | 1,050 |
r
|
|
##Data Specialization 04 - Exploratory Data: Project 1 - plot1
rm(list=ls())
wd <- "C:/Users/KK/Documents/Outside Learning/Specialization-Data Science/04_Exploratory Data Analysis/Project/Project01/exdata-data-household_power_consumption"
setwd(wd)
data <- read.table("household_power_consumption.txt",nrow=2075259,sep=";",stringsAsFactors = FALSE)
colnames(data) <- data[1,]
data2 <- data[2:nrow(data),]
DateTime <- paste(data2[,1],data2[,2])
DateTime <- strptime(DateTime, format = "%d/%m/%Y %H:%M:%S")
data2 <- cbind(DateTime,data2[,-(1:2)])
for(c in 2:ncol(data2)){
data2[,c] <- gsub("?","",data2[,c],fixed=TRUE)
data2[,c] <- as.numeric(data2[,c])
}
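# (Illustration of the conversion above: the raw file marks missing values with "?", so
#  gsub() turns "?" into "" and as.numeric("") then yields NA, e.g.
#  suppressWarnings(as.numeric(gsub("?", "", "?", fixed = TRUE))) returns NA.)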
data2 <- data2[order(data2$DateTime),]
from <- c("2007-02-01 00:00:00")
date.from <- strptime(from, format = "%Y-%m-%d %H:%M:%S")
to <- c("2007-02-02 23:59:59")
date.to <- strptime(to, format = "%Y-%m-%d %H:%M:%S")
f <- which(data2[,1] < date.from)
t <- which(data2[,1] > date.to)
data3 <- data2[(f[length(f)]+1):(t[1]-1),]
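# A (hypothetical) equivalent subset using logical indexing on the parsed timestamps,
# shown only for clarity; the index-based subset above is what this script actually uses:
# data3 <- data2[data2$DateTime >= date.from & data2$DateTime <= date.to, ]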
###plot1
png("plot1.png")
hist(data3[,3],main="Global Active Power",col="red",xlab="Global Active Power (kilowatts)",
breaks=12)
dev.off()
|
/plot1.R
|
no_license
|
ArctikkSunrise/ExData_Plotting1
|
R
| false | false | 1,138 |
r
|
|
"catheter" <-
structure(list(Name = c("Ciresi", "George", "Hannan", "Heard",
"vanHeerden", "Maki", "Bach(a)", "Ramsay", "Appavu", "Trazzera",
"Collins", "Bach(b)", "Tennenberg", "Pemberton", "Logghe"), n.trt = c(124,
44, 68, 151, 28, 208, 14, 199, 12, 123, 98, 116, 137, 32, 338
), n.ctrl = c(127, 35, 60, 157, 26, 195, 12, 189, 7, 99, 139,
117, 145, 40, 342), col.trt = c(15, 10, 22, 60, 4, 28, 0, 45,
1, 16, 2, 2, 8, NA, NA), col.ctrl = c(21, 25, 22, 82, 10, 47,
4, 63, 1, 24, 25, 16, 32, NA, NA), inf.trt = c(13, 1, 5, 5, NA,
2, NA, 1, NA, 4, 1, 0, 5, 2, 17), inf.ctrl = c(14, 3, 7, 6, NA,
9, NA, 4, NA, 5, 4, 3, 9, 3, 15)), row.names = c("1", "2", "3",
"4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"
), class = "data.frame")
|
/rmeta/data/catheter.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 756 |
r
|
"catheter" <-
structure(list(Name = c("Ciresi", "George", "Hannan", "Heard",
"vanHeerden", "Maki", "Bach(a)", "Ramsay", "Appavu", "Trazzera",
"Collins", "Bach(b)", "Tennenberg", "Pemberton", "Logghe"), n.trt = c(124,
44, 68, 151, 28, 208, 14, 199, 12, 123, 98, 116, 137, 32, 338
), n.ctrl = c(127, 35, 60, 157, 26, 195, 12, 189, 7, 99, 139,
117, 145, 40, 342), col.trt = c(15, 10, 22, 60, 4, 28, 0, 45,
1, 16, 2, 2, 8, NA, NA), col.ctrl = c(21, 25, 22, 82, 10, 47,
4, 63, 1, 24, 25, 16, 32, NA, NA), inf.trt = c(13, 1, 5, 5, NA,
2, NA, 1, NA, 4, 1, 0, 5, 2, 17), inf.ctrl = c(14, 3, 7, 6, NA,
9, NA, 4, NA, 5, 4, 3, 9, 3, 15)), row.names = c("1", "2", "3",
"4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15"
), class = "data.frame")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/GUIfunctions.R
\name{generateStrata}
\alias{generateStrata}
\title{Generate one strata variable from multiple factors}
\usage{
generateStrata(df, stratavars, name)
}
\arguments{
\item{df}{a data.frame}
\item{stratavars}{character vector of variable names}
\item{name}{name of the newly generated variable}
}
\value{
The original data set with one new column.
}
\description{
For strata defined by multiple variables (e.g., sex, age, country) one combined
variable is generated.
}
\examples{
x <- testdata
x <- generateStrata(x,c("sex","urbrur"),"strataIDvar")
head(x)
}
\author{
Alexander Kowarik
}
|
/man/generateStrata.Rd
|
no_license
|
bigfoot31/sdcMicro
|
R
| false | false | 686 |
rd
|
|
#' @rdname coupled4_CASPF
#' @title 4-way Coupled Ancestor Sampling Conditional Particle Filter
#' @description Runs four coupled conditional particle filters (two at each discretization level).
#' @param model a list representing a hidden Markov model, e.g. \code{\link{hmm_ornstein_uhlenbeck}}
#' @param theta a vector of parameters as input to model functions
#' @param discretization lists containing stepsize, nsteps, statelength, obstimes for fine and coarse levels,
#' and coarsetimes of length statelength_fine indexing time steps of coarse level
#' @param observations a matrix of observations, of size nobservations x ydimension
#' @param nparticles number of particles
#' @param resampling_threshold ESS proportion below which resampling is triggered (always resample at observation times by default)
#' @param coupled_resampling a 4-way coupled resampling scheme, such as \code{\link{coupled4_maximalchains_maximallevels_independent_residuals}}
#' @param ref_trajectory_coarse1 a matrix of first reference trajectory for coarser discretization level, of size xdimension x statelength_coarse
#' @param ref_trajectory_coarse2 a matrix of second reference trajectory for coarser discretization level, of size xdimension x statelength_coarse
#' @param ref_trajectory_fine1 a matrix of first reference trajectory for finer discretization level, of size xdimension x statelength_fine
#' @param ref_trajectory_fine2 a matrix of second reference trajectory for finer discretization level, of size xdimension x statelength_fine
#' @param treestorage logical specifying tree storage of Jacob, Murray and Rubenthaler (2013);
#' if FALSE (the default), this function stores all states and ancestors
#' @return four new trajectories stored as matrices of size xdimension x statelength_coarse/fine.
#' @export
coupled4_CASPF <- function(model, theta, discretization, observations, nparticles, resampling_threshold, coupled_resampling,
ref_trajectory_coarse1, ref_trajectory_coarse2,
ref_trajectory_fine1, ref_trajectory_fine2, treestorage = FALSE){
# get model/problem settings
nobservations <- nrow(observations)
xdimension <- model$xdimension
ydimension <- model$ydimension
# discretization
nsteps <- discretization$fine$nsteps
stepsize_fine <- discretization$fine$stepsize
stepsize_coarse <- discretization$coarse$stepsize
statelength_fine <- discretization$fine$statelength
statelength_coarse <- discretization$coarse$statelength
obstimes <- discretization$fine$obstimes # vector of length nsteps+1 indexing observation times
coarsetimes <- discretization$coarsetimes # vector of length nsteps+1 indexing coarse times
# check if trajectories are equal
meet_coarse <- all(ref_trajectory_coarse1 == ref_trajectory_coarse2)
meet_fine <- all(ref_trajectory_fine1 == ref_trajectory_fine2)
# create tree representation of the trajectories or store all states and ancestors
if (meet_coarse){
if (treestorage){
Tree_coarse1 <- new(TreeClass, nparticles, 10*nparticles*xdimension, xdimension) # only store one tree in this case
} else {
xtrajectory_coarse1 <- array(0, dim = c(statelength_coarse, xdimension, nparticles))
ancestries_coarse1 <- matrix(0, nrow = statelength_coarse, ncol = nparticles)
}
} else {
if (treestorage){
Tree_coarse1 <- new(TreeClass, nparticles, 10*nparticles*xdimension, xdimension)
Tree_coarse2 <- new(TreeClass, nparticles, 10*nparticles*xdimension, xdimension)
} else {
xtrajectory_coarse1 <- array(0, dim = c(statelength_coarse, xdimension, nparticles))
xtrajectory_coarse2 <- array(0, dim = c(statelength_coarse, xdimension, nparticles))
ancestries_coarse1 <- matrix(0, nrow = statelength_coarse, ncol = nparticles)
ancestries_coarse2 <- matrix(0, nrow = statelength_coarse, ncol = nparticles)
}
}
if (meet_fine){
if (treestorage){
Tree_fine1 <- new(TreeClass, nparticles, 10*nparticles*xdimension, xdimension) # only store one tree in this case
} else {
xtrajectory_fine1 <- array(0, dim = c(statelength_fine, xdimension, nparticles))
ancestries_fine1 <- matrix(0, nrow = statelength_fine, ncol = nparticles)
}
} else {
if (treestorage){
Tree_fine1 <- new(TreeClass, nparticles, 10*nparticles*xdimension, xdimension)
Tree_fine2 <- new(TreeClass, nparticles, 10*nparticles*xdimension, xdimension)
} else {
xtrajectory_fine1 <- array(0, dim = c(statelength_fine, xdimension, nparticles))
xtrajectory_fine2 <- array(0, dim = c(statelength_fine, xdimension, nparticles))
ancestries_fine1 <- matrix(0, nrow = statelength_fine, ncol = nparticles)
ancestries_fine2 <- matrix(0, nrow = statelength_fine, ncol = nparticles)
}
}
# initialization
xparticles_coarse1 <- model$rinit(nparticles) # size: xdimension x nparticles
xparticles_coarse2 <- xparticles_coarse1
xparticles_fine1 <- xparticles_coarse1
xparticles_fine2 <- xparticles_coarse1
xparticles_coarse1[, nparticles] <- ref_trajectory_coarse1[, 1]
xparticles_coarse2[, nparticles] <- ref_trajectory_coarse2[, 1]
xparticles_fine1[, nparticles] <- ref_trajectory_fine1[, 1]
xparticles_fine2[, nparticles] <- ref_trajectory_fine2[, 1]
# initialize storage (tree or arrays) for the trajectories
if (meet_coarse){
if (treestorage){
Tree_coarse1$init(xparticles_coarse1)
} else {
xtrajectory_coarse1[1, , ] <- xparticles_coarse1
}
} else {
if (treestorage){
Tree_coarse1$init(xparticles_coarse1)
Tree_coarse2$init(xparticles_coarse2)
} else {
xtrajectory_coarse1[1, , ] <- xparticles_coarse1
xtrajectory_coarse2[1, , ] <- xparticles_coarse2
}
}
if (meet_fine){
if (treestorage){
Tree_fine1$init(xparticles_fine1)
} else {
xtrajectory_fine1[1, , ] <- xparticles_fine1
}
} else {
if (treestorage){
Tree_fine1$init(xparticles_fine1)
Tree_fine2$init(xparticles_fine2)
} else {
xtrajectory_fine1[1, , ] <- xparticles_fine1
xtrajectory_fine2[1, , ] <- xparticles_fine2
}
}
logweights_coarse1 <- rep(0, nparticles)
logweights_coarse2 <- rep(0, nparticles)
logweights_fine1 <- rep(0, nparticles)
logweights_fine2 <- rep(0, nparticles)
index_obs <- 0
ancestors_coarse1 <- 1:nparticles
ancestors_coarse2 <- 1:nparticles
ancestors_fine1 <- 1:nparticles
ancestors_fine2 <- 1:nparticles
# index last observation
last_obs_fine <- 1
last_obs_coarse <- 1
# handle an observation at the initial time (particles were drawn from rinit above)
if (obstimes[1]){
# compute weights
index_obs <- index_obs + 1
observation <- observations[index_obs, ] # 1 x ydimension
if (meet_fine){
logweights_fine1 <- model$dmeasurement(theta, stepsize_fine[1], xparticles_fine1, observation)
logweights_fine2 <- logweights_fine1
} else {
logweights_fine1 <- model$dmeasurement(theta, stepsize_fine[1], xparticles_fine1, observation)
logweights_fine2 <- model$dmeasurement(theta, stepsize_fine[1], xparticles_fine2, observation)
}
maxlogweights_fine1 <- max(logweights_fine1)
maxlogweights_fine2 <- max(logweights_fine2)
weights_fine1 <- exp(logweights_fine1 - maxlogweights_fine1)
weights_fine2 <- exp(logweights_fine2 - maxlogweights_fine2)
normweights_fine1 <- weights_fine1 / sum(weights_fine1)
normweights_fine2 <- weights_fine2 / sum(weights_fine2)
ess_fine1 <- 1 / sum(normweights_fine1^2)
ess_fine2 <- 1 / sum(normweights_fine2^2)
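# (Worked example of the ESS formula above, for illustration only: with N equal weights of
#  1/N, sum of squared weights is 1/N so the ESS equals N; with all weight on one particle
#  the ESS is 1.)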
if (meet_coarse){
logweights_coarse1 <- model$dmeasurement(theta, stepsize_coarse[1], xparticles_coarse1, observation)
logweights_coarse2 <- logweights_coarse1
} else {
logweights_coarse1 <- model$dmeasurement(theta, stepsize_coarse[1], xparticles_coarse1, observation)
logweights_coarse2 <- model$dmeasurement(theta, stepsize_coarse[1], xparticles_coarse2, observation)
}
maxlogweights_coarse1 <- max(logweights_coarse1)
maxlogweights_coarse2 <- max(logweights_coarse2)
weights_coarse1 <- exp(logweights_coarse1 - maxlogweights_coarse1)
weights_coarse2 <- exp(logweights_coarse2 - maxlogweights_coarse2)
normweights_coarse1 <- weights_coarse1 / sum(weights_coarse1)
normweights_coarse2 <- weights_coarse2 / sum(weights_coarse2)
ess_coarse1 <- 1 / sum(normweights_coarse1^2)
ess_coarse2 <- 1 / sum(normweights_coarse2^2)
# resampling
min_ess <- min(ess_fine1, ess_fine2, ess_coarse1, ess_coarse2)
if (min_ess < resampling_threshold * nparticles){
rand <- runif(nparticles)
ancestors <- coupled_resampling(normweights_coarse1, normweights_coarse2,
normweights_fine1, normweights_fine2,
nparticles, rand)
ancestors_coarse1 <- ancestors[, 1]
ancestors_coarse2 <- ancestors[, 2]
ancestors_fine1 <- ancestors[, 3]
ancestors_fine2 <- ancestors[, 4]
# ancestor sampling
as_logweights_fine1 <- logweights_fine1 + model$dtransition(theta, stepsize_fine[1], xparticles_fine1, ref_trajectory_fine1[, 2])
as_logweights_fine2 <- logweights_fine2 + model$dtransition(theta, stepsize_fine[1], xparticles_fine2, ref_trajectory_fine2[, 2])
as_maxlogweights_fine1 <- max(as_logweights_fine1)
as_maxlogweights_fine2 <- max(as_logweights_fine2)
as_weights_fine1 <- exp(as_logweights_fine1 - as_maxlogweights_fine1)
as_weights_fine2 <- exp(as_logweights_fine2 - as_maxlogweights_fine2)
as_normweights_fine1 <- as_weights_fine1 / sum(as_weights_fine1)
as_normweights_fine2 <- as_weights_fine2 / sum(as_weights_fine2)
as_logweights_coarse1 <- logweights_coarse1 + model$dtransition(theta, stepsize_coarse[1], xparticles_coarse1, ref_trajectory_coarse1[, 2])
as_logweights_coarse2 <- logweights_coarse2 + model$dtransition(theta, stepsize_coarse[1], xparticles_coarse2, ref_trajectory_coarse2[, 2])
as_maxlogweights_coarse1 <- max(as_logweights_coarse1)
as_maxlogweights_coarse2 <- max(as_logweights_coarse2)
as_weights_coarse1 <- exp(as_logweights_coarse1 - as_maxlogweights_coarse1)
as_weights_coarse2 <- exp(as_logweights_coarse2 - as_maxlogweights_coarse2)
as_normweights_coarse1 <- as_weights_coarse1 / sum(as_weights_coarse1)
as_normweights_coarse2 <- as_weights_coarse2 / sum(as_weights_coarse2)
as_ancestors <- coupled_resampling(as_normweights_coarse1, as_normweights_coarse2,
as_normweights_fine1, as_normweights_fine2,
1, rand[nparticles])
ancestors_coarse1[nparticles] <- as_ancestors[, 1]
ancestors_coarse2[nparticles] <- as_ancestors[, 2]
ancestors_fine1[nparticles] <- as_ancestors[, 3]
ancestors_fine2[nparticles] <- as_ancestors[, 4]
xparticles_coarse1 <- xparticles_coarse1[, ancestors_coarse1]
xparticles_coarse2 <- xparticles_coarse2[, ancestors_coarse2]
xparticles_fine1 <- xparticles_fine1[, ancestors_fine1]
xparticles_fine2 <- xparticles_fine2[, ancestors_fine2]
# reset weights
logweights_coarse1 <- rep(0, nparticles)
logweights_coarse2 <- rep(0, nparticles)
logweights_fine1 <- rep(0, nparticles)
logweights_fine2 <- rep(0, nparticles)
}
}
index_coarse <- 0 # index coarse times
for (k in 1:nsteps){
# propagate under latent dynamics
randn <- matrix(rnorm(xdimension * nparticles), nrow = xdimension, ncol = nparticles) # size: xdimension x nparticles
if (meet_fine){
xparticles_fine1 <- model$rtransition(theta, stepsize_fine[k], xparticles_fine1, randn) # size: xdimension x nparticles
xparticles_fine2 <- xparticles_fine1
} else {
xparticles_fine1 <- model$rtransition(theta, stepsize_fine[k], xparticles_fine1, randn) # size: xdimension x nparticles
xparticles_fine2 <- model$rtransition(theta, stepsize_fine[k], xparticles_fine2, randn) # size: xdimension x nparticles
}
xparticles_fine1[, nparticles] <- ref_trajectory_fine1[, k+1]
xparticles_fine2[, nparticles] <- ref_trajectory_fine2[, k+1]
if (coarsetimes[k+1]){
# increment number of coarse steps
index_coarse <- index_coarse + 1
# combine Brownian increments
if (coarsetimes[k]){
combined_randn <- randn # use same Brownian increment if we cannot fit a fine stepsize in the remainder of coarse level
} else {
combined_randn <- (previous_randn + randn) / sqrt(2)
}
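# (Why divide by sqrt(2): the sum of two independent N(0,1) fine-level draws has variance 2,
#  so scaling by 1/sqrt(2) gives a standard normal again, i.e. the kind of draw rtransition
#  is assumed to expect for the roughly doubled coarse stepsize.)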
# propagate under latent dynamics
if (meet_coarse){
xparticles_coarse1 <- model$rtransition(theta, stepsize_coarse[index_coarse], xparticles_coarse1, combined_randn) # size: xdimension x nparticles
xparticles_coarse2 <- xparticles_coarse1
} else {
xparticles_coarse1 <- model$rtransition(theta, stepsize_coarse[index_coarse], xparticles_coarse1, combined_randn) # size: xdimension x nparticles
xparticles_coarse2 <- model$rtransition(theta, stepsize_coarse[index_coarse], xparticles_coarse2, combined_randn)
}
xparticles_coarse1[, nparticles] <- ref_trajectory_coarse1[, index_coarse+1]
xparticles_coarse2[, nparticles] <- ref_trajectory_coarse2[, index_coarse+1]
}
previous_randn <- randn
# update tree storage
if (meet_fine){
if (treestorage){
Tree_fine1$update(xparticles_fine1, ancestors_fine1 - 1)
} else {
xtrajectory_fine1[k+1, , ] <- xparticles_fine1
ancestries_fine1[k, ] <- ancestors_fine1
}
} else {
if (treestorage){
Tree_fine1$update(xparticles_fine1, ancestors_fine1 - 1)
Tree_fine2$update(xparticles_fine2, ancestors_fine2 - 1)
} else {
xtrajectory_fine1[k+1, , ] <- xparticles_fine1
xtrajectory_fine2[k+1, , ] <- xparticles_fine2
ancestries_fine1[k, ] <- ancestors_fine1
ancestries_fine2[k, ] <- ancestors_fine2
}
}
ancestors_fine1 <- 1:nparticles
ancestors_fine2 <- 1:nparticles
if (coarsetimes[k+1]){
if (meet_coarse){
if (treestorage){
Tree_coarse1$update(xparticles_coarse1, ancestors_coarse1 - 1)
} else {
xtrajectory_coarse1[index_coarse+1, , ] <- xparticles_coarse1
ancestries_coarse1[index_coarse, ] <- ancestors_coarse1
}
} else {
if (treestorage){
Tree_coarse1$update(xparticles_coarse1, ancestors_coarse1 - 1)
Tree_coarse2$update(xparticles_coarse2, ancestors_coarse2 - 1)
} else {
xtrajectory_coarse1[index_coarse+1, , ] <- xparticles_coarse1
xtrajectory_coarse2[index_coarse+1, , ] <- xparticles_coarse2
ancestries_coarse1[index_coarse, ] <- ancestors_coarse1
ancestries_coarse2[index_coarse, ] <- ancestors_coarse2
}
}
ancestors_coarse1 <- 1:nparticles
ancestors_coarse2 <- 1:nparticles
}
if (obstimes[k+1]){
# compute weights
index_obs <- index_obs + 1
observation <- observations[index_obs, ] # 1 x ydimension
if (model$is_discrete_observation){
# observation only depends on current particles in a discrete model
if (meet_fine){
logweights_fine1 <- logweights_fine1 + model$dmeasurement(theta, stepsize_fine[k], xparticles_fine1, observation)
logweights_fine2 <- logweights_fine1
} else {
logweights_fine1 <- logweights_fine1 + model$dmeasurement(theta, stepsize_fine[k], xparticles_fine1, observation)
logweights_fine2 <- logweights_fine2 + model$dmeasurement(theta, stepsize_fine[k], xparticles_fine2, observation)
}
} else {
# observation depends on inter-observation states
x_sub_trajectory1 <- array(0, dim = c(k-last_obs_fine+1, xdimension, nparticles))
x_sub_trajectory1[ , , ] <- xtrajectory_fine1[last_obs_fine:k, , ]
if (meet_fine){
logweights_fine1 <- logweights_fine1 + model$dmeasurement(theta, stepsize_fine[k], x_sub_trajectory1, observation)
logweights_fine2 <- logweights_fine1
} else {
x_sub_trajectory2 <- array(0, dim = c(k-last_obs_fine+1, xdimension, nparticles))
x_sub_trajectory2[ , , ] <- xtrajectory_fine2[last_obs_fine:k, , ]
logweights_fine1 <- logweights_fine1 + model$dmeasurement(theta, stepsize_fine[k], x_sub_trajectory1, observation)
logweights_fine2 <- logweights_fine2 + model$dmeasurement(theta, stepsize_fine[k], x_sub_trajectory2, observation)
}
}
# index last observation
last_obs_fine <- k + 1
maxlogweights_fine1 <- max(logweights_fine1)
maxlogweights_fine2 <- max(logweights_fine2)
weights_fine1 <- exp(logweights_fine1 - maxlogweights_fine1)
weights_fine2 <- exp(logweights_fine2 - maxlogweights_fine2)
normweights_fine1 <- weights_fine1 / sum(weights_fine1)
normweights_fine2 <- weights_fine2 / sum(weights_fine2)
ess_fine1 <- 1 / sum(normweights_fine1^2)
ess_fine2 <- 1 / sum(normweights_fine2^2)
if (model$is_discrete_observation){
# observation only depends on current particles in a discrete model
if (meet_coarse){
logweights_coarse1 <- logweights_coarse1 + model$dmeasurement(theta, stepsize_coarse[index_coarse], xparticles_coarse1, observation)
logweights_coarse2 <- logweights_coarse1
} else {
logweights_coarse1 <- logweights_coarse1 + model$dmeasurement(theta, stepsize_coarse[index_coarse], xparticles_coarse1, observation)
logweights_coarse2 <- logweights_coarse2 + model$dmeasurement(theta, stepsize_coarse[index_coarse], xparticles_coarse2, observation)
}
} else {
# observation depends on inter-observation states
x_sub_trajectory1 <- array(0, dim = c(index_coarse-last_obs_coarse+1, xdimension, nparticles))
x_sub_trajectory1[ , , ] <- xtrajectory_coarse1[last_obs_coarse:index_coarse, , ]
if (meet_coarse){
logweights_coarse1 <- logweights_coarse1 + model$dmeasurement(theta, stepsize_coarse[index_coarse], x_sub_trajectory1, observation)
logweights_coarse2 <- logweights_coarse1
} else {
x_sub_trajectory2 <- array(0, dim = c(index_coarse-last_obs_coarse+1, xdimension, nparticles))
x_sub_trajectory2[ , , ] <- xtrajectory_coarse2[last_obs_coarse:index_coarse, , ]
logweights_coarse1 <- logweights_coarse1 + model$dmeasurement(theta, stepsize_coarse[index_coarse], x_sub_trajectory1, observation)
logweights_coarse2 <- logweights_coarse2 + model$dmeasurement(theta, stepsize_coarse[index_coarse], x_sub_trajectory2, observation)
}
}
# index last observation
last_obs_coarse <- index_coarse + 1
maxlogweights_coarse1 <- max(logweights_coarse1)
maxlogweights_coarse2 <- max(logweights_coarse2)
weights_coarse1 <- exp(logweights_coarse1 - maxlogweights_coarse1)
weights_coarse2 <- exp(logweights_coarse2 - maxlogweights_coarse2)
normweights_coarse1 <- weights_coarse1 / sum(weights_coarse1)
normweights_coarse2 <- weights_coarse2 / sum(weights_coarse2)
ess_coarse1 <- 1 / sum(normweights_coarse1^2)
ess_coarse2 <- 1 / sum(normweights_coarse2^2)
# resampling
min_ess <- min(ess_fine1, ess_fine2, ess_coarse1, ess_coarse2)
if (k < nsteps && min_ess < resampling_threshold * nparticles){
rand <- runif(nparticles)
ancestors <- coupled_resampling(normweights_coarse1, normweights_coarse2,
normweights_fine1, normweights_fine2,
nparticles, rand)
ancestors_coarse1 <- ancestors[, 1]
ancestors_coarse2 <- ancestors[, 2]
ancestors_fine1 <- ancestors[, 3]
ancestors_fine2 <- ancestors[, 4]
# ancestor sampling
as_logweights_fine1 <- logweights_fine1 + model$dtransition(theta, stepsize_fine[k+1], xparticles_fine1, ref_trajectory_fine1[, k+2])
as_logweights_fine2 <- logweights_fine2 + model$dtransition(theta, stepsize_fine[k+1], xparticles_fine2, ref_trajectory_fine2[, k+2])
as_maxlogweights_fine1 <- max(as_logweights_fine1)
as_maxlogweights_fine2 <- max(as_logweights_fine2)
as_weights_fine1 <- exp(as_logweights_fine1 - as_maxlogweights_fine1)
as_weights_fine2 <- exp(as_logweights_fine2 - as_maxlogweights_fine2)
as_normweights_fine1 <- as_weights_fine1 / sum(as_weights_fine1)
as_normweights_fine2 <- as_weights_fine2 / sum(as_weights_fine2)
as_logweights_coarse1 <- logweights_coarse1 + model$dtransition(theta, stepsize_coarse[index_coarse+1], xparticles_coarse1, ref_trajectory_coarse1[, index_coarse+2])
as_logweights_coarse2 <- logweights_coarse2 + model$dtransition(theta, stepsize_coarse[index_coarse+1], xparticles_coarse2, ref_trajectory_coarse2[, index_coarse+2])
as_maxlogweights_coarse1 <- max(as_logweights_coarse1)
as_maxlogweights_coarse2 <- max(as_logweights_coarse2)
as_weights_coarse1 <- exp(as_logweights_coarse1 - as_maxlogweights_coarse1)
as_weights_coarse2 <- exp(as_logweights_coarse2 - as_maxlogweights_coarse2)
as_normweights_coarse1 <- as_weights_coarse1 / sum(as_weights_coarse1)
as_normweights_coarse2 <- as_weights_coarse2 / sum(as_weights_coarse2)
as_ancestors <- coupled_resampling(as_normweights_coarse1, as_normweights_coarse2,
as_normweights_fine1, as_normweights_fine2,
1, rand[nparticles])
ancestors_coarse1[nparticles] <- as_ancestors[, 1]
ancestors_coarse2[nparticles] <- as_ancestors[, 2]
ancestors_fine1[nparticles] <- as_ancestors[, 3]
ancestors_fine2[nparticles] <- as_ancestors[, 4]
xparticles_coarse1 <- xparticles_coarse1[, ancestors_coarse1]
xparticles_coarse2 <- xparticles_coarse2[, ancestors_coarse2]
xparticles_fine1 <- xparticles_fine1[, ancestors_fine1]
xparticles_fine2 <- xparticles_fine2[, ancestors_fine2]
# reset weights
logweights_coarse1 <- rep(0, nparticles)
logweights_coarse2 <- rep(0, nparticles)
logweights_fine1 <- rep(0, nparticles)
logweights_fine2 <- rep(0, nparticles)
}
}
}
# draw ancestor indices for the four output trajectories (one per coupled chain)
rand <- runif(1)
ancestor <- coupled_resampling(normweights_coarse1, normweights_coarse2,
normweights_fine1, normweights_fine2,
1, rand)
ancestor_coarse1 <- ancestor[, 1]
ancestor_coarse2 <- ancestor[, 2]
ancestor_fine1 <- ancestor[, 3]
ancestor_fine2 <- ancestor[, 4]
if (meet_coarse){
if (treestorage){
new_trajectory_coarse1 <- Tree_coarse1$get_path(ancestor_coarse1 - 1)
} else {
new_trajectory_coarse1 <- get_path(model, discretization$coarse, xtrajectory_coarse1, ancestries_coarse1, ancestor_coarse1)
}
new_trajectory_coarse2 <- new_trajectory_coarse1
} else {
if (treestorage){
new_trajectory_coarse1 <- Tree_coarse1$get_path(ancestor_coarse1 - 1)
new_trajectory_coarse2 <- Tree_coarse2$get_path(ancestor_coarse2 - 1)
} else {
new_trajectory_coarse1 <- get_path(model, discretization$coarse, xtrajectory_coarse1, ancestries_coarse1, ancestor_coarse1)
new_trajectory_coarse2 <- get_path(model, discretization$coarse, xtrajectory_coarse2, ancestries_coarse2, ancestor_coarse2)
}
}
if (meet_fine){
if (treestorage){
new_trajectory_fine1 <- Tree_fine1$get_path(ancestor_fine1 - 1)
} else {
new_trajectory_fine1 <- get_path(model, discretization$fine, xtrajectory_fine1, ancestries_fine1, ancestor_fine1)
}
new_trajectory_fine2 <- new_trajectory_fine1
} else {
if (treestorage){
new_trajectory_fine1 <- Tree_fine1$get_path(ancestor_fine1 - 1)
new_trajectory_fine2 <- Tree_fine2$get_path(ancestor_fine2 - 1)
} else {
new_trajectory_fine1 <- get_path(model, discretization$fine, xtrajectory_fine1, ancestries_fine1, ancestor_fine1)
new_trajectory_fine2 <- get_path(model, discretization$fine, xtrajectory_fine2, ancestries_fine2, ancestor_fine2)
}
}
return(list(new_trajectory_coarse1 = new_trajectory_coarse1, new_trajectory_coarse2 = new_trajectory_coarse2,
new_trajectory_fine1 = new_trajectory_fine1, new_trajectory_fine2 = new_trajectory_fine2))
}
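# Hypothetical usage sketch (all inputs assumed for illustration; see the roxygen block above
# for where each argument comes from). The returned trajectories would typically be fed back
# in as the reference trajectories of the next coupled sweep:
# res <- coupled4_CASPF(model, theta, discretization, observations,
#                       nparticles = 128, resampling_threshold = 0.5,
#                       coupled_resampling = coupled4_maximalchains_maximallevels_independent_residuals,
#                       ref_trajectory_coarse1, ref_trajectory_coarse2,
#                       ref_trajectory_fine1, ref_trajectory_fine2)
# str(res$new_trajectory_fine1)  # xdimension x statelength_fine matrix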
|
/R/coupled4_CASPF.R
|
no_license
|
jeremyhengjm/UnbiasedScore
|
R
| false | false | 24,785 |
r
|
return(list(new_trajectory_coarse1 = new_trajectory_coarse1, new_trajectory_coarse2 = new_trajectory_coarse2,
new_trajectory_fine1 = new_trajectory_fine1, new_trajectory_fine2 = new_trajectory_fine2))
}
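# Illustrative helper (an assumption, not taken from the original code): based only on the
# list returned above, an outer coupled-chain loop might check whether each pair of returned
# trajectories has coalesced before deciding to stop iterating. The function name is hypothetical.
trajectories_met <- function(res){
  identical(res$new_trajectory_fine1, res$new_trajectory_fine2) &&
    identical(res$new_trajectory_coarse1, res$new_trajectory_coarse2)
}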
|
#################################################################
###
###
### Data input:
### "./4_Analysis/",download.method, "/", Cancer, "/Signature_Enrichment/GSEA_", Cancer,
### "_Bindea_xCell_HallmarkPathways.Rdata")
### Output :
### "./5_Figures/Correlation_plots/ICR_Correlation_plots/", download.method,
### "/ICR_Correlation_plot_",Cancer,".png"
### Manual adjustment of min and max
#################################################################
# Setup environment
rm(list=ls())
setwd("~/Dropbox (TBI-Lab)/TCGA Analysis pipeline/") # Setwd to location were output files have to be saved.
#setwd("~/Dropbox (TBI-Lab)/External Collaborations/TCGA Analysis pipeline/")
code_path = "~/Dropbox (Personal)/Jessica PhD Project/QCRI-SIDRA-ICR-Jessica/" # Set code path to the location were the R code is located
#code_path = "~/Dropbox (Personal)/R-projects/QCRI-SIDRA-ICR/"
#code_path = "C:/Users/whendrickx/R/GITHUB/TCGA_Pipeline/"
source(paste0(code_path, "R tools/ipak.function.R"))
source(paste0(code_path,"R tools/heatmap.3.R"))
required.packages <- c("gtools", "circlize")
ibiopak("ComplexHeatmap")
ipak(required.packages)
# Set Parameters
CancerTYPES = "ALL" # Specify the cancertypes that you want to download or process, c("...","...") or "ALL"
Cancer_skip = "" # If CancerTYPES = "ALL", specify here if you want to skip cancertypes
download.method = "Assembler_Panca_Normalized" # Specify download method (this information to be used when saving the file)
assay.platform = "gene_RNAseq" # Specify to which location TCGA-Assembler_v2.0.3 was downloaded
Log_file = paste0("./1_Log_Files/", download.method ,"/5.5.1.Pancancer_Bindea_Heatmap/5.5.1.Pancancer.Bindea.Heatmap_", # Specify complete name of the logfile that will be saved during this script
gsub(":",".",gsub(" ","_",date())),".txt")
my.palette <- colorRampPalette(c("blue", "white", "red"))(n = 297)
ColsideLabels = c("Cancers", "ICR_Enabled/Disabled")
Legend = c("ICR Low","ICR Med","ICR High")
Deconvolution_matrix = "Hallmark.enrichment.score" # "Bindea.enrichment.score", "bindea_patrick.enrichment.score"
Cutoff_HR = 1
Tumor_purity_correction = "No_correction"
pathway_filter = "significant_and_inverse_pathways"
include_proliferation = "include_proliferation"
# Load data
TCGA.cancersets = read.csv(paste0(code_path, "Datalists/TCGA.datasets.csv"),stringsAsFactors = FALSE) # TCGA.datasets.csv is created from Table 1. (Cancer Types Abbreviations)
# in the Manual of Assembler v2.0.3 and was saved as csv file.
# Define parameters (based on loaded data)
if (CancerTYPES == "ALL") {
CancerTYPES = TCGA.cancersets$cancerType
}
# Create folders
dir.create("./5_Figures/",showWarnings = FALSE)
dir.create(paste0("./5_Figures/Pancancer_plots"), showWarnings = FALSE)
dir.create(paste0("./5_Figures/Pancancer_plots/", download.method), showWarnings = FALSE)
dir.create(paste0("./1_Log_Files/"), showWarnings = FALSE)
dir.create(paste0("./1_Log_Files/", download.method, "/5.5.1.Pancancer_Bindea_Heatmap"), showWarnings = FALSE)
cat("This is a log file for creating heatmap bindea enrichment plots",
"__________________________________________",
"",
"Session Info :",
capture.output(sessionInfo()),
"",
"Parameters Used :",
paste0("CancerTYPES = ", CancerTYPES),
paste0("Cancer_skip = ", Cancer_skip),
paste0("download.method = ", download.method),
"",
"Scripts output :",
file = Log_file,
append = FALSE, sep= "\n")
N.sets = length(CancerTYPES)
load(paste0("./4_Analysis/",download.method,"/Pan_Cancer/Survival_Analysis/", "Survival_analysis_High_vs_Low_Groups",
"HML_classification", ".Rdata"))
ICR_enabled_cancers = as.character(All_survival_analysis_data$Cancertype[which(All_survival_analysis_data$HR > Cutoff_HR)])
ICR_disabled_cancers = as.character(All_survival_analysis_data$Cancertype[which(All_survival_analysis_data$HR <= Cutoff_HR)])
ICR_disabled_cancers = ICR_disabled_cancers[-which(ICR_disabled_cancers == "LAML")]
load("./4_Analysis/Assembler_Panca_Normalized/Pan_Cancer/Clustering/ICR_cluster_assignment_allcancers.Rdata")
if(Tumor_purity_correction == "No_correction" & include_proliferation == "include_proliferation"){
load(paste0("./4_Analysis/", download.method, "/Pan_Cancer/Signature_Enrichment/ssGSEA.Hallmark&Proliferation.Pancancer.Rdata"))
Enrichment_score_all = Hallmark_enrichment_score_all
}
if(Tumor_purity_correction == "Leukocyte_estimate_correction"){
load(paste0("./4_Analysis/", download.method,"/Pan_Cancer/Signature_Enrichment/ssGSEA.Hallmark.Pancancer.Leukocyte.Estimate.Corrected_v2.Rdata"))
Enrichment_score_all = Hallmark_ES_all_Leuko_corrected
rownames(ICR_cluster_assignment_allcancers) = substring(rownames(ICR_cluster_assignment_allcancers), 1, 12)
}
###
#Leuk_estimate = read.csv("./3_DataProcessing/External/Leuk.infil.data.clean.csv", stringsAsFactors = FALSE)
#ICR_cluster_assignment_allcancers$Leuk_estimate = Leuk_estimate$Leuk.Estimate[match(rownames(ICR_cluster_assignment_allcancers), substring(Leuk_estimate$SampleID, 1, 12))]
#ICR_cluster_assignment_allcancers = ICR_cluster_assignment_allcancers[-which(is.na(ICR_cluster_assignment_allcancers$Leuk_estimate)),]
#ICR_cluster_assignment_allcancers$Leuk_estimate[which(ICR_cluster_assignment_allcancers$Leuk_estimate >= 0.2)] = "High"
#ICR_cluster_assignment_allcancers$Leuk_estimate[which(ICR_cluster_assignment_allcancers$Leuk_estimate < 0.2)] = "Low"
#annotation = ICR_cluster_assignment_allcancers[,c("HML_cluster", "Cancer", "Leuk_estimate")]
#annotation$LE.col[which(annotation$Leuk_estimate == "Low")] = "lightblue"
#annotation$LE.col[which(annotation$Leuk_estimate == "High")] = "pink"
###
annotation = ICR_cluster_assignment_allcancers[,c("HML_cluster", "Cancer")]
annotation$ICR_ED = NA
annotation$ICR_ED[which(annotation$Cancer %in% ICR_enabled_cancers)] = "ICR_enabled"
annotation$ICR_ED[which(annotation$Cancer %in% ICR_disabled_cancers)] = "ICR_disabled"
# Bindea classification
annotation$HML_cluster.col[annotation$HML_cluster=="ICR High"] = "red"
annotation$HML_cluster.col[annotation$HML_cluster=="ICR Medium"] = "green"
annotation$HML_cluster.col[annotation$HML_cluster=="ICR Low"] = "blue"
annotation$Cancer.col = Cancer_color_table$color[match(annotation$Cancer, Cancer_color_table$Group.1)]
annotation$ICR_ED.col[annotation$ICR_ED == "ICR_enabled"] = "orange"
annotation$ICR_ED.col[annotation$ICR_ED == "ICR_disabled"] = "purple"
All_survival_analysis_data = All_survival_analysis_data[order(All_survival_analysis_data$HR, decreasing = TRUE),]
Cancer_order = as.character(All_survival_analysis_data$Cancertype[-which(All_survival_analysis_data$Cancertype == "LAML")])
ICR_order = c("ICR Low","ICR Medium","ICR High")
ICR_ED_order = c("ICR_enabled", "ICR_disabled")
##
#Leuk_est_order = c("Low", "High")
#annotation = annotation[order(match(annotation$ICR_ED, ICR_ED_order), match(annotation$Leuk_estimate, Leuk_est_order)),]
#annotation = annotation[order(match(annotation$HML_cluster, ICR_order), match(annotation$Cancer, Cancer_order)),]
#annotation = annotation[order(match(annotation$HML_cluster, ICR_order)),]
annotation = annotation[order(match(annotation$ICR_ED, ICR_ED_order), match(annotation$Cancer, Cancer_order)),]
annotation.blot = as.matrix(annotation[,c("Cancer.col", "ICR_ED.col"), drop = FALSE])
annotation.blot = annotation.blot[which(rownames(annotation.blot) %in% colnames(Enrichment_score_all)),]
#annotation.blot = annotation.blot[colnames(Expression.data),] # The sample order in annotation.blot needs to be the same as in Expression.data
#Expression.data = Expression.data[colnames(annotation.blot),]
Enrichment_score_all.z.score = Enrichment_score_all
j=1
for(j in 1: nrow(Enrichment_score_all.z.score)) {
Enrichment_score_all.z.score[j,] = (Enrichment_score_all[j,]-mean(Enrichment_score_all[j,]))/sd(Enrichment_score_all[j,]) # z-score the enrichment matrix
}
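# Note (illustrative alternative, assuming Enrichment_score_all is a plain numeric matrix):
# the row-wise z-scoring above could equivalently be written with base R's scale(), e.g.
# Enrichment_score_all.z.score <- t(scale(t(Enrichment_score_all)))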
Enrichment_score_all.z.score = Enrichment_score_all.z.score[,rownames(annotation.blot)]
ICR_cluster_assignment_allcancers$ICR_ED = NA
ICR_cluster_assignment_allcancers$ICR_ED[ICR_cluster_assignment_allcancers$Cancer %in% ICR_enabled_cancers] = "ICR_enabled"
ICR_cluster_assignment_allcancers$ICR_ED[ICR_cluster_assignment_allcancers$Cancer %in% ICR_disabled_cancers] = "ICR_disabled"
## Determine order rows/signatures
to_aggregate = t(Enrichment_score_all.z.score)
to_aggregate = as.data.frame(to_aggregate)
to_aggregate$ICR_ED = ICR_cluster_assignment_allcancers$ICR_ED[match(rownames(to_aggregate), rownames(ICR_cluster_assignment_allcancers))]
mean_ES_all = aggregate(.~ ICR_ED, data = to_aggregate, FUN = mean)
mean_ES_all = t(mean_ES_all)
colnames(mean_ES_all) = mean_ES_all[1,]
mean_ES_all = mean_ES_all[-1,]
mode(mean_ES_all) = "numeric"
mean_ES_all = as.data.frame(mean_ES_all)
mean_ES_all$DeltaED = c(mean_ES_all$ICR_enabled - mean_ES_all$ICR_disabled)
mean_ES_all = mean_ES_all[order(mean_ES_all$DeltaED, decreasing = TRUE),]
cell_order = rownames(mean_ES_all)
Enrichment_score_all.z.score = Enrichment_score_all.z.score[c(cell_order),]
if(pathway_filter == "significant_and_inverse_pathways"){
load(paste0("./4_Analysis/", download.method, "/Pan_Cancer/Survival_Analysis/ICR_All_Pathway_High_Survival_analysis_High_vs_Low_Groups_Oncogenic_pathways.Rdata"))
All_survival_analysis_data_ONCO_High = All_survival_analysis_data_ONCO
rm(All_survival_analysis_data_ONCO)
load(paste0("./4_Analysis/", download.method, "/Pan_Cancer/Survival_Analysis/ICR_All_Pathway_Low_Survival_analysis_High_vs_Low_Groups_Oncogenic_pathways.Rdata"))
All_survival_analysis_data_ONCO_Low = All_survival_analysis_data_ONCO
rm(All_survival_analysis_data_ONCO)
merged = merge(All_survival_analysis_data_ONCO_High, All_survival_analysis_data_ONCO_Low, by = "Oncogenic_Pathway",
suffixes = c("_High", "_Low"))
merged$score_contribution = NA
merged$score_contribution[which(merged$HR_High >1 & merged$HR_Low <1 & merged$p_value_High < 0.05 & merged$p_value_Low < 0.05)] = "enabling"
merged$score_contribution[which(merged$HR_High <1 & merged$HR_Low >1 & merged$p_value_High < 0.05 & merged$p_value_Low < 0.05)] = "disabling"
enabling_pathways = gsub("_cluster_Pancancer", "", as.character(merged$Oncogenic_Pathway[which(merged$score_contribution == "enabling")]))
enabling_pathways[which(enabling_pathways == "Proliferation")] = c("[LM] Proliferation")
disabling_pathways = gsub("_cluster_Pancancer", "", as.character(merged$Oncogenic_Pathway[which(merged$score_contribution == "disabling")]))
Enrichment_score_all.z.score = Enrichment_score_all.z.score[c(enabling_pathways, disabling_pathways),]
}
## Plot prep
Legend = Cancer_order
rownames(Cancer_color_table) = Cancer_color_table$Group.1
Cancer_color_table = Cancer_color_table[Cancer_order,]
Legend_colors = c(Cancer_color_table$color)
#Legend2 = c("ICR Low","ICR Med","ICR High")
#Legend_colors2 = c("blue","green","red")
Legend3 = c("ICR enabled", "ICR disabled")
Legend_colors3 = c("orange", "purple")
### Plotting
png(paste0("./5_Figures/Pancancer_plots/", download.method, "/", Deconvolution_matrix, "_", Tumor_purity_correction,
"_", pathway_filter,"_ICR_E_D_seperated_v2", "_Heatmap_RNASeq_Pancancer_HML.png"),
res = 600, height = 15, width = 15, unit = "in")
heatmap.3((as.matrix(Enrichment_score_all.z.score)),
main= paste0("Pancancer enrichment scores \nssGSEA/Oncogenic pathway signatures \n", Tumor_purity_correction),
col=my.palette,
ColSideColors=annotation.blot,
font_size_col_Labs = 1.5,
cex.main = 10,
ColSideLabs = ColsideLabels,
#Colv= as.dendrogram(sHc),
#Colv = NULL,
Rowv = NULL,
labCol=NA,
side.height.fraction = 0.25,
cexRow = 1.3,
margins = c(13, 30))
title(sub = list(paste0("Figure: EDAseq normalized, log transformed gene expression data \n was obtained from TCGA, using ", download.method, ". \n",
"Oncogenic pathway enrichment z-scores were used to generate this heatmap."), cex = 1), outer = FALSE, line = -1, adj = 0.65)
legend("bottomleft",legend = Legend,
col = Legend_colors,lty= 0.5,lwd = 0.5, cex = 0.7, pch= 15, pt.cex = 1.0)
#legend("top", legend = Legend2,
#col = Legend_colors2, lty= 0.5,lwd = 0.5, cex = 0.7, pch= 15, pt.cex = 1.0)
legend("topright", legend = Legend3,
col = Legend_colors3, lty= 0.5,lwd = 0.5, cex = 0.7, pch= 15, pt.cex = 1.0)
dev.off()
## Color Key
png(paste0("./5_Figures/Pancancer_plots/", download.method, "/Color_Key_", Deconvolution_matrix, "_", Tumor_purity_correction,
"_", pathway_filter,"_ICR_E_D_seperated_v2", "_Heatmap_RNASeq_Pancancer_HML.png"),
res = 600, height = 7, width = 7, unit = "in")
heatmap.3((as.matrix(Enrichment_score_all.z.score)),
main= paste0("Pancancer enrichment scores \nssGSEA/Oncogenic pathway signatures \n", Tumor_purity_correction),
col=my.palette,
ColSideColors=annotation.blot,
font_size_col_Labs = 1.5,
cex.main = 10,
ColSideLabs = ColsideLabels,
#Colv= as.dendrogram(sHc),
Colv = NULL,
Rowv = NULL,
labCol=NA,
side.height.fraction = 0.25,
cexRow = 1.3,
margins = c(13, 30))
dev.off()
|
/Any Cancer (V4.1)/5.5.1.2.Pancancer.ES.Heatmap.ICR.Enabled.Disabled.R
|
no_license
|
dudious/QCRI-SIDRA-ICR
|
R
| false | false | 13,992 |
r
|
# Exercise 2: indexing and filtering vectors
# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- c(10:20)
# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- c(seq(21, 30, 1))
# Create a vector `all_numbers` by combining the previous two vectors
all_numbers <- c(first_ten, next_ten)
# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]
# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- c(all_numbers[2:5])
# Create a vector `even` that holds the even numbers from 1 to 100
even <- c(seq(2, 100, 2))
# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
all(even %% 2 == 0)
# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)
# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- c(phone_numbers[1:3])
# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- subset(phone_numbers, phone_numbers <= 5)
# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
large <- subset(phone_numbers, phone_numbers > 5)
# Replace the values in `phone_numbers` that are larger than 5 with the number 5
phone_numbers <- ifelse(phone_numbers > 5, 5, phone_numbers)
# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers <- ifelse(phone_numbers %% 2 == 1, 0, phone_numbers)
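# Illustrative alternative (not required by the exercise): logical indexing can replace the
# ifelse() calls above, e.g.
# phone_numbers[phone_numbers > 5] <- 5
# phone_numbers[phone_numbers %% 2 == 1] <- 0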
|
/chapter-07-exercises/exercise-2/exercise.R
|
permissive
|
omgitsjt/book-exercises
|
R
| false | false | 1,720 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rd2roxygen.R
\name{create_roxygen}
\alias{create_roxygen}
\title{Create the roxygen documentation}
\usage{
create_roxygen(info, usage = FALSE)
}
\arguments{
\item{info}{the named list of the parsed documentation}
\item{usage}{logical: whether to include the usage section in the output
(this can be useful when there are multiple functions in a single usage
section, but generally it is not necessary because roxygen can generate the
usage section automatically)}
}
\value{
a character vector
}
\description{
The parsed information is converted to a vector of roxygen tags.
}
\examples{
rd.file = system.file("examples", "parse_and_save.Rd", package = "Rd2roxygen")
options(roxygen.comment = "##' ")
create_roxygen(parse_file(rd.file))
}
\author{
Hadley Wickham; modified by Yihui Xie <\url{http://yihui.org}>
}
|
/man/create_roxygen.Rd
|
no_license
|
yihui/Rd2roxygen
|
R
| false | true | 891 |
rd
|
##################################################################
## ##
## Plots of Volume of Landings/Discards per FO/Fishing Day/Trip ##
## ##
## MM 07/02/2008 ##
##################################################################
#-------------------------------------------------------------------------------
# Calculation from cs datasets of volume (weights) of landings/discards per haul/fd/trip
#-------------------------------------------------------------------------------
landisVolumeFun <- function(object, #csData object
species, #species to specify
fraction, #"LAN" or "DIS"
timeStrata, #from hh
spaceStrata, #from hh
techStrata, #from hh
sampPar=TRUE, #'sampPar' checks if given species is considered as automatically sampled (if TRUE, sppReg=Par <-> sppReg=All, i.e. includes sppReg="Par" in the analysis )
...){
fraction <- toupper(fraction) #
object@sl$catchCat <- toupper(object@sl$catchCat) # MM 29/04/2010
object@hl$catchCat <- toupper(object@hl$catchCat) #
object@ca$catchCat <- toupper(object@ca$catchCat) #
M_ik <- y_ikj <- m_ik <- NULL
if (is.na(timeStrata)) timeStrata <- NULL
if (is.na(spaceStrata)) spaceStrata <- NULL
if (is.na(techStrata)) techStrata <- NULL
  ## originally, only sea sampling data was kept       # modif 26/01/2009 : now all data is kept
op.sub <- object@hh
# op.sub <- op.sub[op.sub$sampType=="S",] #
capt.sub <- object@sl
# capt.sub <- capt.sub[capt.sub$sampType=="S",] #
#if (nrow(op.sub)==0) stop("no sea sampling data!!") #
#if species parameter is missing, one species from sl table has to be chosen
if (missing(species)) {
un <- unique(as.character(object@sl$spp))
un <- un[!is.na(un)]
if (length(un)>1) {
warning("Several species in SL table!! All will be taken into account!")}
species <- un}
#restriction of data to specified fraction and species
capt.sub <- capt.sub[capt.sub$catchCat%in%fraction,]
capt.sub <- capt.sub[capt.sub$spp%in%species,]
#trpCode, staNum & date are converted to factors
op.sub$trpCode <- factor(op.sub$trpCode)
op.sub$date <- factor(op.sub$date)
op.sub$staNum <- factor(op.sub$staNum)
  #If timeStrata is "semester", "quarter" or "month", the corresponding field must be added to HH
if (!is.null(timeStrata)) {
HHmonth <- as.numeric(sapply(op.sub$date,function(x) strsplit(as.character(x),"-")[[1]][2]))
if (timeStrata=="month")
op.sub$month <- HHmonth
if (timeStrata=="quarter")
op.sub$quarter <- ceiling(HHmonth/3)
if (timeStrata=="semester")
op.sub$semester <- ceiling(HHmonth/6)
}
#stratification fields are also to be factors
if (!is.null(timeStrata))
op.sub[,timeStrata] <- factor(op.sub[,timeStrata])
if (!is.null(techStrata))
op.sub[,techStrata] <- factor(op.sub[,techStrata])
if (!is.null(spaceStrata))
op.sub[,spaceStrata] <- factor(op.sub[,spaceStrata])
#Number of sampled fishing days by trip, tech,time,space
expr1 <- paste(",op.sub$",c(timeStrata,techStrata,spaceStrata),sep="",collapse="")
if (expr1==",op.sub$")
expr1 <- ""
expr2 <- paste(",tabl1$",c(timeStrata,techStrata,spaceStrata),sep="",collapse="")
if (expr2==",tabl1$")
expr2 <- ""
#M_ik = total number of FOs by fishing day, by trip, by tech,time,space
eval(parse('',text=paste("M_ik <- tapply(op.sub$staNum,list(op.sub$trpCode,op.sub$date",expr1,"),function(x) length(unique(x)))",sep="")))
if (fraction=="LAN") {
fract <- "Lan"
} else {
fract <- "Dis" }
#-----------------------------------------------------------------------------
# Sampled weight index (Windex)
#-----------------------------------------------------------------------------
#---------------------------------------------------------------------------
# A haul is considered as sampled (weights) for a given species and a given fraction if :
# 1) (catchReg=="All") OR (catchReg==frac)
# AND
# 2) (sppReg=="All") OR (sppReg=="Par" AND sampPar=TRUE)
#---------------------------------------------------------------------------
#hh-index for sampled(1/0)/non sampled(NA) haul (weights) will be the combination of 2 indexes
indexCat <- indexSpp <- rep(0,nrow(op.sub))
#indexCat==1 if catReg=="All" or frac
indexCat[op.sub$catReg%in%c("All",fract)] <- 1
#indexSpp==1 if sppReg=="All" or if sppReg=="Par" & sampPar==TRUE
capt.sub$ind <- 1 ; indSpp <- merge(cbind(op.sub,ord=1:nrow(op.sub)),unique(capt.sub[,c("sampType","landCtry","vslFlgCtry","year","proj","trpCode","staNum","ind")]),all.x=TRUE,sort=FALSE) ; indSpp <- indSpp$ind[order(indSpp$ord)] #indSpp <-> index of hauls with related information in sl for given species and fraction
#indexSpp[op.sub$sppReg=="All" | (op.sub$sppReg=="Par" & sampPar)] <- 1
indexSpp[op.sub$sppReg=="All" | (op.sub$sppReg=="Par" & (sampPar | (!is.na(indSpp)) ))] <- 1
#so, Windex = indexCat*indexSpp (sampled haul index)
Windex <- indexCat*indexSpp
indZero <- (Windex==1) & (is.na(indSpp)) #indZero : index of sampled hauls with 0 values
#finally,...
Windex[Windex==0] <- NA ; Windex[indZero] <- 0
#'ind2', the valid sampling indicator, is built
Windex[op.sub$foVal!="V"] <- NA
op.sub$ind <- Windex
#m_ik = Number of sampled FOs by fishing day, by trip, by tech,time,space
eval(parse('',text=paste("m_ik <- tapply(!is.na(Windex),list(op.sub$trpCode,op.sub$date",expr1,"),sum)",sep="")))
#essentially to homogenize vectors sizes
tabl1 <- merge(op.sub,aggregate(capt.sub$wt,list(sampType=capt.sub$sampType,
landCtry=capt.sub$landCtry,
vslFlgCtry=capt.sub$vslFlgCtry,
year=capt.sub$year,
proj=capt.sub$proj,
trpCode=capt.sub$trpCode,
staNum=capt.sub$staNum),
sum),
all.x=TRUE)
names(tabl1)[ncol(tabl1)] <- "wt"
tabl1$wt[is.na(tabl1$wt)] <- 0 #this means that species is absent (or not sampled)
#ind is the sampling indicator in tabl1 (~Windex)
tabl1 <- tabl1[!is.na(tabl1$ind),]
#ADDED 10/10/2008 : if 'aggLev' = T, all FOs are considered to be sampled,so M_ik data is put in m_ik for these trips ###############################################################
comma <- paste(rep(",",length(dim(M_ik))-2),collapse="",sep="") #
if (any(tabl1$aggLev%in%"T")) { #modif 09/12/2008 #
eval(parse('',text=paste("m_ik[as.character(tabl1$trpCode)[tabl1$aggLev%in%\"T\"],",comma,"] <- M_ik[as.character(tabl1$trpCode)[tabl1$aggLev%in%\"T\"],",comma,"]",sep=""))) #
} #
#y_ikj = Total sampled weight by fishing day, by trip, by tech,time,space
eval(parse('',text=paste("y_ikj <- tapply(tabl1$wt,list(tabl1$trpCode,tabl1$date",expr2,"),sum,na.rm=TRUE)",sep="")))
#y_ikj_hat = Weight of each sample by fishing day and by trip
y_ikj_hat <- split(tabl1$wt,paste(tabl1$trpCode,tabl1$date,sep=":-:"),drop=TRUE)
#y_ik = Mean weight of samples by fishing day and by trip
y_ik <- unlist(lapply(y_ikj_hat,mean))
#y_IK = Raised sampled weight by fishing day, trip, time, space and tech
y_IK <- M_ik*y_ikj/m_ik
#MAT is built to define groupings for y_ik_hat
ll <- sum(!is.null(techStrata),!is.null(timeStrata),!is.null(spaceStrata))
indic <- ll+2
val <- expand.grid(dimnames(y_IK)[1:indic])
# valChar <- apply(val[,-2,drop=FALSE],1,function(x) paste(as.character(x),collapse=":-:"))
# MAT <- array(valChar,dim=dim(y_IK))
# MAT[is.na(y_IK)] <- NA
valChar <- apply(val[!is.na(y_IK),-2,drop=FALSE],1,function(x) paste(as.character(x),collapse=":-:"))
MAT <- array(y_IK,dim(y_IK))
MAT[!is.na(MAT)] <- valChar
#y_ik_hat = Raised Weight for each fishing day by trip
y_ik_hat <- split(y_IK,MAT,drop=TRUE)
  #y_i = Mean Raised Weight over fishing days by trip
y_i <- unlist(lapply(y_ik_hat,mean))
result <- list(fraction=fraction,species=species,timeStrata=timeStrata,techStrata=techStrata,spaceStrata=spaceStrata,
VolFO_FDTR=y_ikj_hat,MeanFO_FDTR=y_ik,VolFD_TR=y_ik_hat,MeanFD_TR=y_i)
return(new("edaResult",desc="landisVol",outPut=result))
}
################################################################################
################################################################################
#-------------------------------------------------------------------------------
# Plots of MeanFD_TR values
#-------------------------------------------------------------------------------
fdPlot <- function(x,
groups=NULL,
...){
stratas <- c("timeStrata","techStrata","spaceStrata")
timeStrata <- x@outPut$timeStrata
techStrata <- x@outPut$techStrata
spaceStrata <- x@outPut$spaceStrata
index <- c(timeStrata,techStrata,spaceStrata)
#-----------------------------------------------------------------------------
# Update of graphical parameters
#-----------------------------------------------------------------------------
data(GraphsPar,envir=environment())
dots <- list(...)
sapply(names(gp),function(x)
if (is.null(eval(parse('',text=paste("dots$",x,sep="")))))
eval(parse('',text=paste("dots$",x," <<- gp$",x,sep=""))))
if (is.null(dots$xlab))
dots$xlab <- "Trip Code"
if (is.null(dots$ylab))
dots$ylab <- "Weight (g)"
if (is.null(dots$layout)) dots$layout <- NULL
if (is.null(dots$as.table)) dots$as.table <- FALSE
if (all(is.null(timeStrata),is.null(techStrata),is.null(spaceStrata))) {
if (is.null(dots$main))
dots$main <- paste("Mean Weight by Fishing Day for each Trip\nSpecies :",paste(x@outPut$species,collapse=", ")," Fraction :",paste(x@outPut$fraction,collapse=", "))
df <- data.frame(trp=names(x@outPut$MeanFD_TR),bb=as.numeric(x@outPut$MeanFD_TR))
print(xyplot(bb~trp,data=df,main=list(dots$main,font=dots$font.main),xlab=list(dots$xlab,font=dots$font.lab),ylab=list(dots$ylab,font=dots$font.lab),
scales=list(font=dots$font.axis,x=list(rot=dots$rot)),pch=dots$pch[1],fill=dots$p.bg[1],cex=dots$p.cex[1],col=dots$col[1]))
} else {
if (is.null(dots$main))
dots$main <- paste("Mean Weight by Fishing Day for each Trip\nSpecies :",
paste(x@outPut$species,collapse=", ")," Fraction :",paste(x@outPut$fraction,collapse=", "),"\n",
paste("Time Strata :",timeStrata)[!is.null(timeStrata)],
paste(" Technical Strata :",techStrata)[!is.null(techStrata)],
paste(" Space Strata :",spaceStrata)[!is.null(spaceStrata)])
datas <- x@outPut$MeanFD_TR
df <- as.data.frame(do.call("rbind",lapply(names(datas),function(x) strsplit(x,":-:")[[1]])))
names(df) <- c("trp",timeStrata,techStrata,spaceStrata)
df$bb <- as.numeric(x@outPut$MeanFD_TR)
strip.col <- trellis.par.get("strip.background")$col
#-----------------------------------------------------------------------------
# Graphical display
#-----------------------------------------------------------------------------
if (is.null(groups)) {
eval(parse('',text=paste("print(xyplot(bb~trp|",paste(index,collapse="*"),",data=df,main=list(dots$main,font=dots$font.main),xlab=list(dots$xlab,font=dots$font.lab),layout=dots$layout,as.table=dots$as.table,",
"ylab=list(dots$ylab,font=dots$font.lab),par.strip.text=list(font=dots$font.lab),scales=list(font=dots$font.axis,x=list(relation=\"free\",rot=dots$rot,cex=dots$cex.lab[1])),",
"key=list(points=list(pch=15,cex=dots$p.cex[1],col=strip.col[1:length(index)]),text=list(index),font=dots$font.lab,columns=1,border=TRUE,space=\"right\"),",
"prepanel=function(x,y,...){x <- x[,drop=TRUE] ; prepanel.default.xyplot(x,y,...)},",
"panel = function(x,y,...){x <- x[,drop=TRUE] ; panel.xyplot(x,y,pch=dots$pch[1],fill=dots$p.bg[1],cex=dots$p.cex[1],col=dots$col[1],...)}))",sep="")))
} else {
indexStr <- index[!index%in%eval(parse('',text=groups))]
l1 <- length(indexStr)
LEV <- levels(df[,eval(parse('',text=groups))])
l2 <- length(LEV)
groups <- eval(parse('',text=groups))
eval(parse('',text=paste("print(xyplot(bb~trp",paste("|",paste(indexStr,collapse="*"),sep="")[l1>0],",data=df,groups=",groups,",main=list(dots$main,font=dots$font.main),layout=dots$layout,as.table=dots$as.table,",
"xlab=list(dots$xlab,font=dots$font.lab),ylab=list(dots$ylab,font=dots$font.lab),scales=list(font=dots$font.axis,x=list(relation=\"free\",rot=dots$rot,cex=dots$cex.lab[1])),",
"key=list(points=list(pch=c(rep(dots$pch[1],l2),NA,",c("rep(15,l1)","NA")[c((l1>0),(l1==0))],"),fill=dots$p.bg[1:l2],cex=dots$p.cex[1],lwd=dots$p.lwd[1],",
"col=c(rep(dots$col[1],l2),NA",",strip.col[1:l1]"[l1>0],")),text=list(c(LEV,\"\",\"",paste(indexStr,collapse="\",\""),"\")),title=\"",groups,"\",",
"cex.title=0.8,space=\"right\",font=dots$font.lab,columns=1,border=TRUE),par.strip.text=list(font=dots$font.lab),",
"prepanel=function(x,y,...){x <- x[,drop=TRUE] ; prepanel.default.xyplot(x,y,...)},",
"panel = function(x,y,...){x <- x[,drop=TRUE] ; panel.xyplot(x,y,pch=dots$pch[1],fill=dots$p.bg","[1]"[l2==0],",cex=dots$p.cex[1],col=dots$col[1],...)}))",sep="")))
}}
}
################################################################################
################################################################################
#-------------------------------------------------------------------------------
# Boxplots of VolFD_TR values
#-------------------------------------------------------------------------------
fdBoxplot <- function(x,...){
stratas <- c("timeStrata","techStrata","spaceStrata")
timeStrata <- x@outPut$timeStrata
techStrata <- x@outPut$techStrata
spaceStrata <- x@outPut$spaceStrata
index <- c(timeStrata,techStrata,spaceStrata)
data(GraphsPar,envir=environment())
dots <- list(...)
if (is.null(dots$pch))
dots$pch <- 19
sapply(names(gp),function(x)
if (is.null(eval(parse('',text=paste("dots$",x,sep="")))))
eval(parse('',text=paste("dots$",x," <<- gp$",x,sep=""))))
if (is.null(dots$xlab))
dots$xlab <- "Trip Code"
if (is.null(dots$ylab))
dots$ylab <- "Weight (g)"
if (is.null(dots$layout)) dots$layout <- NULL
if (is.null(dots$as.table)) dots$as.table <- FALSE
if (all(is.null(timeStrata),is.null(techStrata),is.null(spaceStrata))) {
if (is.null(dots$main))
dots$main <- paste("Weight by Fishing Day for each Trip\nSpecies :",
paste(x@outPut$species,collapse=", ")," Fraction :",paste(x@outPut$fraction,collapse=", "))
obj <- x@outPut$VolFD_TR
names(obj) <- sapply(names(x@outPut$VolFD_TR),function(x) strsplit(x,":-:")[[1]][1])
vec <- unlist(x@outPut$VolFD_TR)
nvec <- unlist(lapply(x@outPut$VolFD_TR,length))
df <- data.frame(aa=rep(names(obj),nvec),bb=vec)
print(bwplot(bb~aa,data=df,main=list(dots$main,font=dots$font.main),xlab=list(dots$xlab,font=dots$font.lab),ylab=list(dots$ylab,font=dots$font.lab),
pch=dots$pch[1],fill=dots$p.bg[1],scales=list(font=dots$font.axis,x=list(rot=dots$rot)),layout=dots$layout,as.table=dots$as.table,
par.settings=list(box.rectangle=list(col=dots$col[1]),box.umbrella=list(col=dots$col[1],lty=dots$lty[1]),
plot.symbol=list(col=dots$col[1]))))
} else {
if (is.null(dots$main))
dots$main <- paste("Weight by Fishing Day for each Trip\nSpecies :",
paste(x@outPut$species,collapse=", ")," Fraction :",paste(x@outPut$fraction,collapse=", "),"\n",
paste("Time Strata :",timeStrata)[!is.null(timeStrata)],
paste(" Technical Strata :",techStrata)[!is.null(techStrata)],
paste(" Space Strata :",spaceStrata)[!is.null(spaceStrata)])
datas <- x@outPut$VolFD_TR
df <- as.data.frame(do.call("rbind",lapply(rep(names(datas),lapply(datas,length)), function(x) strsplit(x,":-:")[[1]])))
names(df) <- c("trp",timeStrata,techStrata,spaceStrata)
df$bb <- as.numeric(unlist(datas))
strip.col <- trellis.par.get("strip.background")$col
eval(parse('',text=paste("print(bwplot(bb~trp|",paste(index,collapse="*"),",data=df,main=list(dots$main,font=dots$font.main),xlab=list(dots$xlab,font=dots$font.lab),layout=dots$layout,as.table=dots$as.table,",
"ylab=list(dots$ylab,font=dots$font.lab),par.strip.text=list(font=dots$font.lab),scales=list(font=dots$font.axis,x=list(relation=\"free\",rot=dots$rot,cex=dots$cex.lab[1])),",
"key=list(points=list(pch=15,cex=dots$p.cex[1],col=strip.col[1:length(index)]),text=list(index),font=dots$font.lab,columns=1,border=TRUE,space=\"right\"),",
"prepanel=function(x,y,...){x <- x[,drop=TRUE] ; prepanel.default.bwplot(x,y,...)},",
"panel = function(x,y,...){x <- x[,drop=TRUE] ; panel.bwplot(x,y,pch=dots$pch[1],fill=dots$p.bg[1],cex=dots$p.cex[1],col=dots$col[1],",
"par.settings=list(box.rectangle=list(col=dots$col[1]),box.umbrella=list(col=dots$col[1],lty=dots$lty[1]),plot.symbol=list(col=dots$col[1])),...)}))",sep="")))
}
}
################################################################################
################################################################################
#-------------------------------------------------------------------------------
# Plots of MeanFO_FDTR values
#-------------------------------------------------------------------------------
foPlot <- function(x,...){
data(GraphsPar,envir=environment())
dots <- list(...)
if (is.null(dots$rot))
dots$rot <- 0
sapply(names(gp),function(x)
if (is.null(eval(parse('',text=paste("dots$",x,sep="")))))
eval(parse('',text=paste("dots$",x," <<- gp$",x,sep=""))))
if (is.null(dots$xlab))
dots$xlab <- "Fishing day"
if (is.null(dots$ylab))
dots$ylab <- "Weight (g)"
if (is.null(dots$main))
dots$main <- paste("Mean Weight by FO for each Fishing day and Trip\nSpecies :",
paste(x@outPut$species,collapse=", ")," Fraction :",paste(x@outPut$fraction,collapse=", "))
if (is.null(dots$layout)) dots$layout <- NULL
if (is.null(dots$as.table)) dots$as.table <- FALSE
mat <- t(sapply(names(x@outPut$MeanFO_FDTR),function(x) strsplit(x,":-:")[[1]]))
df <- data.frame(trpCode=mat[,1],date=mat[,2],Fday=as.numeric(unlist(tapply(mat[,1],list(mat[,1]),function(x) 1:length(x)))),val=as.numeric(x@outPut$MeanFO_FDTR))
print(xyplot(val~Fday|trpCode,data=df,main=list(dots$main,font=dots$font.main),xlab=list(dots$xlab,font=dots$font.lab),ylab=list(dots$ylab,font=dots$font.lab),
scales=list(font=dots$font.axis,x=list(rot=dots$rot,cex=dots$cex.lab[1])),par.strip.text=list(font=dots$font.lab),layout=dots$layout,as.table=dots$as.table,
pch=dots$pch[1],fill=dots$p.bg[1],cex=dots$p.cex[1],col=dots$col[1]))
}
################################################################################
################################################################################
#-------------------------------------------------------------------------------
# Boxplots of VolFO_FDTR values
#-------------------------------------------------------------------------------
foBoxplot <- function(x,...){
data(GraphsPar,envir=environment())
dots <- list(...)
if (is.null(dots$rot))
dots$rot <- 0
if (is.null(dots$pch))
dots$pch <- 19
sapply(names(gp),function(x)
if (is.null(eval(parse('',text=paste("dots$",x,sep="")))))
eval(parse('',text=paste("dots$",x," <<- gp$",x,sep=""))))
if (is.null(dots$xlab))
dots$xlab <- "Fishing day"
if (is.null(dots$ylab))
dots$ylab <- "Weight (g)"
if (is.null(dots$main))
dots$main <- paste("Weight by Fishing Operation for each Fishing day and Trip\nSpecies :",
paste(x@outPut$species,collapse=", ")," Fraction :",paste(x@outPut$fraction,collapse=", "))
if (is.null(dots$layout)) dots$layout <- NULL
if (is.null(dots$as.table)) dots$as.table <- FALSE
vec <- unlist(x@outPut$VolFO_FDTR)
nvec <- unlist(lapply(x@outPut$VolFO_FDTR,length))
nvec2 <- rep(names(nvec),nvec)
mat <- t(sapply(nvec2,function(x) strsplit(x,":-:")[[1]]))
nvec3 <- sapply(unique(nvec2),function(x) strsplit(x,":-:")[[1]][1])
ind1 <- as.numeric(unlist(tapply(nvec3,list(nvec3),function(x) 1:length(x))))
ind2 <- as.numeric(unlist(tapply(nvec2,list(nvec2),length)))
df <- data.frame(trpCode=mat[,1],date=mat[,2],Fday=rep(ind1,ind2),val=vec)
df$Fday <- as.factor(df$Fday)
print(bwplot(val~Fday|trpCode,data=df,main=list(dots$main,font=dots$font.main),xlab=list(dots$xlab,font=dots$font.lab),ylab=list(dots$ylab,font=dots$font.lab),layout=dots$layout,as.table=dots$as.table,
pch=dots$pch[1],fill=dots$p.bg[1],scales=list(font=dots$font.axis,x=list(rot=dots$rot,cex=dots$cex.lab[1])),par.strip.text=list(font=dots$font.lab),
par.settings=list(box.rectangle=list(col=dots$col[1]),box.umbrella=list(col=dots$col[1],lty=dots$lty[1]),
plot.symbol=list(col=dots$col[1]))))
}
plotVol <- function(x,type,...){ #type="FD" or "FO"
if (missing(type)) type <- "FD"
if (type=="FD") {
fdPlot(x,...)
} else {
if (type=="FO") {
foPlot(x,...)
} else {
stop("'type' parameter is not valid!!")
}
}
}
boxplotVol <- function(x,type,...){ #type="FD" or "FO"
if (missing(type)) type <- "FD"
if (type=="FD") {
fdBoxplot(x,...)
} else {
if (type=="FO") {
foBoxplot(x,...)
} else {
stop("'type' parameter is not valid!!")
}
}
}
#########################
## ##
## Methods to export ##
## ##
#########################
#-------------------------------------------------------------------------------
# Calculation from cs datasets of volume of landings/discards per haul/fd/trip
#-------------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Raw objects
#---------------------------------------------------------------------------
setGeneric("landisVol", function(object,
strDef,
species,
fraction="LAN",
sampPar=TRUE,
...){
standardGeneric("landisVol")
})
setMethod("landisVol", signature(object="csData",strDef="strIni"), function(object,
strDef,
species,
fraction="LAN", #or "DIS"
sampPar=TRUE,
...){
landisVolumeFun(object,species=species,fraction=fraction,timeStrata=strDef@timeStrata,spaceStrata=strDef@spaceStrata,techStrata=strDef@techStrata,sampPar=sampPar)
})
setMethod("landisVol", signature(object="csData",strDef="missing"), function(object,
species,
fraction="LAN", #or "DIS"
sampPar=TRUE,
...){
landisVolumeFun(object,species=species,fraction=fraction,timeStrata=NA,spaceStrata=NA,techStrata=NA,sampPar=sampPar)
})
#-------------------------------------------------------------------------------
# plot (--> 'edaResult' with desc="landisVol" ) cf MarketSampGraphs_ExploreSimplify.r
#-------------------------------------------------------------------------------
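#-------------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original file): how the exported
# 'landisVol' method and the plotting wrappers defined above might be called on a COST 'csData'
# object. The object name, species and strata choices are hypothetical.
#
# res <- landisVol(cs.object, strIni(timeStrata = "quarter"),
#                  species = "Solea solea", fraction = "DIS", sampPar = TRUE)
# plotVol(res, type = "FD")      # mean weight per fishing day, by trip
# boxplotVol(res, type = "FO")   # weight per fishing operation and fishing day
#-------------------------------------------------------------------------------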
|
/COSTeda/R/MarketSeaSampGraphs_Explore.r
|
no_license
|
BackupTheBerlios/cost-project
|
R
| false | false | 26,720 |
r
|
# this code relates to https://github.com/pepfar-datim/COP-Target-Setting/issues/700
# it is not being exported pending https://github.com/pepfar-datim/COP-Target-Setting/issues/734
# as it works for COP20 leaving it in place for now
#' @export
#' @title Pull & combine all UIDS for specified data_stream types and FY.
#'
#' @description
#' Pulls all uids for types argument for a specified FY.
#'
#' @param cop_year Specifies COP year. Remember, COP20 = FY21 targets.
#' @param types Specify MER, SUBNAT, or IMPATT, or omit to include all three.
#' @param d2_session DHIS2 session object; defaults to the active d2_default_session.
#'
#' @return A character vector of dataset UIDs matching the requested types and fiscal year.
#'
#' @examples getCOPDatasetUIDs(cop_year = 2020, types = c("MER", "SUBNAT", "IMPATT"))
#'
getCOPDatasetUIDs <- function(cop_year = getCurrentCOPYear(),
types = c("MER", "SUBNAT", "IMPATT"),
d2_session = dynGet("d2_default_session",
inherits = TRUE)) {
if(cop_year != 2020){
stop("The COP year provided is not supported by the internal function getCOPDatasetUids")
}
data <- api_get(api_call("dataSets",
d2_session = d2_session),
d2_session = d2_session)
data <- data[grepl("^MER Targets: (Community|Facility)|MER Target Setting: PSNU|^(Host Country Targets|Planning Attributes): COP Prioritization SNU",
data$displayName),]
FY = cop_year + 1
# if(FY != currentFY()+1) #This only works if we assume that DATIM datasets all update and deploy by Oct 1
if(FY != getCurrentCOPYear()+1) {
data <- data[grepl(paste0("FY",FY), data$displayName),]
} else {
data <- data[!(grepl("FY[0-9]{4}", data$displayName)),]
}
data$fiscal_year <- ifelse(!stringr::str_detect(data$displayName, "FY"), currentFY()+1,
as.numeric(stringr::str_extract(data$displayName,"(?<=FY)\\d{4}$")))
data$data_stream <- ifelse(stringr::str_detect(data$displayName, "^MER "), "MER",
ifelse(stringr::str_detect(data$displayName, "^Host Country Targets"),
"SUBNAT","IMPATT"))
if(!(all((types %in% data$data_stream))))
{
stop(paste0("UID Not Found for ", setdiff(types, data$data_stream), " for FY ", FY))
}
# print(paste0("returning uids for " , FY))
return(data$id[data$data_stream %in% types])
}
#' @export
#' @title Grab all raw data in DATIM for a country for the COP data sets for a given COP Year.
#'
#' @description
#' Grab all raw data in DATIM for a country for the COP data sets for a given COP Year.
#'
#' @param country_uids Character vector of DATIM country organisation unit UIDs.
#' @param cop_year Specifies COP year for dating as well as selection of
#' templates.
#' @param d2_session DHIS2 Session id
#'
#' @return Raw data in DATIM for a country for the COP data sets for a given COP Year.
#'
#' @examples getCOPDataFromDATIM(country_uids = d$info$country_uids,
#'                               cop_year = d$info$cop_year)
#'
getCOPDataFromDATIM <- function(country_uids,
cop_year = getCurrentCOPYear(),
d2_session = dynGet("d2_default_session",
inherits = TRUE)) {
if(cop_year != 2020){
stop("The COP year provided is not supported by the internal function getCOPDataFromDATIM")
}
dataset_uids <- getCOPDatasetUIDs(cop_year,
d2_session = d2_session)
# package parameters for getDataValueSets function call
parameters <-
dplyr::bind_rows(
tibble::tibble(key = "dataSet", value = dataset_uids),
tibble::tibble(key = "orgUnit", value = country_uids),
tibble::tribble(~ key, ~ value,
"children", "true",
"categoryOptionComboIdScheme", "code",
"includeDeleted", "false",
"period", paste0(cop_year, "Oct")
)
)
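  # For reference, the long key/value table built above looks roughly like this
  # (values are illustrative; real dataset UIDs come from getCOPDatasetUIDs and
  # the country UIDs from the country_uids argument):
  #   key                          value
  #   dataSet                      <dataset uid 1>
  #   dataSet                      <dataset uid 2>
  #   orgUnit                      <country org unit uid>
  #   children                     true
  #   categoryOptionComboIdScheme  code
  #   includeDeleted               false
  #   period                       2020Oct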
# get data from datim using dataValueSets
# rename to standard names
datim_data <-
getDataValueSets(parameters$key,
parameters$value,
d2_session = d2_session) %>%
dplyr::rename(
datim_value = value,
data_element_uid = data_element,
org_unit_uid = org_unit,
category_option_combo_uid = category_option_combo,
attribute_option_combo_code = attribute_option_combo
)
return(datim_data)
}
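## Hypothetical usage sketch (not run). Both calls assume an authenticated DATIM
## session is already available as 'd2_default_session'; the country UID below is
## a placeholder, not a real organisation unit.
# cop_uids <- getCOPDatasetUIDs(cop_year = 2020, types = c("MER", "SUBNAT", "IMPATT"))
# cop_data <- getCOPDataFromDATIM(country_uids = "<country-orgunit-uid>", cop_year = 2020)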
# ---- end of file: /R/getCOPDataFromDATIM.R (repo: Solanrewaju/datapackr, license: permissive) ----
# flashiness
flash <- function(temp) {
n <- length(temp) # read out length of input data
t_fl <<- c() # create return vector; <<- defines global variable
for(i in 2:n) t_fl[i] <- abs(temp[i]-temp[i-1]) # calculate RB-index
flashy <- sum(t_fl, na.rm = TRUE)/(n-1)
return(flashy)
}
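# Minimal self-contained check on synthetic data (illustrative only; the real
# input df_temp$th is assumed to be loaded elsewhere as hourly temperatures):
set.seed(1)
demo_temp <- cumsum(rnorm(24)) # 24 synthetic hourly readings
flash(demo_temp)               # mean absolute hour-to-hour change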
t_flsh <- flash(temp = df_temp$th) # average of the hourly flashiness
# ---- end of file: /flashiness.R (repo: ValeGacitua/Data-Collection-Storage-and-Management, license: no_license) ----
library(modelsummary)
library(bife)
library(broom)
library(finalfit)
library(lmtest)
library(plm)
library(sandwich) # vcovHC() for the glm fits below
library(survival) # clogit()
library(ggplot2)  # ggplot()/geom_*() used for the predicted-probability plots
cyears <- read.csv("may2020data.csv")
# main models ----
p1 <- glm(new_mil ~ theta_diff_lag + theta_lag + gdpgr_lag + durable + recent_reb + (year>1989) + ethfrac, data=cyears, family=binomial("logit"))
summary(p1)
coeftest(p1, vcov. = vcovHC(p1, type="HC1", cluster="gwno_loc", adjust=T))
p2 <- glm(new_mil ~ theta_diff_lag + gdpgr_lag + log(gdppc_lag) + durable + recent_reb + factor(gwno_loc) -1, data=cyears, family=binomial("probit"))
msummary(list(p1,p2), 'markdown', coef_omit = 'gwno_loc', stars = T, title = 'Probit Models')
p3 <- bife(new_mil ~ theta_diff_lag + theta_lag + gdpgr_lag + durable + recent_reb | gwno_loc, data=cyears, model="logit")
p3bc <- bias_corr(p3, L=1L)
summary(p3bc)
summary(get_APEs(p3bc))
p3b <- clogit(new_mil ~ theta_diff_lag + theta_lag + gdpgr_lag + durable + recent_reb + strata(gwno_loc), data=cyears)
summary(p3b)
# pred probs ----
pred_data <- with(cyears, data.frame(
  theta_diff_lag = seq(min(theta_diff_lag, na.rm = T), max(theta_diff_lag, na.rm = T), length.out = 100),
theta_lag = mean(theta_lag, na.rm = T),
gdpgr_lag = mean(gdpgr_lag, na.rm = T),
durable = mean(durable, na.rm = T),
recent_reb = 0,
gwno_loc=651))
library(ggeffects)
ggpredict(p1, vcov.fun = "vcovCR", vcov.type = "CR0", vcov.args = list(cluster = cyears$gwno_loc))
# Old ----
pred_data <- data.frame(theta_diff_lag=seq(min(cyears$theta_diff_lag,na.rm=T), max(cyears$theta_diff_lag,na.rm=T), length.out = 100), theta_lag=rep(mean(cyears$theta_lag, na.rm=T), 100), gdpgr_lag=rep(mean(cyears$gdpgr_lag, na.rm=T), 100), durable=rep(mean(cyears$durable, na.rm=T), 100), recent_reb=rep(0, 100))
p1 %>%
boot_predict(pred_data, R = 10000, condense = F) %>%
ggplot(aes(x=theta_diff_lag, y=estimate, ymin = estimate_conf.low, ymax = estimate_conf.high)) +
geom_line() +
geom_ribbon(alpha=0.1)
pred_data$pred = predict(p1, pred_data, type='response', se.fit = T)$fit
pred_data$se = predict(p1, pred_data, type='response', se.fit = T)$se.fit
pred_data$upper <- pred_data$pred + 1.96*pred_data$se
pred_data$lower <- pred_data$pred - 1.96*pred_data$se
ggplot(pred_data, aes(x=theta_lag, y=pred)) + geom_line() + geom_line(aes(x=theta_lag, y=upper), color="red") + geom_line(aes(x=theta_lag, y=lower), color="red")
# binary ivs ----
p4 <- glm(new_mil ~ downgrade_lag + gdpgr_lag + durable + recent_reb, data=cyears, family=binomial("logit"))
p5 <- glm(new_mil ~ theta_shock3 + gdpgr_lag + durable + recent_reb, data=cyears, family=binomial("logit"))
p6 <- glm(new_mil ~ theta_shock5 + gdpgr_lag + durable + recent_reb, data=cyears, family=binomial("logit"))
p7 <- glm(new_mil ~ nonviol_repress_lag3 + gdpgr_lag + durable + recent_reb, data=cyears, family=binomial("logit"))
p8 <- glm(new_mil ~ nonviol_repress_lag3 + gdpgr_lag + durable + recent_reb, data=cyears, family=binomial("logit"))
msummary(list(p4,p5,p6,p7), 'markdown', coef_omit = 'gwno_loc', stars = T, title = 'Probit Models')
p3 <- bife(new_mil ~ theta_diff_lag + gdpgr_lag + gdppc_lag + durable + recent_reb | gwno_loc, data=cyears, model="logit")
p3bc <- bias_corr(p3, L=1L)
summary(p3bc)
summary(get_APEs(p3bc))
pred5 <- with(cyears, data.frame(theta_shock3=0:1,
gdpgr_lag=mean(gdpgr_lag, na.rm=T),
durable=mean(durable, na.rm=T),
recent_reb=0))
p5 %>%
augment(newdata = pred5, type.predict="response") %>%
mutate(upper = .fitted + 1.96 * .se.fit,
lower = .fitted - 1.96 * .se.fit) %>%
ggplot(aes(factor(theta_shock3), .fitted)) +
geom_point(size = 1.5) +
geom_errorbar(aes(ymin = lower, ymax = upper), width = .2)
p9 <- glm(thyne_attempt ~ theta_shock5 + gdpgr_lag + durable + recent_reb, data=cyears, family=binomial("logit"))
summary(p9)
pred9 <- with(cyears, data.frame(theta_shock5=0:1,
gdpgr_lag=mean(gdpgr_lag, na.rm=T),
durable=mean(durable, na.rm=T),
recent_reb=0))
p9 %>%
augment(newdata = pred9, type.predict = "response") %>%
mutate(upper = .fitted + 1.96 * .se.fit,
lower = .fitted - 1.96 * .se.fit) %>%
ggplot(aes(factor(theta_shock5), .fitted)) +
geom_point(size = 1.5) +
geom_errorbar(aes(ymin = lower, ymax = upper), width = .2)
library(finalfit)
pred5 <- with(cyears, data.frame(theta_shock3=0:1,
gdpgr_lag=mean(gdpgr_lag, na.rm=T),
durable=mean(durable, na.rm=T),
recent_reb=0))
p5 %>%
augment(newdata = pred5, type.predict="response") %>%
mutate(upper = .fitted + 1.96 * .se.fit,
lower = .fitted - 1.96 * .se.fit) %>%
ggplot(aes(factor(theta_shock3), .fitted)) +
geom_point(size = 1.5) +
geom_errorbar(aes(ymin = lower, ymax = upper), width = .2)
p5 %>%
boot_predict(pred5, R = 1000, condense = F) %>%
ggplot(aes(factor(theta_shock3), estimate)) +
geom_point(size = 1.5) +
geom_errorbar(aes(ymin = estimate_conf.low, ymax = estimate_conf.high), width = .2)
# ---- end of file: /analysis/jun2020_models.R (repo: dbowden/defection_manuscript, license: no_license) ----
#Load Data.table library
library("data.table")
#set file to filename variable.
filename <- "./household_power_consumption.txt"
# Read data into variable "read" with read.table function.
read <- read.table(filename, sep = ";", na.strings = "?", header = TRUE)
# Subset to the two days of interest (1 Feb 2007 and 2 Feb 2007) with subset.data.frame.
# The Date column is still character at this point, so the comparison is done on the raw
# "d/m/Y" strings rather than on converted dates.
subread <- subset.data.frame(read, ((read$Date == "1/2/2007")) | (read$Date == "2/2/2007") )
# Combine the Date and Time columns and convert them to POSIXlt date-times with strptime.
subread$Date <- strptime(paste(subread$Date,subread$Time), "%d/%m/%Y %H:%M:%S")
# Set up 2 rows & 2 columns for the charts
par(mfrow = c(2, 2))
# Set all of the charts to be created with the next block of code.
with(subread, {
# Create plot 1
plot(subread$Date, subread$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
# Create plot 2
plot(subread$Date, subread$Voltage, type="l", xlab="datetime", ylab="Voltage")
# Create plot 3's base
plot(subread$Date, subread$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
# Add line 1 to plot 3
lines(subread$Date, subread$Sub_metering_1, col = "black")
# Add line 2 to plot 3
lines(subread$Date, subread$Sub_metering_2, col = "red")
# Add line 3 to plot 3
lines(subread$Date, subread$Sub_metering_3, col = "blue")
# Add a legend to plot 3
legend("topright",lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# Add plot 4
plot(subread$Date, subread$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
# Write to png file.
dev.copy(png, file = "plot4.png")
# Deactivate png visual device
dev.off()
# ---- end of file: /plot4.R (repo: brent-halen/ExData_Plotting1, license: no_license) ----
# Q4: Across the United States, how have emissions from coal combustion-related sources changed from 1999-2008?
# load dplyr lib
library(dplyr)
# read data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Filter SCC table by "coal"
filteredSCC <- filter(SCC, grepl("coal", SCC$EI.Sector, ignore.case = TRUE))
# Convert SCC factor to list of strings
listOfFips <- as.character(filteredSCC$SCC)
# Filter NEI table by SCC being one of "coal" SCCs
filteredNEI <- filter(NEI, NEI$SCC %in% listOfFips)
# Create summary table of total emissions by year
by_year <- group_by(filteredNEI, year)
by_year_summary <- summarize_each(by_year, c("sum"), Emissions)
# set up png device
png(file = "project2_plot4.png", width = 480, height = 480, units="px", type="windows")
par(cex.axis=.75, cex.lab=.75, cex.main=.9)
# create linear regression line
by_year_model <- lm(Emissions ~ year, by_year_summary)
with(by_year_summary, {
# plot data
plot(Emissions ~ year, xlab = "Year", ylab = "Emissions from Coal (tons)", col = "steelblue", pch = 19, main = "Emissions From Coal in the US")
abline(by_year_model, lwd = 1, col = "steelblue")
})
# close png device
dev.off()
# ---- end of file: /project2_plot4.R (repo: data-sci-1/ExData_Project2, license: no_license) ----
# Fractals
library(RColorBrewer)
recurs <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]))
}
else {
vec <- (3*one+two)/4 - one
recurs(one,one+vec,h-1)
recurs(one+vec,one+vec+rot%*%vec,h-1)
recurs(one+vec+rot%*%vec,two-vec+rot%*%vec,h-1)
recurs(two-vec+rot%*%vec,two-vec,h-1)
recurs(two-vec,two,h-1)
}
}
start <- function(h) {
recurs(c(0,0),c(0,1),h)
recurs(c(0,1),c(1,1),h)
recurs(c(1,1),c(1,0),h)
recurs(c(1,0),c(0,0),h)
}
rot <- cbind(c(0,-1),c(1,0))
plot(c(0,1),c(0,1),type='n') # Increase size of Quartz window
start(4) # Adjust argument for different complexity
recurs2 <- function(one,two,h,color) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]),lwd=.5,col=colorRampPalette(brewer.pal(9,'GnBu'))(256)[color])
}
else {
vec <- (2*one+two)/3 - one
new <- 0*c(rnorm(1,0,sqrt(vec[1]^2+vec[2]^2)))
recurs2(one,one+vec,h-1,color)
recurs2(one+vec,one+vec+rot2%*%vec+new,h-1,color)
recurs2(one+vec+rot2%*%vec+new,one+2*vec,h-1,color)
recurs2(one+2*vec,two,h-1,color)
}
}
start2 <- function(h,add=F,color=1,scale=1) {
if(add==F) {
plot(c(-.5,1),c(-.2,1.2),type='n')
}
one <- c(0,0)
two <- c(0,1)
three <- rot2%*%c(0,-1)+c(0,1)
cent <- c(1/(2*sqrt(3)),1/2)
recurs2(cent+scale*(one-cent),cent+scale*(two-cent),h,color)
recurs2(cent+scale*(two-cent),cent+scale*(three-cent),h,color)
recurs2(cent+scale*(three-cent),cent+scale*(one-cent),h,color)
}
ang <- pi/3
rot2 <- cbind(c(cos(ang),sin(ang)),c(-sin(ang),cos(ang)))
start2(4,T) # Adjust argument for different complexity
for (i in 1:5) {
start2(i,ifelse(i==1,F,T),col=i)
}
start2(0,T)
for (i in 1:199) {
start2(2,ifelse(i==1,F,T),col=i,scale=2-i/100)
}
recurs12 <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]))
}
else {
vec <- (3*one+two)/4 - one
recurs21(one,one+vec,h-1)
recurs21(one+vec,one+vec+rot%*%vec,h-1)
recurs21(one+vec+rot%*%vec,two-vec+rot%*%vec,h-1)
recurs21(two-vec+rot%*%vec,two-vec,h-1)
recurs21(two-vec,two,h-1)
}
}
recurs21 <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]),lwd=.5)
}
else {
vec <- (2*one+two)/3 - one
recurs12(one,one+vec,h-1)
recurs12(one+vec,one+vec+rot2%*%vec,h-1)
recurs12(one+vec+rot2%*%vec,one+2*vec,h-1)
recurs12(one+2*vec,two,h-1)
}
}
start12 <- function(h) {
plot(c(-.2,1.2),c(-.2,1.2),type='n')
recurs12(c(0,0),c(0,1),h)
recurs12(c(0,1),c(1,1),h)
recurs12(c(1,1),c(1,0),h)
recurs12(c(1,0),c(0,0),h)
}
start21 <- function(h) {
plot(c(-.5,1),c(-.2,1.2),type='n')
recurs21(c(0,0),c(0,1),h)
recurs21(c(0,1),rot2%*%c(0,-1)+c(0,1),h)
recurs21(rot2%*%c(0,-1)+c(0,1),c(0,0),h)
}
start12(7)
start21(4)
recurs3 <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]))
}
else {
vec <- (3*one+two)/4 - one
recurs(one,one+vec,h-1)
recurs(one+vec,one+vec+rot3%*%vec,h-1)
recurs(one+vec+rot3%*%vec,two-vec+rot3%*%vec,h-1)
recurs(two-vec+rot3%*%vec,two-vec,h-1)
recurs(two-vec,two,h-1)
}
}
start3 <- function(h) {
plot(c(-.5,1.5),c(-.5,1.5),type='n')
recurs3(c(0,0),c(0,1),h)
recurs3(c(0,1),c(1,1),h)
recurs3(c(1,1),c(1,0),h)
recurs3(c(1,0),c(0,0),h)
}
rot3 <- -rot
start3(3) # Adjust argument for different complexity
recurs123 <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]))
}
else {
vec <- (3*one+two)/4 - one
recurs231(one,one+vec,h-1)
recurs231(one+vec,one+vec+rot%*%vec,h-1)
recurs231(one+vec+rot%*%vec,two-vec+rot%*%vec,h-1)
recurs231(two-vec+rot%*%vec,two-vec,h-1)
recurs231(two-vec,two,h-1)
}
}
recurs231 <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]),lwd=.5)
}
else {
vec <- (2*one+two)/3 - one
recurs312(one,one+vec,h-1)
recurs312(one+vec,one+vec+rot2%*%vec,h-1)
recurs312(one+vec+rot2%*%vec,one+2*vec,h-1)
recurs312(one+2*vec,two,h-1)
}
}
recurs312 <- function(one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]))
}
else {
vec <- (3*one+two)/4 - one
recurs123(one,one+vec,h-1)
recurs123(one+vec,one+vec+rot3%*%vec,h-1)
recurs123(one+vec+rot3%*%vec,two-vec+rot3%*%vec,h-1)
recurs123(two-vec+rot3%*%vec,two-vec,h-1)
recurs123(two-vec,two,h-1)
}
}
start123 <- function(h) {
plot(c(-.2,1.2),c(-.2,1.2),type='n')
recurs123(c(0,0),c(0,1),h)
recurs123(c(0,1),c(1,1),h)
recurs123(c(1,1),c(1,0),h)
recurs123(c(1,0),c(0,0),h)
}
start231 <- function(h) {
plot(c(-.5,1),c(-.2,1.2),type='n')
recurs231(c(0,0),c(0,1),h)
recurs231(c(0,1),rot2%*%c(0,-1)+c(0,1),h)
recurs231(rot2%*%c(0,-1)+c(0,1),c(0,0),h)
}
start312 <- function(h) {
plot(c(-.5,1.5),c(-.5,1.5),type='n')
recurs312(c(0,0),c(0,1),h)
recurs312(c(0,1),c(1,1),h)
recurs312(c(1,1),c(1,0),h)
recurs312(c(1,0),c(0,0),h)
}
start123(5)
start231(5)
start312(6)
f1 <- function (ord,one,two,h) {
if (h==0) {
lines(c(one[1],two[1]),c(one[2],two[2]))
}
else {
func <- fn[[1]]
print(func)
vec <- (3*one+two)/4 - one
print(h)
func(c(ord[-1],ord[1]),one,one+vec,h-1)
func(c(ord[-1],ord[1]),one+vec,one+vec+c(-vec[2],vec[1]),h-1)
func(c(ord[-1],ord[1]),one+vec+c(-vec[2],vec[1]),two-vec+c(-vec[2],vec[1]),h-1)
func(c(ord[-1],ord[1]),two-vec+c(-vec[2],vec[1]),two-vec,h-1)
func(c(ord[-1],ord[1]),two-vec,two,h-1)
}
}
# Could try matrices instead of different functions, then call same function
order <- c(1)
fn <- c(f1)
start <- function (ord,startx,starty,endx,endy,h) {
plot(c(-1,1),c(0,1),type='n')
for (i in 1:length(startx)) {
as.function(fn[[1]])(c(ord[-1],ord[1]),c(startx[i],starty[i]),c(endx[i],endy[i]),h)
}
}
start(1,0,0,0,1,1)
mode(as.function(fn[1]))
as.function(fn[[1]])
f1(1,c(0,0),c(0,1),2)
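## Sketch of the "matrices instead of different functions" idea from the comment
## above: one recursive generator parameterized by a list of rotation matrices,
## cycling through them by recursion depth. This is a hypothetical rewrite, not
## part of the original script; with list(rot) it reproduces recurs() exactly.
recursM <- function(one, two, h, rots, k = 1) {
  if (h == 0) {
    lines(c(one[1], two[1]), c(one[2], two[2]))
  }
  else {
    rotk <- rots[[k]]                             # rotation used at this depth
    nextk <- if (k == length(rots)) 1 else k + 1  # cycle to the next matrix
    vec <- (3*one + two)/4 - one
    recursM(one, one + vec, h - 1, rots, nextk)
    recursM(one + vec, one + vec + rotk %*% vec, h - 1, rots, nextk)
    recursM(one + vec + rotk %*% vec, two - vec + rotk %*% vec, h - 1, rots, nextk)
    recursM(two - vec + rotk %*% vec, two - vec, h - 1, rots, nextk)
    recursM(two - vec, two, h - 1, rots, nextk)
  }
}
startM <- function(h, rots) {
  plot(c(-0.5, 1.5), c(-0.5, 1.5), type = 'n')
  recursM(c(0,0), c(0,1), h, rots)
  recursM(c(0,1), c(1,1), h, rots)
  recursM(c(1,1), c(1,0), h, rots)
  recursM(c(1,0), c(0,0), h, rots)
}
startM(3, list(rot, rot3)) # alternate the two square-generator rotations by depth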
# ---- end of file: /Other/Geometry/Fract/Fractals.R (repo: chacemcneil/Personal, license: no_license) ----
## code to prepare `vars.ejscreen.acs` dataset
# mytables <- c("B01001", "B03002", "B15002", 'B23025', "B25034", "C16002", "C17002")
# get.table.info(mytables)
# ID title
# 1 B01001 SEX BY AGE
# 2 B03002 HISPANIC OR LATINO ORIGIN BY RACE
# 3 B15002 SEX BY EDUCATIONAL ATTAINMENT FOR THE POPULATION 25 YEARS AND OVER
# 4 B23025 EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# 5 B25034 YEAR STRUCTURE BUILT
# 6 C16002 HOUSEHOLD LANGUAGE BY HOUSEHOLD LIMITED ENGLISH SPEAKING STATUS
# 7 C17002 RATIO OF INCOME TO POVERTY LEVEL IN THE PAST 12 MONTHS
# get.field.info(mytables[4])[,1:4] # get.field.info('b23025')[,1:4]
# B23025.004 Employed unemployed
# B23025.003 Civilian labor force unemployedbase
# essential ones are these for ejscreen 2.1:
vars.ejscreen.acs <- ejscreenformulas$acsfieldname[!is.na(ejscreenformulas$acsfieldname)]
# former, more extensive list, but not really needed:
# vars.ejscreen.acs <- c(
# "B01001.001", "B01001.003", "B01001.004", "B01001.005", "B01001.006",
# "B01001.020", "B01001.021", "B01001.022", "B01001.023", "B01001.024",
# "B01001.025", "B01001.027", "B01001.028", "B01001.029", "B01001.030",
# "B01001.044", "B01001.045", "B01001.046", "B01001.047", "B01001.048",
# "B01001.049",
#
# "B03002.001", "B03002.002", "B03002.003", "B03002.004",
# "B03002.005", "B03002.006", "B03002.007", "B03002.008", "B03002.009",
# "B03002.010", "B03002.011", "B03002.012", "B03002.013", "B03002.014",
# "B03002.015", "B03002.016", "B03002.017", "B03002.018", "B03002.019",
# "B03002.020", "B03002.021",
#
# "B15002.001", "B15002.003", "B15002.004",
# "B15002.005", "B15002.006", "B15002.007", "B15002.008", "B15002.009",
# "B15002.010", "B15002.020", "B15002.021", "B15002.022", "B15002.023",
# "B15002.024", "B15002.025", "B15002.026", "B15002.027",
#
# "B16001.001",
# "B16001.002", "B16001.003", "B16001.005", "B16001.006", "B16001.008",
# "B16001.009", "B16001.011", "B16001.012", "B16001.014", "B16001.015",
# "B16001.017", "B16001.018", "B16001.020", "B16001.021", "B16001.023",
# "B16001.024", "B16001.026", "B16001.027", "B16001.029", "B16001.030",
# "B16001.032", "B16001.033", "B16001.035", "B16001.036", "B16001.038",
# "B16001.039", "B16001.041", "B16001.042", "B16001.044", "B16001.045",
# "B16001.047", "B16001.048", "B16001.050", "B16001.051", "B16001.053",
# "B16001.054", "B16001.056", "B16001.057", "B16001.059", "B16001.060",
# "B16001.062", "B16001.063", "B16001.065", "B16001.066", "B16001.068",
# "B16001.069", "B16001.071", "B16001.072", "B16001.074", "B16001.075",
# "B16001.077", "B16001.078", "B16001.080", "B16001.081", "B16001.083",
# "B16001.084", "B16001.086", "B16001.087", "B16001.089", "B16001.090",
# "B16001.092", "B16001.093", "B16001.095", "B16001.096", "B16001.098",
# "B16001.099", "B16001.101", "B16001.102", "B16001.104", "B16001.105",
# "B16001.107", "B16001.108", "B16001.110", "B16001.111", "B16001.113",
# "B16001.114", "B16001.116", "B16001.117", "B16001.119",
#
# "C16002.001",
# "C16002.003", "C16002.004", "C16002.006", "C16002.007", "C16002.009",
# "C16002.010", "C16002.012", "C16002.013",
#
# "C17002.001", "C17002.002",
# "C17002.003", "C17002.004", "C17002.005", "C17002.006", "C17002.007",
# "C17002.008",
#
# "B25034.001", "B25034.002", "B25034.003", "B25034.004",
# "B25034.005", "B25034.006", "B25034.007", "B25034.008", "B25034.009",
# "B25034.010",
#
# "B23025.001", "B23025.002", "B23025.003", "B23025.004",
# "B23025.005", "B23025.006", "B23025.007"
# )
# get.field.info(mytables[4])[,1:4]
# metadata <- list(ejscreen_releasedate = 'October 2022', ejscreen_version = '2.1', ACS_version = '2016-2020', ACS_releasedate = '3/17/2022')
vars.ejscreen.acs <- add_metadata(vars.ejscreen.acs)
# attributes(geoformat2020) <- c(attributes(geoformat2020), metadata)
usethis::use_data(vars.ejscreen.acs, overwrite = TRUE)
|
/data-raw/vars-ejscreen-acs.R
|
no_license
|
ejanalysis/ejscreen
|
R
| false | false | 4,137 |
r
|
#Set the current directory
setwd("C:/Coursera/R")
#Read the data from household_power_consumption.txt file
householdData <- read.csv("./household/household_power_consumption.txt", header=T, sep=';', na.strings="?")
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
# Subset of Data
householdSubset <- subset(householdData, Date %in% c("1/2/2007","2/2/2007"))
# change class of all columns to correct class
# Date <- as.Date(householdSubset$Date, format="%d/%m/%Y")
globalActivePower <- as.numeric(householdSubset$Global_active_power)
## plot histogram of global active power for those 2 days
png("./household/plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.off()
|
/plot1.R
|
no_license
|
pkssd/ExData_Plotting1
|
R
| false | false | 807 |
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12457037567309e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615777995-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 348 |
r
|
# Compare two strategies
library(fPortfolio)
infile=read.csv(".Close50.csv")
tail(infile,2)
dateID=as.POSIXlt(as.Date(infile[,1]), format='%Y-%m-%d')
myData0=infile[,-1]
rownames(myData0)=dateID
myData0=as.timeSeries(myData0)
tail(myData0)
ncol(myData0)
myData=returns(myData0)[,-1]*100
ID=as.character(read.table("file_twii50.csv",sep=",")$V2)
colnames(myData)=ID
head(myData)
dim(myData)
#####=== Portfolio 1. GMVP Portfolio:
SpecGMVP= portfolioSpec()
myPort.GMVP=minriskPortfolio(data = myData,spec = SpecGMVP, constraints = "LongOnly")
print(myPort.GMVP)
#####=== Portfolio 2. Tangency Portfolio: maximal Sharpe ratio
SpecTangency = portfolioSpec()
myPort.tangency=tangencyPortfolio(data = myData,spec = SpecTangency, constraints = "LongOnly")
print(myPort.tangency)
#### Plotting Results
#==== 1. Plotting by comparing Optimal Results of two+ Portfolios
col = divPalette(ncol(myData), "RdBu")
dev.new();par(mfrow=c(3,2))
weightsPie(myPort.GMVP, radius = 0.7, col = divPalette(ncol(myData), "RdBu"),cex=5)
mtext(text = "GMVP", side = 3, line = 1.5,font = 2, cex = 0.7, adj = 0)
weightsPie(myPort.tangency, radius = 0.7, col = divPalette(ncol(myData), "RdBu"))
mtext(text = "Tangency MV Portfolio", side = 3, line = 1.5,font = 2, cex = 0.7, adj = 0)
weightedReturnsPie(myPort.GMVP, radius = 0.7, col = divPalette(ncol(myData), "PRGn"))
mtext(text = "GMVP", side = 3, line = 1.5,font = 2, cex = 0.7, adj = 0)
weightedReturnsPie(myPort.tangency, radius = 0.7, col = divPalette(ncol(myData), "PRGn"))
mtext(text = "Tangency MV Portfolio", side = 3, line = 1.5,font = 2, cex = 0.7, adj = 0)
covRiskBudgetsPie(myPort.GMVP, radius = 0.7, col = divPalette(ncol(myData), "Spectral"))
mtext(text = "GMVP", side = 3, line = 1.5,font = 2, cex = 0.7, adj = 0)
covRiskBudgetsPie(myPort.tangency, radius = 0.7, col = divPalette(ncol(myData), "Spectral"))
mtext(text = "Tangency MV Portfolio", side = 3, line = 1.5,font = 2, cex = 0.7, adj = 0)
par(mfrow=c(1,1))
### Discussion: The limitation of visualization
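# A hedged follow-up to the pie charts above: reading exact weights off pies is
# hard, so a plain side-by-side table can complement them. This sketch assumes
# the fPortfolio extractor getWeights(); rounding to 3 decimals and sorting by
# the GMVP weight are arbitrary choices.
weightTable <- data.frame(
  GMVP     = round(getWeights(myPort.GMVP), 3),
  Tangency = round(getWeights(myPort.tangency), 3)
)
weightTable[order(-weightTable$GMVP), ]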
|
/05-專題/2_Portfolio/03_mvCompare.R
|
no_license
|
LINS2000/R-Class
|
R
| false | false | 2,042 |
r
|
#' Winsorize a numeric vector
#'
#' @param x A vector of values
#' @param cutpoints Cutpoints under and above which are defined outliers. Default is (median - five times interquartile range, median + five times interquartile range). Compared to bottom and top percentile, this takes into account the whole distribution of the vector.
#' @param probs A vector of probabilities that can be used instead of cutpoints. Quantiles are computed as the inverse of the empirical distribution function (type = 1)
#' @param replace Values by which outliers are replaced. Default to cutpoints. A frequent alternative is NA.
#' @param verbose Boolean. Should the percentage of replaced values be printed?
#' @examples
#' v <- c(1:4, 99)
#' winsorize(v)
#' winsorize(v, replace = NA)
#' winsorize(v, probs = c(0.01, 0.99))
#' winsorize(v, cutpoints = c(1, 50))
#' @export
winsorize <- function(x, probs = NULL, cutpoints = NULL , replace = c(cutpoints[1], cutpoints[2]), verbose = TRUE){
dummy = is.integer(x)
if (!is.null(probs)){
stopifnot(is.null(cutpoints))
stopifnot(length(probs)==2)
cutpoints <- stats::quantile(x, probs, type = 1, na.rm = TRUE)
} else if (is.null(cutpoints)){
l <- stats::quantile(x, c(0.25, 0.50, 0.75), type = 1, na.rm = TRUE)
cutpoints <- c(l[2]-5*(l[3]-l[1]), l[2]+5*(l[3]-l[1]))
} else{
stopifnot(length(cutpoints)==2)
}
if (is.integer(x)) cutpoints <- round(cutpoints)
bottom <- x < cutpoints[1]
top <- x > cutpoints[2]
if (verbose){
length <- length(x)
message(paste(sprintf("%3.2f", 100*sum(bottom, na.rm = TRUE)/length),"% observations replaced at the bottom"))
message(paste(sprintf("%3.2f", 100*sum(top, na.rm = TRUE)/length),"% observations replaced at the top"))
}
x[bottom] <- replace[1]
x[top] <- replace[2]
if (dummy){
x <- as.integer(x)
}
x
}
#' @export
#' @rdname winsorize
winsorise <- winsorize
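# Hedged usage sketch (kept as a comment, not exported with the package):
# winsorize every numeric column of a data.frame `df` at the 1st/99th
# percentiles, leaving non-numeric columns untouched.
# df[] <- lapply(df, function(col) {
#   if (is.numeric(col)) winsorize(col, probs = c(0.01, 0.99), verbose = FALSE) else col
# })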
|
/R/winsorize.R
|
no_license
|
cran/statar
|
R
| false | false | 1,933 |
r
|
setwd( "C:/Users/James.Thorson/Desktop/Project_git/2018_FSH556/Week 7 -- spatiotemporal models/Lab" )
#########################
# Spatial Gompertz model
# SEE: James T. Thorson, Hans Skaug, Kasper Kristensen, Andrew O. Shelton, Eric J. Ward, John Harms, Jim Benante. In press. The importance of spatial models for estimating the strength of density dependence. Ecology.
########################
# load libraries
library(INLA)
library(TMB)
library(RandomFields)
library(raster)
library(RANN)
source( "Sim_Gompertz_Fn.R" )
# Simulate data
set.seed( 2 )
Sim_List = Sim_Gompertz_Fn( n_years=10, n_stations=1000, SpatialScale=0.1, SD_O=0.4, SD_E=1, SD_extra=0, rho=0.5, logMeanDens=1, phi=-2, Loc=NULL )
DF = Sim_List[["DF"]]
loc_xy_orig = loc_xy = Sim_List[["Loc"]]
# Reduce sample sizes to 100 per year
Which2Keep = sample(1:nrow(DF), size=100*Sim_List$n_years, replace=FALSE)
Which2Drop = setdiff(1:nrow(DF),Which2Keep)
DF[Which2Drop,'Simulated_example'] = NA
# Reduce number of stations -- OPTIONAL
n_knots = 50
if( n_knots < nrow(loc_xy) ){
knots_xy = kmeans( x=loc_xy_orig, centers=n_knots )
# Modify data
loc_xy = knots_xy$centers
DF[,'Site'] = knots_xy$cluster[DF[,'Site']]
}
# Build SPDE object using INLA (must pass mesh$idx$loc when supplying Boundary)
mesh = inla.mesh.create( loc_xy, refine=TRUE, extend=-0.5 )
spde = inla.spde2.matern( mesh )
# Visualize mesh and predictive process
plot(mesh)
points( loc_xy_orig, cex=1.5, pch=20 )
points( loc_xy, cex=2, pch=3, col="green", lwd=5)
# Generate grid to visualize density
vizloc_xy = expand.grid( x=seq(0,1,by=0.001), y=seq(0,1,by=0.001) )
knots_xy = nn2( data=loc_xy_orig, query=vizloc_xy, k=1 )
# Plot densities
par( mfrow=c(2,5), mar=c(2,2,2,0), mgp=c(1.5,0.25,0) )
for( tI in 1:Sim_List$n_years ){
vizTheta_xy = array(Sim_List$Theta[ cbind(knots_xy$nn.idx,tI) ], dim=c(1001,1001) )
rasterTheta_xy = raster( vizTheta_xy )
plot( rasterTheta_xy, xlim=c(0,1), ylim=c(0,1), main=paste0("Year ",tI) )
}
###################
#
# Parameter estimation
#
###################
#####################
# Version 0 -- Sweep upstream to downstream through time "State-space parameterization"
#####################
Version = "spatial_gompertz_state_as_random"
# Compile
compile( paste0(Version,".cpp") )
dyn.load( dynlib(Version) )
# Build inputs
X_xp = matrix( 1, ncol=1, nrow=mesh$n)
Data = list( n_i=nrow(DF), n_x=mesh$n, n_t=max(DF$Year), n_p=ncol(X_xp), x_s=mesh$idx$loc-1, c_i=DF[,'Simulated_example'], s_i=DF[,'Site']-1, t_i=DF[,'Year']-1, X_xp=X_xp, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
Parameters = list(alpha=c(0.0), phi=0.0, log_tau_U=1.0, log_tau_O=1.0, log_kappa=0.0, rho=0.5, log_D_xt=matrix(rnorm(mesh$n*Data$n_t),nrow=mesh$n,ncol=Data$n_t), Omega_input=rnorm(mesh$n))
Random = c("log_D_xt","Omega_input")
# Make object
Obj = MakeADFun(data=Data, parameters=Parameters, random=Random, hessian=FALSE, DLL=Version)
# Run optimizer
Opt0 = TMBhelper::Optimize( obj=Obj, lower=c(rep(-Inf,5),-0.999), upper=c(rep(Inf,5),0.999), getsd=TRUE, newtonsteps=1 )
# Get standard errors
Report0 = Obj$report()
H0 = Obj$env$spHess()
##################
# Version 3 -- Joint analysis using TMB functions "Innovations parameterization"
##################
Version = "spatial_gompertz"
# Compile
compile( paste0(Version,".cpp") )
dyn.load( dynlib(Version) )
# Build inputs
X_xp = matrix( 1, ncol=1, nrow=mesh$n)
Data = list( n_i=nrow(DF), n_x=mesh$n, n_t=max(DF$Year), n_p=ncol(X_xp), x_s=mesh$idx$loc-1, c_i=DF[,'Simulated_example'], s_i=DF[,'Site']-1, t_i=DF[,'Year']-1, X_xp=X_xp, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
Parameters = list(alpha=c(0.0), phi=0.0, log_tau_E=1.0, log_tau_O=1.0, log_kappa=0.0, rho=0.5, Epsilon_input=matrix(rnorm(mesh$n*Data$n_t),nrow=mesh$n,ncol=Data$n_t), Omega_input=rnorm(mesh$n))
Random = c("Epsilon_input","Omega_input")
# Make object
Obj <- MakeADFun(data=Data, parameters=Parameters, random=Random, hessian=FALSE, DLL=Version)
# Run optimizer
Opt3 = TMBhelper::Optimize( obj=Obj, lower=c(rep(-Inf,5),-0.999), upper=c(rep(Inf,5),0.999), getsd=TRUE, newtonsteps=1 )
# Get standard errors
Report3 = Obj$report()
H3 = Obj$env$spHess()
######## Compare results
# Check parameter estimates
unlist(Report0[c('Range','SigmaO','SigmaU','rho','phi')])
unlist(Report3[c('Range','SigmaO','SigmaE','rho','phi')])
Sim_List[["Parameters"]][c('SpatialScale','SigmaO','SigmaE','rho','phi')]
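# Hedged convenience sketch: stack the three comparisons above into one table.
# Column names come from the first row; Version 3 reports SigmaE where Version 0
# reports SigmaU, and the simulation truth lists SpatialScale where the reports
# list the derived Range, so columns line up by position only.
est_table <- rbind(
  Version0 = unlist(Report0[c('Range','SigmaO','SigmaU','rho','phi')]),
  Version3 = unlist(Report3[c('Range','SigmaO','SigmaE','rho','phi')]),
  Truth    = unlist(Sim_List[["Parameters"]][c('SpatialScale','SigmaO','SigmaE','rho','phi')])
)
round(est_table, 3)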
# Compare sparseness
sum( H0!=0 ) / prod(dim(H0))
sum( H3!=0 ) / prod(dim(H3))
# Show inner hessian
image( H0, main="Version0: Sweep through time" );
dev.new(); image(H3, main="Version3: TMB functions")
# Run times
Opt0$run_time
Opt3$run_time
|
/Week 7 -- spatiotemporal models/Lab/spatial_gompertz_estimation_example.R
|
no_license
|
tXiao95/2018_FSH556
|
R
| false | false | 4,702 |
r
|
setwd( "C:/Users/James.Thorson/Desktop/Project_git/2018_FSH556/Week 7 -- spatiotemporal models/Lab" )
#########################
# Spatial Gompertz model
# SEE: James T. Thorson, Hans Skaug, Kasper Kristensen, Andrew O. Shelton, Eric J. Ward, John Harms, Jim Benante. In press. The importance of spatial models for estimating the strength of density dependence. Ecology.
########################
# load libraries
library(INLA)
library(TMB)
library(RandomFields)
library(raster)
library(RANN)
source( "Sim_Gompertz_Fn.R" )
# Read data
set.seed( 2 )
Sim_List = Sim_Gompertz_Fn( n_years=10, n_stations=1000, SpatialScale=0.1, SD_O=0.4, SD_E=1, SD_extra=0, rho=0.5, logMeanDens=1, phi=-2, Loc=NULL )
DF = Sim_List[["DF"]]
loc_xy_orig = loc_xy = Sim_List[["Loc"]]
# Reduce sample sizes to 100 per year
Which2Keep = sample(1:nrow(DF), size=100*Sim_List$n_years, replace=FALSE)
Which2Drop = setdiff(1:nrow(DF),Which2Keep)
DF[Which2Drop,'Simulated_example'] = NA
# Reduce number of stations -- OPTIONAL
n_knots = 50
if( n_knots < nrow(loc_xy) ){
knots_xy = kmeans( x=loc_xy_orig, centers=n_knots )
# Modify data
loc_xy = knots_xy$centers
DF[,'Site'] = knots_xy$cluster[DF[,'Site']]
}
# Build SPDE object using INLA (must pass mesh$idx$loc when supplying Boundary)
mesh = inla.mesh.create( loc_xy, refine=TRUE, extend=-0.5 )
spde = inla.spde2.matern( mesh )
# Visualize mesh and predictive process
plot(mesh)
points( loc_xy_orig, cex=1.5, pch=20 )
points( loc_xy, cex=2, pch=3, col="green", lwd=5)
# Generate grid to visualize density
vizloc_xy = expand.grid( x=seq(0,1,by=0.001), y=seq(0,1,by=0.001) )
knots_xy = nn2( data=loc_xy_orig, query=vizloc_xy, k=1 )
# Plot densities
par( mfrow=c(2,5), mar=c(2,2,2,0), mgp=c(1.5,0.25,0) )
for( tI in 1:Sim_List$n_years ){
vizTheta_xy = array(Sim_List$Theta[ cbind(knots_xy$nn.idx,tI) ], dim=c(1001,1001) )
rasterTheta_xy = raster( vizTheta_xy )
plot( rasterTheta_xy, xlim=c(0,1), ylim=c(0,1), main=paste0("Year ",tI) )
}
###################
#
# Parameter estimation
#
###################
#####################
# Version 0 -- Sweep upstream to downstream through time "State-space parameterization"
#####################
Version = "spatial_gompertz_state_as_random"
# Compile
compile( paste0(Version,".cpp") )
dyn.load( dynlib(Version) )
# Build inputs
X_xp = matrix( 1, ncol=1, nrow=mesh$n)
Data = list( n_i=nrow(DF), n_x=mesh$n, n_t=max(DF$Year), n_p=ncol(X_xp), x_s=mesh$idx$loc-1, c_i=DF[,'Simulated_example'], s_i=DF[,'Site']-1, t_i=DF[,'Year']-1, X_xp=X_xp, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
Parameters = list(alpha=c(0.0), phi=0.0, log_tau_U=1.0, log_tau_O=1.0, log_kappa=0.0, rho=0.5, log_D_xt=matrix(rnorm(mesh$n*Data$n_t),nrow=mesh$n,ncol=Data$n_t), Omega_input=rnorm(mesh$n))
Random = c("log_D_xt","Omega_input")
# Make object
Obj = MakeADFun(data=Data, parameters=Parameters, random=Random, hessian=FALSE, DLL=Version)
# Run optimizer
Opt0 = TMBhelper::Optimize( obj=Obj, lower=c(rep(-Inf,5),-0.999), upper=c(rep(Inf,5),0.999), getsd=TRUE, newtonsteps=1 )
# Get standard errors
Report0 = Obj$report()
H0 = Obj$env$spHess()
##################
# Version 3 -- Joint analysis using TMB functions "Innovations parameterization"
##################
Version = "spatial_gompertz"
# Compile
compile( paste0(Version,".cpp") )
dyn.load( dynlib(Version) )
# Build inputs
X_xp = matrix( 1, ncol=1, nrow=mesh$n)
Data = list( n_i=nrow(DF), n_x=mesh$n, n_t=max(DF$Year), n_p=ncol(X_xp), x_s=mesh$idx$loc-1, c_i=DF[,'Simulated_example'], s_i=DF[,'Site']-1, t_i=DF[,'Year']-1, X_xp=X_xp, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
Parameters = list(alpha=c(0.0), phi=0.0, log_tau_E=1.0, log_tau_O=1.0, log_kappa=0.0, rho=0.5, Epsilon_input=matrix(rnorm(mesh$n*Data$n_t),nrow=mesh$n,ncol=Data$n_t), Omega_input=rnorm(mesh$n))
Random = c("Epsilon_input","Omega_input")
# Make object
Obj <- MakeADFun(data=Data, parameters=Parameters, random=Random, hessian=FALSE, DLL=Version)
# Run optimizer
Opt3 = TMBhelper::Optimize( obj=Obj, lower=c(rep(-Inf,5),-0.999), upper=c(rep(Inf,5),0.999), getsd=TRUE, newtonsteps=1 )
# Get standard errors
Report3 = Obj$report()
H3 = Obj$env$spHess()
######## Compare results
# Check parameter estimates
unlist(Report0[c('Range','SigmaO','SigmaU','rho','phi')])
unlist(Report3[c('Range','SigmaO','SigmaE','rho','phi')])
Sim_List[["Parameters"]][c('SpatialScale','SigmaO','SigmaE','rho','phi')]
# Compare sparseness
sum( H0!=0 ) / prod(dim(H0))
sum( H3!=0 ) / prod(dim(H0))
# Show inner hessian
image( H0, main="Version0: Sweep through time" );
dev.new(); image(H3, main="Version3: TMB functions")
# Run times
Opt0$run_time
Opt3$run_time
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.r
\name{is_converted_data_frame}
\alias{is_converted_data_frame}
\title{Test data set}
\usage{
is_converted_data_frame(x)
}
\arguments{
\item{x}{the object to test}
}
\value{
A flag (logical scalar) indicating whether converted data
}
\description{
Tests whether x is converted data that can be input into
JAGS, WinBUGS or OpenBUGS.
}
\examples{
is_converted_data_frame(trees)
is_converted_data_frame(convert_data(trees))
}
\seealso{
\code{\link{is_convertible_data}} and
\code{\link{convert_data}}
}
|
/man/is_converted_data_frame.Rd
|
no_license
|
poissonconsulting/datalist
|
R
| false | true | 584 |
rd
|
#' ---
#' title: Emodiversity Preliminary Report
#' author: Max
#' date: "`r Sys.Date()`"
#' output:
#' pdf_document
#' ---
#' `r knitr::opts_chunk$set(echo=FALSE,warning=FALSE,message=FALSE,cache=FALSE)`
library(ggplot2)
theme_thesis <- function(){
theme_bw() %+replace%
theme(
#line = element_line(colour="black"),
#text = element_text(colour="black"),
axis.title = element_text(size = 14),
axis.text = element_text(colour="black", size=10),
#strip.text = element_text(size=12),
# legend.key=element_rect(colour=NA, fill =NA),
panel.grid.major = element_line(colour = "grey90"),
panel.grid.minor = element_line(colour="grey90"),
# panel.border = element_rect(fill = NA, colour = "black", size=1),
panel.background = element_rect(fill = "white"),
strip.background=element_rect(fill="white")#,
#legend.title=element_blank()
# legend.position="none"
)
}
theme_set(theme_thesis())
library(data.table)
setDTthreads(4)
library(arrow)
setwd("/home/maxpe/Documents/diet/")
twnames <- c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'love', 'optimism', 'pessimism', 'sadness', 'surprise', 'trust')
liwcnames <- c("anger_liwc", "anxiety_liwc", "sadness_liwc", "posemo_liwc", "negemo_liwc", "social_liwc")
bintwnames <- paste0(twnames,"_bin")
cs <- read_feather("cs.feather")
cs[,(bintwnames):=lapply(.SD,function(x) as.integer(x>0.9)),.SDcols=twnames]
#' final number of users in the groups
cs[,.(gender_script=unique(gender_script),group=unique(group)),userid][,.N,list(group,gender_script)][order(group,gender_script)]
#' number of tweets per user
ggplot(cs[,.(number_tweets=.N),userid],aes(x=number_tweets)) + geom_histogram(bins=150,fill="white",colour="black") + ylab("count_users")
#' The minimum number of tweets
min(cs[,.(number_tweets=.N),userid]$number_tweets)
#' The maximum number of tweets
max(cs[,.(number_tweets=.N),userid]$number_tweets)
liwcdt1 <- cs[,lapply(.SD,mean),list(group),.SDcols=liwcnames]
liwcdt2 <- cs[,lapply(.SD,mean),list(group,period),.SDcols=liwcnames]
liwcdt3 <- cs[,lapply(.SD,mean),list(group,gender_script),.SDcols=liwcnames]
liwcdt4 <- cs[,lapply(.SD,mean),list(group,gender_script,period),.SDcols=liwcnames]
twdt1 <- cs[,lapply(.SD,mean),list(group),.SDcols=bintwnames]
twdt2 <- cs[,lapply(.SD,mean),list(group,period),.SDcols=bintwnames]
twdt3 <- cs[,lapply(.SD,mean),list(group,gender_script),.SDcols=bintwnames]
twdt4 <- cs[,lapply(.SD,mean),list(group,gender_script,period),.SDcols=bintwnames]
# rm(cs)
# plotting with ggplot2, too much for my laptop
# ggplot(csmelt,aes(y=value)) + geom_boxplot() + facet_grid(~group+variable)
#
# ggplot(csmelt,aes(y=value)) + geom_boxplot() + facet_grid(group~variable+factor(period,levels=c("pre","post")))
#
# ggplot(csmelt,aes(y=value)) + geom_boxplot() + facet_grid(group~variable+gender_script+factor(period,levels=c("pre","post")))
#' Emotions for both groups LIWC (all means)
#+ fig.height=12, fig.width=12
# ggplot(t_wide[variable %in% liwcnames], aes(x = group, ymin = `0%`, lower = `25%`, middle = `50%`, upper = `75%`, ymax = `100%`)) + geom_boxplot(stat = "identity") + facet_wrap(~variable,ncol = 2)
# ggplot(t_wide[variable %in% liwcnames], aes(x = group, y = `50%`)) + geom_point() + facet_wrap(~variable,ncol = 2)
#+ fig.height=12, fig.width=12
ggplot(melt(liwcdt1,id.vars = "group",measure.vars = liwcnames),aes(x=group,y=value)) + geom_col() + facet_wrap(~variable,ncol = 2,scales = "free")
#' Emotions for both groups LIWC during the two periods
#+ fig.height=12, fig.width=12
ggplot(melt(liwcdt2,id.vars = c("group","period"),measure.vars = liwcnames),aes(x=factor(interaction(group,period),levels=c("ano.pre","ano.post","bp.pre","bp.post")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 2,scales = "free")
#' Emotions for both groups LIWC per gender
#+ fig.height=12, fig.width=12
ggplot(melt(liwcdt3,id.vars = c("group","gender_script"),measure.vars = liwcnames),aes(x=factor(interaction(group,gender_script),levels=c("ano.female","ano.male","bp.female","bp.male","ano.unknown","bp.unknown")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 2,scales = "free")
#' Emotions for both groups LIWC per gender per period
#+ fig.height=12, fig.width=12
ggplot(melt(liwcdt4,id.vars = c("group","gender_script","period"),measure.vars = liwcnames),aes(x=factor(interaction(group,period,gender_script),levels=c("ano.pre.female","ano.pre.male","ano.post.female","ano.post.male","ano.pre.unknown","ano.post.unknown","bp.pre.female","bp.pre.male","bp.post.female","bp.post.male","bp.pre.unknown","bp.post.unknown")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 1,scales = "free")
#' Emotions for both groups SemEval (all means, binarized with a threshold of 0.9)
#+ fig.height=12, fig.width=12
ggplot(melt(twdt1,id.vars = "group",measure.vars = bintwnames),aes(x=group,y=value)) + geom_col() + facet_wrap(~variable,ncol = 2,scales = "free")
#' Emotions for both groups SemEval during the two periods
#+ fig.height=12, fig.width=12
ggplot(melt(twdt2,id.vars = c("group","period"),measure.vars = bintwnames),aes(x=factor(interaction(group,period),levels=c("ano.pre","ano.post","bp.pre","bp.post")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 2,scales = "free")
#' Emotions for both groups SemEval per gender
#+ fig.height=12, fig.width=12
ggplot(melt(twdt3,id.vars = c("group","gender_script"),measure.vars = bintwnames),aes(x=factor(interaction(group,gender_script),levels=c("ano.female","ano.male","bp.female","bp.male","ano.unknown","bp.unknown")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 2,scales = "free")
#' Emotions for both groups SemEval per gender per period
#+ fig.height=12, fig.width=12
ggplot(melt(twdt4,id.vars = c("group","gender_script","period"),measure.vars = bintwnames),aes(x=factor(interaction(group,period,gender_script),levels=c("ano.pre.female","ano.pre.male","ano.post.female","ano.post.male","ano.pre.unknown","ano.post.unknown","bp.pre.female","bp.pre.male","bp.post.female","bp.post.male","bp.pre.unknown","bp.post.unknown")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 1,scales = "free")
#' Comparison for overlapping categories of our two classifiers
#+ fig.height=12, fig.width=12
# cor(cs[,lapply(.SD,mean),userid,.SDcols=c("anger_liwc","anger_bin")][,.(anger_liwc,anger_bin)])
#
# cor(cs[,lapply(.SD,mean),userid,.SDcols=c("sadness_liwc","sadness_bin")][,.(sadness_liwc,sadness_bin)])
# one of them is enough, see below
# cor(cs[,.(posemo_bin_semeval=as.integer((joy_bin == 1 | love_bin == 1 | optimism_bin == 1 | trust_bin == 1 | anticipati_bin == 1)),userid,posemo_liwc)][,lapply(.SD,mean),userid,.SDcols=c("posemo_bin_semeval","posemo_liwc")][,.(posemo_bin_semeval,posemo_liwc)])
cs[,posemo_bin_semeval:=as.integer(joy_bin == 1 | love_bin == 1 | optimism_bin == 1 | trust_bin == 1 | anticipation_bin == 1)]
# cor(cs[,.(posemo_bin_semeval=as.integer(( joy_bin == 1 )),userid,posemo_liwc)][,lapply(.SD,mean),userid,.SDcols=c("posemo_bin_semeval","posemo_liwc")][,.(posemo_bin_semeval,posemo_liwc)])
oc <- cs[,lapply(.SD,mean),userid,.SDcols=c("sadness_liwc","sadness_bin","anger_liwc","anger_bin","posemo_bin_semeval","posemo_liwc")][,.(sadness_liwc,sadness_bin,anger_liwc,anger_bin,posemo_bin_semeval,posemo_liwc)]
lower<-round(cor(oc),2)
lower[lower.tri(round(cor(oc),2), diag=TRUE)]<-""
lower<-as.data.frame(lower)
# lower
library(xtable)
#' I created a posemo category for the SemEval classifier, out of all these categories:
#' joy_bin == 1 | love_bin == 1 | optimism_bin == 1 | trust_bin == 1 | anticipation_bin == 1
#+ cortable, results='asis'
print(xtable(lower), type="latex",comment = FALSE)
# x is a matrix containing the data
# method : correlation method. "pearson"" or "spearman"" is supported
# removeTriangle : remove upper or lower triangle
# results : if "html" or "latex"
# the results will be displayed in html or latex format
# corstars <-function(x, method=c("pearson", "spearman"), removeTriangle=c("upper", "lower"),
# result=c("none", "html", "latex")){
# #Compute correlation matrix
# require(Hmisc)
# x <- as.matrix(x)
# correlation_matrix<-rcorr(x, type=method[1])
# R <- correlation_matrix$r # Matrix of correlation coeficients
# p <- correlation_matrix$P # Matrix of p-value
#
# ## Define notions for significance levels; spacing is important.
# mystars <- ifelse(p < .0001, "****", ifelse(p < .001, "*** ", ifelse(p < .01, "** ", ifelse(p < .05, "* ", " "))))
#
# ## trunctuate the correlation matrix to two decimal
# R <- format(round(cbind(rep(-1.11, ncol(x)), R), 2))[,-1]
#
# ## build a new matrix that includes the correlations with their apropriate stars
# Rnew <- matrix(paste(R, mystars, sep=""), ncol=ncol(x))
# diag(Rnew) <- paste(diag(R), " ", sep="")
# rownames(Rnew) <- colnames(x)
# colnames(Rnew) <- paste(colnames(x), "", sep="")
#
# ## remove upper triangle of correlation matrix
# if(removeTriangle[1]=="upper"){
# Rnew <- as.matrix(Rnew)
# Rnew[upper.tri(Rnew, diag = TRUE)] <- ""
# Rnew <- as.data.frame(Rnew)
# }
#
# ## remove lower triangle of correlation matrix
# else if(removeTriangle[1]=="lower"){
# Rnew <- as.matrix(Rnew)
# Rnew[lower.tri(Rnew, diag = TRUE)] <- ""
# Rnew <- as.data.frame(Rnew)
# }
#
# ## remove last column and return the correlation matrix
# Rnew <- cbind(Rnew[1:length(Rnew)-1])
# if (result[1]=="none") return(Rnew)
# else{
# if(result[1]=="html") print(xtable(Rnew), type="html")
# else print(xtable(Rnew), type="latex")
# }
# }
#
# corstars(oc)
# Try at scattermatrix
# # Customize upper panel
# upper.panel<-function(x, y){
# points(x,y, pch=19)
# r <- round(cor(x, y), digits=2)
# txt <- paste0("R = ", r)
# usr <- par("usr"); on.exit(par(usr))
# par(usr = c(0, 1, 0, 1))
# text(0.5, 0.9, txt)
# }
# pairs(log(oc), lower.panel = NULL,
# upper.panel = upper.panel)
# pairs(log(oc), pch = 19, lower.panel = NULL)
# ggplot(melt(twdt4,id.vars = c("group","gender_script","period"),measure.vars = bintwnames),aes(x=factor(interaction(group,period,gender_script),levels=c("ano.pre.female","ano.pre.male","ano.post.female","ano.post.male","ano.pre.unknown","ano.post.unknown","bp.pre.female","bp.pre.male","bp.post.female","bp.post.male","bp.pre.unknown","bp.post.unknown")),y=value)) + geom_col() + facet_wrap(~variable,ncol = 1,scales = "free")
knitr::knit_exit()
# OLDER PART WITH RAW SCORES
csmelt <- read_feather("csmelt.feather")
#' Emotions for both groups Tweeteval
#+ fig.height=12, fig.width=12
t <- csmelt[,.(value=quantile(value,c(0, .25, .5, .75, 1))),by=c("variable","group")]
t <- cbind(t,data.table(name=c("0%","25%","50%","75%","100%")))
t_wide <- dcast(t,group+variable~name,value.var = "value")
ggplot(t_wide[variable %in% twnames], aes(x = group, ymin = `0%`, lower = `25%`, middle = `50%`, upper = `75%`, ymax = `100%`)) + geom_boxplot(stat = "identity") + facet_wrap(~variable,ncol = 2)
#' Adding before and after
#+ fig.height=12, fig.width=12
t_period <- csmelt[,.(value=quantile(value,c(0, .25, .5, .75, 1))),by=c("variable","group","period")]
t_period <- cbind(t_period,data.table(name=c("0%","25%","50%","75%","100%")))
t_period_wide <- dcast(t_period,group+variable+period~name,value.var = "value")
t_period_wide[,group.period:=factor(interaction(group,period),levels=c("ano.pre","ano.post","bp.pre","bp.post"))]
ggplot(t_period_wide[variable %in% twnames], aes(x = group.period, ymin = `0%`, lower = `25%`, middle = `50%`, upper = `75%`, ymax = `100%`)) + geom_boxplot(stat = "identity") + facet_wrap(~variable,ncol = 2)
#' Adding gender
#+ fig.height=12, fig.width=12
t_period_gender <- csmelt[,.(value=quantile(value,c(0, .25, .5, .75, 1))),by=c("variable","group","period","gender_script")]
t_period_gender <- cbind(t_period_gender,data.table(name=c("0%","25%","50%","75%","100%")))
t_period_gender_wide <- dcast(t_period_gender,group+variable+period+gender_script~name,value.var = "value")
t_period_gender_wide[,group.period.gender:=factor(interaction(group,period,gender_script),levels=c("ano.pre.female","ano.pre.male","ano.post.female","ano.post.male","ano.pre.unknown","ano.post.unknown","bp.pre.female","bp.pre.male","bp.post.female","bp.post.male","bp.pre.unknown","bp.post.unknown"))]
t_period_gender_wide[,period.gender:=factor(interaction(period,gender_script),levels=c("pre.female","pre.male","post.female","post.male","pre.unknown","post.unknown"))]
ggplot(t_period_gender_wide[group=="ano" & variable %in% twnames], aes(x = period.gender, ymin = `0%`, lower = `25%`, middle = `50%`, upper = `75%`, ymax = `100%`)) + geom_boxplot(stat = "identity") + facet_wrap(group~variable,ncol=2)
ggplot(t_period_gender_wide[group=="bp" & variable %in% twnames], aes(x = period.gender, ymin = `0%`, lower = `25%`, middle = `50%`, upper = `75%`, ymax = `100%`)) + geom_boxplot(stat = "identity") + facet_wrap(group~variable,ncol=2)
|
/02_descriptive_stats.R
|
no_license
|
srsrsunrui/diet_covid19
|
R
| false | false | 13,048 |
r
|
# url <- "https://raw.githubusercontent.com/rafalab/dslabs/master/inst/extdata/murders.csv"
# dest_file<- "data/murders.csv"
# download.file(url, destfile = dest_file)
library(dslabs)
library(tidyverse)
murders<- murders
murders <- murders %>% mutate(region = factor(region), rate = total / population * 10^5)
save (murders, file="rda/murders.rda")
murders %>% mutate(abb= reorder(abb, rate))%>%
ggplot(aes(abb, rate))+
geom_bar(width=0.5, stat="identity", color="black")+
coord_flip()
ggsave("figs/barplot.png")
########## In RStudio ########
# pick existing directory as new project
getwd() # to confirm current working directory
save(murders, file = "rda/murders.rda") # save into .rda file; .RData is also fine but less preferred
ggsave("figs/barplot.png") # save a plot generated by ggplot2 to a dir called "figs"
|
/download-data.R
|
no_license
|
catareyesvillegas/murders
|
R
| false | false | 825 |
r
|
# url <- "https://row.githubusercontent.com/rafalab/dslabs/master/inst/extdata/murders.csv"
# dest_file<- "data/murders.csv"
# download.file(url, destfile = dest_file)
library(dslabs)
library(tidyverse)
murders<- murders
murders <- murders %>% mutate(region= factor(region), rate= population * 10^5)
save (murders, file="rda/murders.rda")
murders %>% mutate(abb= reorder(abb, rate))%>%
ggplot(aes(abb, rate))+
geom_bar(width=0.5, stat="identity", color="black")+
coord_flip()
ggsave("figs/barplot.png")
########## In RStudio ########
# pick existing directory as new project
getwd() # to confirm current working directory
save() # save into .rda file, .RData is also fine but less preferred
ggsave("figs/barplot.png") # save a plot generated by ggplot2 to a dir called "figs"
|
# Most of this code is from a great tutorial from Bradley Boehmke
# (https://rpubs.com/bradleyboehmke/weather_graphic)
# but with minor style modifications and using a different data set
# Preprocessing & summarizing data
library(dplyr)
library(tidyr)
# Visualization package
library(ggplot2)
# Cities and airport codes for reference
# Hartford, KHFD
# New Haven, KHVN
# Stamford, KHPN
# New London, KGON
# Pull in the weather data that you scraped from Weather Underground
airp <- read.csv("KHVN.csv", stringsAsFactors=FALSE)
# Restructuring the data so it plays nice with ggplot2
airp2 <- gather(airp, "type", "temp", 4:12)
# Label the town for the chart title
town <- "New Haven"
# function to turn y-axis labels into degree formatted values
dgr_fmt <- function(x, ...) {
parse(text = paste(x, "*degree", sep = ""))
}
a <- dgr_fmt(seq(-20,100, by=10))
# Bringing in a package that will allow use of Google fonts
library(extrafont)
p <- ggplot(airp, aes(row, average_min_temp)) +
theme(plot.background = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
geom_linerange(airp, mapping=aes(x=row, ymin=record_min_temp, ymax=record_max_temp), colour = "sienna", alpha=.5)
p <- p +
geom_linerange(airp, mapping=aes(x=row, ymin=average_min_temp, ymax=average_max_temp), colour = "sienna1", alpha=.8)
p <- p +
geom_linerange(airp, mapping=aes(x=row, ymin=actual_min_temp, ymax=actual_max_temp), colour = "sienna4") +
geom_vline(xintercept = 0, colour = "sienna4", linetype=1, size=1)
# The colors used in the chart layers above can be replaced with any from
# http://sape.inf.usi.ch/quick-reference/ggplot2/colour
# Make the grid look pretty: white horizontal gridlines every 10 degrees
p <- p +
  geom_hline(yintercept = seq(-20, 100, by = 10), colour = "white", linetype = 1)
# Identifying the months based on number of days
p <- p +
#June - 30
geom_vline(xintercept = 30, colour = "wheat4", linetype=3, size=.5) +
#July - 31
geom_vline(xintercept = 61, colour = "wheat4", linetype=3, size=.5) +
# August - 31
geom_vline(xintercept = 92, colour = "wheat4", linetype=3, size=.5) +
# September - 30
geom_vline(xintercept = 122, colour = "wheat4", linetype=3, size=.5) +
# October 31
geom_vline(xintercept = 153, colour = "wheat4", linetype=3, size=.5) +
# November - 30
geom_vline(xintercept = 183, colour = "wheat4", linetype=3, size=.5) +
# December - 31
geom_vline(xintercept = 214, colour = "wheat4", linetype=3, size=.5) +
# January - 31
geom_vline(xintercept = 245, colour = "wheat4", linetype=3, size=.5) +
# February - 28
geom_vline(xintercept = 273, colour = "wheat4", linetype=3, size=.5) +
# March - 31
geom_vline(xintercept = 304, colour = "wheat4", linetype=3, size=.5) +
# April - 30
geom_vline(xintercept = 334, colour = "wheat4", linetype=3, size=.5) +
# May - 31
geom_vline(xintercept = 365, colour = "wheat4", linetype=3, size=.5) +
# June - 30
geom_vline(xintercept = 396, colour = "wheat4", linetype=3, size=.5) +
# July - 31
geom_vline(xintercept = 427, colour = "wheat4", linetype=3, size=.5)
# August - 31 (19 so far)
# Establishing the x axis
p <- p +
coord_cartesian(ylim = c(-20,100)) +
scale_y_continuous(breaks = seq(-20,100, by=10), labels = a) +
scale_x_continuous(expand = c(0, 0),
breaks = c(15,45,75,105,135,165,195,228,258,288,320,350, 380, 410, 440),
labels = c("JUN", "JUL", "AUG", "SEP", "OCT",
"NOV", "DEC", "JAN", "FEB", "MAR",
"APR", "MAY", "JUN", "JUL", "AUG"))
# Identifying the record-breaking days by comparing actual vs record
rlow3 <- airp[airp$actual_min_temp<=airp$record_min_temp,]
rhigh3 <- airp[airp$actual_max_temp>=airp$record_max_temp,]
# Adding them to the chart with specific colors
p <- p +
geom_point(data=rlow3, aes(x=row, y=record_min_temp), colour="blue1") +
geom_point(data=rhigh3, aes(x=row, y=record_max_temp), colour="red1")
# Adding a title based on the variable set above
title <- paste0(town, "'s weather since summer 2014")
# Setting the title
p <- p +
ggtitle(title) +
theme(plot.title=element_text(face="bold",hjust=.012,vjust=.8,colour="#3C3C3C",size=20, family="Lato")) +
annotate("text", x = 28, y = 98, label = "Temperature", size=4, fontface="bold", family="Lato Black")
# Now for the legend
p <- p +
annotate("segment", x = 65, xend = 65, y = -6, yend = 16, colour = "sienna", , alpha=.5, size=3) +
annotate("segment", x = 65, xend = 65, y = 0, yend = 10, colour = "sienna1", , alpha=.8, size=3) +
annotate("segment", x = 65, xend = 65, y = 2, yend = 8, colour = "sienna4", size=3) +
annotate("segment", x = 58, xend = 62, y = 10, yend = 10, colour = "gray30", size=.5) +
annotate("segment", x = 58, xend = 62, y = 0, yend = 0, colour = "gray30", size=.5) +
annotate("segment", x = 60, xend = 60, y = 10, yend = 0, colour = "gray30", size=.5) +
annotate("text", x = 32, y = 5, label = "AVERAGE RANGE", size=3, colour="gray30") +
annotate("segment", x = 68, xend = 72, y = 8, yend = 8, colour = "gray30", size=.5) +
annotate("segment", x = 68, xend = 72, y = 2, yend = 2, colour = "gray30", size=.5) +
annotate("segment", x = 70, xend = 70, y = 8, yend = 2, colour = "gray30", size=.5) +
annotate("text", x = 104, y = 5, label = "2014 - 2015 RANGE", size=3.5, colour="gray30") +
annotate("text", x = 42, y = 13, label = "RECORD HIGH", size=3, colour="gray30") +
annotate("text", x = 43, y = -3, label = "RECORD LOW", size=3, colour="gray30") +
annotate("segment", x = 67, xend = 76, y = 17, yend = 17, colour = "gray30", size=.5) +
annotate("segment", x = 67, xend = 76, y = -7, yend = -7, colour = "gray30", size=.5) +
annotate("point", x = 65, y = 17, colour = "red", size = 2) +
annotate("point", x = 65, y = -7, colour = "blue", size = 2) +
annotate("text", x = 106, y = 17, label = "NEW RECORD HIGH", size=3, colour="gray30") +
annotate("text", x = 106, y = -7, label = "NEW RECORD LOW", size=3, colour="gray30") +
annotate("text", x = 390, y = -15, label = "Source: Weather Underground", size=4, fontface="italic", colour="gray30")
print(p)
town <- gsub(" ", "", town)
# Exporting the charts
filenamesvg <- paste0(town, ".svg")
filenamepng <- paste0(town, ".png")
ggsave(file=filenamesvg, plot=p, width=10, height=6)
ggsave(file=filenamepng, plot=p, width=10, height=5)
|
/weather_charter.R
|
no_license
|
trendct/weather
|
R
| false | false | 7,329 |
r
|
context("Test for is-hex-color")
test_that("is_hex() works as expected", {
expect_true(is_hex("#FF00A7"))
expect_true(is_hex("#ff0000"))
expect_true(is_hex("#123456"))
expect_true(is_hex("#12Fb56"))
expect_false(is_hex("FF0000"))
expect_false(is_hex("#1234GF"))
expect_false(is_hex("#1234567"))
expect_false(is_hex("blue"))
expect_error(is_hex(FF00A7))
expect_error(is_hex(TRUE))
})
test_that("is_hex_alpha() works as expected", {
expect_true(is_hex_alpha("#FF000078"))
expect_true(is_hex_alpha("#ffda0078"))
expect_false(is_hex_alpha("#FF0000"))
expect_false(is_hex_alpha("#ffda00"))
expect_error(is_hex_alpha(FF00A7))
expect_error(is_hex_alpha(TRUE))
})
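# The implementations under test live in the package sources. A minimal sketch that
# would satisfy the expectations above might look like this (hypothetical, regex-based,
# kept commented out so it does not shadow the package's own functions):
# is_hex <- function(x) {
#   if (!is.character(x)) stop("x must be a character string")
#   grepl("^#[0-9A-Fa-f]{6}$", x)
# }
# is_hex_alpha <- function(x) {
#   if (!is.character(x)) stop("x must be a character string")
#   grepl("^#[0-9A-Fa-f]{8}$", x)
# }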
|
/workout02/code/tests/test-hex-color.R
|
no_license
|
nabkizil/stat133_fall2018
|
R
| false | false | 689 |
r
|
context("Test for is-hex-color")
test_that("is_hex() works as expected", {
expect_true(is_hex("#FF00A7"))
expect_true(is_hex("#ff0000"))
expect_true(is_hex("#123456"))
expect_true(is_hex("#12Fb56"))
expect_false(is_hex("FF0000"))
expect_false(is_hex("#1234GF"))
expect_false(is_hex("#1234567"))
expect_false(is_hex("blue"))
expect_error(is_hex(FF00A7))
expect_error(is_hex(TRUE))
})
test_that("is_hex_alpha() works as expected", {
expect_true(is_hex_alpha("#FF000078"))
expect_true(is_hex_alpha("#ffda0078"))
expect_false(is_hex_alpha("#FF0000"))
expect_false(is_hex_alpha("#ffda00"))
expect_error(is_hex_alpha(FF00A7))
expect_error(is_hex_alpha(TRUE))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gender-streetnet.R
\name{gender_streetnet}
\alias{gender_streetnet}
\title{gender_streetnet}
\usage{
gender_streetnet(net, wt_profile = "foot", country = "de")
}
\arguments{
\item{net}{Street network in \pkg{sf} or \pkg{sc} format}
\item{wt_profile}{Type of weighting to be used; see \code{dodgr::weight_streetnet}
for details.}
\item{country}{Country for which gender is to be determined, either as
ISO3166 two-letter abbreviation, or full text. See \link{list_countries} for
list of recognized countries.}
}
\value{
A weighted street network in \pkg{dodgr} format.
}
\description{
Weight a street network via the \pkg{dodgr} function \code{weight_streetnet}, adding
additional information on the gender of street names.
}
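% Hypothetical usage sketch added for illustration (not generated from the package
% sources); it assumes a street network downloaded with \code{dodgr::dodgr_streetnet}.
\examples{
\dontrun{
net <- dodgr::dodgr_streetnet("bonn germany")
graph <- gender_streetnet(net, wt_profile = "foot", country = "de")
}
}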
|
/man/gender_streetnet.Rd
|
no_license
|
mpadge/gender-conscious-routing
|
R
| false | true | 795 |
rd
|
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033511051251e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615845437-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 736 |
r
|
library(neuralnet)
library(caTools)
library(plyr)
library(boot)
library(matrixStats)
library(rstudioapi)
#-------------------------Read csv file------------------------------------------------------
current_path <- getActiveDocumentContext()$path
setwd(dirname(current_path ))
data <- read.delim("features2.csv",header=T, sep=";",dec=",")
data <- within(data, rm("File_ID"))
#--------------Creation of the formula: grade = sum of other features----------------------
header <- names(data)
f <- paste(header[1:14], collapse = '+')
f <- paste(header[15], '~',f) # index 15 = grade (class)
f <- as.formula(f)
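# For reference (column names are those of the CSV, "Grade" being the class column),
# the constructed formula has the shape
#   Grade ~ <feature 1> + <feature 2> + ... + <feature 14>
# print(f) # uncomment to inspect the exact formula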
#--------------- Normalize values (max-min method)------------------------------------------
max <- apply(data,2,max)
min <- apply(data,2,min)
data <- as.data.frame(scale(data,center=min,scale=max-min))
#-------------------------------------Initializations------------------------------------------
set.seed(88)
error <- NULL
percentError <- NULL
k <- 50
ListMSE = list()
ListAccuracy = list()
pbar <- create_progress_bar('text') # Check progress of the whole process
pbar$init(length(30:80)) # one progress step per training-set size (j = 30, ..., 80)
#--------------Establish the loop for splitting data randomly (vary train set from 30% to 80%)---------
# -------------------and train/test the net + calculate errors (50 folds--> k)------------------
for (j in 30:80){ # Dataset has 98 rows, so n ~ %
for(i in 1:k) {
index <- sample(1:nrow(data), j)
train <- data[index,]
test <- data[-index,]
nn <- neuralnet(f, data = train, hidden = c(2, 1), linear.output = F) # Train
predicted.nn.values <- compute(nn, test[1:14]) # Test
#--------------De-normalize values (from test set and predicted)-----------------
predicted.values <- predicted.nn.values$net.result * (max[15] - min[15]) + min[15]
actual.values <- test[15] * (max[15] - min[15]) + min[15]
    error[i] <- sum((actual.values - predicted.values) ^ 2) / nrow(test) # Calculate error (MSE)
actual.values <- actual.values$Grade
percentError[i] <- mean((actual.values - predicted.values) / actual.values * 100)
}
ListMSE[[j]] = error
ListAccuracy[[j]] = 100 - abs(percentError)
pbar$step()
}
#--------------------------NEURAL NETWORK QUALITY EVALUATION------------------------------------
#garson(nn)
Matrix.MSE = do.call(cbind, ListMSE)
Matrix.Accuracy = do.call(cbind, ListAccuracy)
#-------------Show MSE distributions (BoxPlots) for selected training-set sizes--------------------------
boxplot(Matrix.MSE[,31], xlab = "MSE", col = 'green', border = 'red',
        main = "MSE BoxPlot (training set = 60%)", horizontal=TRUE)
boxplot(Matrix.MSE[,41], xlab = "MSE", col = 'yellow', border = 'orange',
        main = "MSE BoxPlot (training set = 70%)", horizontal=TRUE)
boxplot(Matrix.MSE[,51], xlab = "MSE", col = 'pink', border = 'red',
        main = "MSE BoxPlot (training set = 80%)", horizontal=TRUE)
#-------------Show Accuracy distributions (BoxPlots) for selected training-set sizes--------------------------
boxplot(Matrix.Accuracy[,31], xlab = "Accuracy (%)", col = 'green', border = 'red',
        main = "Accuracy BoxPlot (training set = 60%)", horizontal=TRUE)
boxplot(Matrix.Accuracy[,41], xlab = "Accuracy (%)", col = 'yellow', border = 'orange',
        main = "Accuracy BoxPlot (training set = 70%)", horizontal=TRUE)
boxplot(Matrix.Accuracy[,51], xlab = "Accuracy (%)", col = 'pink', border = 'red',
        main = "Accuracy BoxPlot (training set = 80%)", horizontal=TRUE)
#---------------------Graphic of MSE (median) according to train set size------------------------
med = colMedians(Matrix.MSE)
X = seq(30,80)
plot (med~X, type = "l", xlab = "Size training set (%)", ylab = "Median MSE",
main = "Variation of MSE with training set size")
#--------------------Graphic of Accuracy (median) according to train set size--------------------
med2 = colMedians(Matrix.Accuracy)
X = seq(30,80)
plot (med2~X, type = "l", xlab = "Size training set (%)", ylab = "Median Accuracy",
main = "Variation of Accuracy with training set size")
|
/VariationTrainSet_CrossValidation.R
|
no_license
|
dalilareis/CS-R-neural-network
|
R
| false | false | 4,006 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gini.R
\name{gini}
\alias{gini}
\title{Calculate Gini Index of a numeric vector}
\usage{
gini(x)
}
\arguments{
\item{x}{A numeric vector.}
}
\value{
A numeric value between 0 and 1.
}
\description{
Calculate the Gini index of a numeric vector.
}
\details{
The Gini index (Gini coefficient) is a measure of statistical dispersion. A Gini
coefficient of zero expresses perfect equality where all values are
the same. A Gini coefficient of one expresses maximal inequality among values.
}
\examples{
testValues <- runif(100)
gini(testValues)
}
\references{
Gini, C. (1912) \emph{Variability and Mutability}, C. Cuppini, Bologna,
156 pages.
}
\author{
Jitao David Zhang
}
|
/man/gini.Rd
|
no_license
|
planetMDX/BioQC
|
R
| false | true | 783 |
rd
|
#' heatmap module UI representation
#'
#' @param id The ID of the modules namespace.
#' @param row.label Boolean value used as the initial value of the row label checkbox (Default = TRUE).
#'
#' @return A list with HTML tags from \code{\link[shiny]{tag}}.
#'
#' @export
heatmapUI <- function(id, row.label = TRUE) {
ns <- shiny::NS(id)
shiny::tagList(shiny::fluidPage(
rintrojs::introjsUI(),
shinyjs::useShinyjs(),
shiny::fluidRow(shinydashboard::box(width = 12,
shiny::div(style = "overflow-y: scroll; overflow-x: scroll; height: 800px; text-align: center",
shiny::uiOutput(ns("heatmap"))))),
shiny::fluidRow(
shinydashboard::box(
width = 12,
collapsible = TRUE,
shiny::fluidRow(
shiny::column(
width = 3,
shiny::div(id = ns("guide_selection"),
columnSelectorUI(id = ns("select")))),
shiny::column(
width = 3,
shiny::div(id = ns("guide_cluster"),
shiny::selectInput(
ns("clustering"),
label = "Choose clustering",
choices = c("columns and rows" = "both", "only columns" = "column", "only rows" = "row", "no clustering" = "none"),
multiple = FALSE
),
shiny::selectInput(
ns("cluster_distance"),
label = "Cluster distance",
choices = c("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski", "pearson", "spearman", "kendall"),
multiple = FALSE
),
shiny::selectInput(
ns("cluster_method"),
label = "Cluster method",
choices = c("average", "ward.D", "ward.D2", "single", "complete", "mcquitty"),
multiple = FALSE))
),
shiny::column(
width = 3,
shiny::div(id = ns("guide_transformation"),
transformationUI(id = ns("transform"), choices = list(`None` = "raw", `log2` = "log2", `-log2` = "-log2", `log10` = "log10", `-log10` = "-log10", `Z score` = "zscore"), transposeOptions = TRUE)
),
shiny::div(id = ns("guide_coloring"),
shiny::selectInput(
ns("distribution"),
label = "Data distribution",
choices = c("Sequential", "Diverging"),
multiple = FALSE
),
colorPickerUI(ns("color"), show.transparency = FALSE)
)
),
shiny::column(
width = 3,
shiny::div(id = ns("guide_options"),
shiny::textInput(ns("label"), label = "Unit label", placeholder = "Enter unit..."),
shiny::checkboxInput(ns("row_label"), label = "Row label", value = row.label),
labelUI(ns("labeller")),
shiny::checkboxInput(ns("column_label"), label = "Column label", value = TRUE)
)
)
),
shiny::fluidRow(
shiny::column(
width = 12,
shiny::div(id = ns("guide_buttons"),
shiny::actionButton(ns("plot"), "Plot", style = "color: #fff; background-color: #3c8dbc"),
shiny::actionButton(ns("reset"), "Reset", style = "color: #fff; background-color: #3c8dbc"),
shiny::actionButton(ns("guide"), "Launch guide", style = "color: #fff; background-color: #3c8dbc", icon = shiny::icon("question-circle")),
shiny::downloadButton(outputId = ns("download"), label = "Download")
)
)
)
)
)
))
}
#' heatmap module server logic
#'
#' @param input Shiny's input object
#' @param output Shiny's output object
#' @param session Shiny's session object
#' @param clarion A clarion object. See \code{\link[wilson]{Clarion}}. (Supports reactive)
#' @param plot.method Choose which method is used for plotting. Either "static" or "interactive" (Default = "static").
#' @param label.sep Separator used for label merging (Default = ", ").
#' @param width Width of the plot in cm. Defaults to minimal size for readable labels and supports reactive.
#' @param height Height of the plot in cm. Defaults to minimal size for readable labels and supports reactive.
#' @param ppi Pixel per inch. Defaults to 72 and supports reactive.
#' @param scale Scale plot size. Defaults to 1, supports reactive.
#'
#' @return Reactive containing data used for plotting.
#'
#' @export
heatmap <- function(input, output, session, clarion, plot.method = "static", label.sep = ", ", width = "auto", height = "auto", ppi = 72, scale = 1) {
# globals/ initialization #####
# cluster limitation
static <- 11000
interactive <- 3000
# clear plot
clear_plot <- shiny::reactiveVal(FALSE)
# disable downloadButton on init
shinyjs::disable("download")
# input preparation #####
object <- shiny::reactive({
# support reactive
if (shiny::is.reactive(clarion)) {
if (!methods::is(clarion(), "Clarion")) shiny::stopApp("Object of class 'Clarion' needed!")
clarion()$clone(deep = TRUE)
} else {
if (!methods::is(clarion, "Clarion")) shiny::stopApp("Object of class 'Clarion' needed!")
clarion$clone(deep = TRUE)
}
})
# handle reactive sizes
size <- shiny::reactive({
width <- ifelse(shiny::is.reactive(width), width(), width)
height <- ifelse(shiny::is.reactive(height), height(), height)
ppi <- ifelse(shiny::is.reactive(ppi), ppi(), ppi)
scale <- ifelse(shiny::is.reactive(scale), scale(), scale)
if (!is.numeric(width) || width <= 0) {
width <- "auto"
}
if (!is.numeric(height) || height <= 0) {
if (plot.method == "interactive") {
height <- 28
} else {
height <- "auto"
}
}
if (!is.numeric(ppi) || ppi <= 0) {
ppi <- 72
}
list(width = width,
height = height,
ppi = ppi,
scale = scale)
})
# modules/ ui #####
columns <- shiny::callModule(columnSelector, "select", type.columns = shiny::reactive(object()$metadata[level != "feature", intersect(names(object()$metadata), c("key", "level", "label", "sub_label")), with = FALSE]), column.type.label = "Column types to choose from")
transform <- shiny::callModule(transformation, "transform", data = shiny::reactive(as.matrix(object()$data[, columns$selected_columns(), with = FALSE])), pseudocount = shiny::reactive(ifelse(object()$metadata[key == columns$selected_columns()[1]][["level"]] == "contrast", 0, 1)), replaceNA = FALSE)
color <- shiny::callModule(colorPicker, "color", distribution = shiny::reactive(tolower(input$distribution)), winsorize = shiny::reactive(equalize(transform$data())))
custom_label <- shiny::callModule(label, "labeller", data = shiny::reactive(object()$data), label = "Select row label", sep = label.sep, disable = shiny::reactive(!input$row_label))
# automatic unitlabel
shiny::observe({
shiny::updateTextInput(session = session, inputId = "label", value = transform$method())
})
# functionality/ plotting #####
# reset ui
shiny::observeEvent(input$reset, {
log_message("Heatmap: reset", "INFO", token = session$token)
shinyjs::reset("cluster_distance")
shinyjs::reset("cluster_method")
shinyjs::reset("clustering")
shinyjs::reset("distribution")
shinyjs::reset("label")
shinyjs::reset("row_label")
shinyjs::reset("column_label")
columns <<- shiny::callModule(columnSelector, "select", type.columns = shiny::reactive(object()$metadata[level != "feature", intersect(names(object()$metadata), c("key", "level", "label", "sub_label")), with = FALSE]), column.type.label = "Column types to choose from")
transform <<- shiny::callModule(transformation, "transform", data = shiny::reactive(as.matrix(object()$data[, columns$selected_columns(), with = FALSE])), pseudocount = shiny::reactive(ifelse(object()$metadata[key == columns$selected_columns()[1]][["level"]] == "contrast", 0, 1)), replaceNA = FALSE)
color <<- shiny::callModule(colorPicker, "color", distribution = shiny::reactive(tolower(input$distribution)), winsorize = shiny::reactive(equalize(transform$data())))
custom_label <<- shiny::callModule(label, "labeller", data = shiny::reactive(object()$data), label = "Select row label", sep = label.sep, disable = shiny::reactive(!input$row_label))
clear_plot(TRUE)
})
result_data <- shiny::eventReactive(input$plot, {
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Compute data")
processed_data <- data.table::data.table(object()$data[, object()$get_id(), with = FALSE], transform$data())
progress$set(1)
return(processed_data)
})
plot <- shiny::eventReactive(input$plot, {
log_message("Heatmap: computing plot...", "INFO", token = session$token)
# enable downloadButton
shinyjs::enable("download")
clear_plot(FALSE)
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Compute plot")
plot <- create_heatmap(
data = result_data(),
unitlabel = input$label,
row.label = input$row_label,
row.custom.label = custom_label()$label,
column.label = input$column_label,
column.custom.label = make.unique(columns$label()),
clustering = input$clustering,
clustdist = input$cluster_distance,
clustmethod = input$cluster_method,
colors = color()$palette,
width = size()$width,
height = size()$height,
ppi = size()$ppi,
scale = size()$scale,
plot.method = plot.method,
winsorize.colors = color()$winsorize
)
progress$set(1)
log_message("Heatmap: done.", "INFO", token = session$token)
return(plot)
})
# render plot #####
if (plot.method == "interactive") {
output$heatmap <- shiny::renderUI({
shinycssloaders::withSpinner(plotly::plotlyOutput(session$ns("interactive")), proxy.height = "800px")
})
output$interactive <- plotly::renderPlotly({
if (clear_plot()) {
return()
} else {
log_message("Heatmap: render plot interactive", "INFO", token = session$token)
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Render plot")
plot <- plot()$plot
progress$set(1)
return(plot)
}
})
} else {
output$heatmap <- shiny::renderUI({
shinycssloaders::withSpinner(shiny::plotOutput(session$ns("static")), proxy.height = "800px")
})
output$static <- shiny::renderPlot(
width = shiny::reactive(plot()$width * (plot()$ppi / 2.54)),
height = shiny::reactive(plot()$height * (plot()$ppi / 2.54)),
{
if (clear_plot()) {
return()
} else {
log_message("Heatmap: render plot static", "INFO", token = session$token)
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Render plot")
plot <- plot()$plot
# handle error
if (methods::is(plot, "try-error")) {
# TODO add logging
stop("An error occured! Please try a different dataset.")
}
progress$set(1)
return(ComplexHeatmap::draw(plot, heatmap_legend_side = "bottom"))
}
})
}
# download #####
output$download <- shiny::downloadHandler(filename = "heatmap.zip",
content = function(file) {
log_message("Heatmap: download", "INFO", token = session$token)
download(file = file, filename = "heatmap.zip", plot = plot()$plot, width = plot()$width, height = plot()$height, ppi = plot()$ppi, ui = user_input())
})
user_input <- shiny::reactive({
# format selection
selection <- list(type = columns$type(), selectedColumns = columns$selected_columns())
# format clustering
clustering <- list(
clustering = input$clustering,
distance = input$cluster_distance,
method = input$cluster_method
)
# format options
options <- list(
transformation = list(method = transform$method(), applied = transform$transpose()),
color = list(distribution = input$distribution, scheme = color()$name, reverse = color()$reverse, winsorize = color()$winsorize),
unit_label = input$label,
row_label = input$row_label,
custom_row_label = custom_label()$selected,
column_label = input$column_label
)
# merge all
list(selection = selection, clustering = clustering, options = options)
})
# notifications #####
# enable/ disable plot button
# show warning if disabled
shiny::observe({
shinyjs::disable("plot")
show_warning <- TRUE
# are columns selected?
if (shiny::isTruthy(columns$selected_columns())) {
row_num <- nrow(shiny::isolate(object()$data))
col_num <- length(columns$selected_columns())
# minimal heatmap possible (greater 1x1)?
if (row_num > 1 || col_num > 1) {
# no clustering for single rows or columns
if (row_num == 1 && !is.element(input$clustering, c("both", "row"))) {
show_warning <- FALSE
shinyjs::enable("plot")
} else if (col_num == 1 && !is.element(input$clustering, c("both", "column"))) {
show_warning <- FALSE
shinyjs::enable("plot")
} else if (row_num > 1 && col_num > 1) { # no border case heatmaps
show_warning <- FALSE
shinyjs::enable("plot")
}
}
if (show_warning) {
shiny::showNotification(
ui = "Warning! Insufficient columns/ rows. Either disable the respective clustering or expand the dataset.",
id = session$ns("insuf_data"),
type = "warning"
)
} else {
shiny::removeNotification(session$ns("insuf_data"))
}
# maximum heatmap reached?
if (plot.method == "static" && row_num > static || plot.method == "interactive" && row_num > interactive) {
shinyjs::disable("plot")
}
}
})
# cluster limitation
shiny::observe({
shiny::req(object())
if (shiny::isTruthy(columns$selected_columns())) {
if (input$clustering != "none") { # clustering
if (plot.method == "static" && nrow(object()$data) > static) { # cluster limitation (static)
shiny::showNotification(
paste("Clustering limited to", static, "genes! Please disable clustering or select less genes."),
duration = NULL,
type = "error",
id = session$ns("notification")
)
} else if (plot.method == "interactive" && nrow(object()$data) > interactive) { # cluster limitation (interactive)
shiny::showNotification(
paste("Clustering limited to", interactive, "genes! Please disable clustering or select less genes."),
duration = NULL,
type = "error",
id = session$ns("notification")
)
} else {
shiny::removeNotification(session$ns("notification"))
}
} else if (nrow(object()$data) > 200) { # computation warning
shiny::showNotification(
paste("Caution! You selected", nrow(object()$data), "genes. This will take a while to compute."),
duration = 5,
type = "warning",
id = session$ns("notification")
)
} else {
shiny::removeNotification(session$ns("notification"))
}
} else {
shiny::removeNotification(session$ns("notification"))
}
})
# warning if plot size exceeds limits
shiny::observe({
if (plot()$exceed_size) {
shiny::showNotification(
ui = "Width and/ or height exceed limit. Using 500 cm instead.",
id = session$ns("limit"),
type = "warning"
)
} else {
shiny::removeNotification(session$ns("limit"))
}
})
# Fetch the reactive guide for this module
guide <- heatmapGuide(session)
shiny::observeEvent(input$guide, {
rintrojs::introjs(session, options = list(steps = guide()))
})
return(result_data)
}
#' heatmap module guide
#'
#' @param session The shiny session
#'
#' @return A shiny reactive that contains the texts for the Guide steps.
#'
heatmapGuide <- function(session) {
steps <- list(
"guide_selection" = "<h4>Data selection</h4>
Select a column type for visualization, then select individual columns based on the chosen type.",
"guide_cluster" = "<h4>Row/Column clustering</h4>
Choose where the clustering is applied, then select a clustering distance and method.",
"guide_transformation" = "<h4>Data transformation</h4>
Pick a transformation that you want to apply to your data or leave it as 'None' if no transformation is needed.<br/>
In case of the Z-score transformation, you can additionally choose to apply it to either rows or columns.",
"guide_coloring" = "<h4>Color palettes</h4>
Based on the selected data distribution, available color palettes are either sequential or diverging.<br/>
The selected palette can additionally be reversed.<br/>
Set the limits of the color palette with 'Winsorize to upper/lower'. Out of bounds values will be mapped to the nearest color.",
"guide_options" = "<h4>Additional options</h4>
You can set a label for the color legend that describes the underlying data unit. Furthermore, you can enable/disable row and column labels.
Use the input to generate custom row-labels. The selected columns will be merged and used as label.",
"guide_buttons" = "<h4>Create the plot</h4>
    As a final step, a click on the 'Plot' button will render the plot, while a click on the 'Reset' button will reset the parameters to their defaults."
)
shiny::reactive(data.frame(element = paste0("#", session$ns(names(steps))), intro = unlist(steps)))
}
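# A minimal sketch of how this module pair could be wired into an app (hypothetical,
# not part of the package; `clarion_obj` stands for a wilson Clarion object you would
# construct or load yourself, see the Clarion class documentation). Kept commented out:
#
# library(shiny)
# ui <- shiny::fluidPage(heatmapUI("heat"))
# server <- function(input, output, session) {
#   result <- shiny::callModule(heatmap, "heat", clarion = clarion_obj, plot.method = "static")
# }
# shiny::shinyApp(ui, server)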
|
/R/heatmap.R
|
permissive
|
cran/wilson
|
R
| false | false | 18,986 |
r
|
#' heatmap module UI representation
#'
#' @param id The ID of the modules namespace.
#' @param row.label Boolean Value set initial Value for rowlabel checkbox (Default = TRUE).
#'
#' @return A list with HTML tags from \code{\link[shiny]{tag}}.
#'
#' @export
heatmapUI <- function(id, row.label = TRUE) {
ns <- shiny::NS(id)
shiny::tagList(shiny::fluidPage(
rintrojs::introjsUI(),
shinyjs::useShinyjs(),
shiny::fluidRow(shinydashboard::box(width = 12,
shiny::div(style = "overflow-y: scroll; overflow-x: scroll; height: 800px; text-align: center",
shiny::uiOutput(ns("heatmap"))))),
shiny::fluidRow(
shinydashboard::box(
width = 12,
collapsible = TRUE,
shiny::fluidRow(
shiny::column(
width = 3,
shiny::div(id = ns("guide_selection"),
columnSelectorUI(id = ns("select")))),
shiny::column(
width = 3,
shiny::div(id = ns("guide_cluster"),
shiny::selectInput(
ns("clustering"),
label = "Choose clustering",
choices = c("columns and rows" = "both", "only columns" = "column", "only rows" = "row", "no clustering" = "none"),
multiple = FALSE
),
shiny::selectInput(
ns("cluster_distance"),
label = "Cluster distance",
choices = c("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski", "pearson", "spearman", "kendall"),
multiple = FALSE
),
shiny::selectInput(
ns("cluster_method"),
label = "Cluster method",
choices = c("average", "ward.D", "ward.D2", "single", "complete", "mcquitty"),
multiple = FALSE))
),
shiny::column(
width = 3,
shiny::div(id = ns("guide_transformation"),
transformationUI(id = ns("transform"), choices = list(`None` = "raw", `log2` = "log2", `-log2` = "-log2", `log10` = "log10", `-log10` = "-log10", `Z score` = "zscore"), transposeOptions = TRUE)
),
shiny::div(id = ns("guide_coloring"),
shiny::selectInput(
ns("distribution"),
label = "Data distribution",
choices = c("Sequential", "Diverging"),
multiple = FALSE
),
colorPickerUI(ns("color"), show.transparency = FALSE)
)
),
shiny::column(
width = 3,
shiny::div(id = ns("guide_options"),
shiny::textInput(ns("label"), label = "Unit label", placeholder = "Enter unit..."),
shiny::checkboxInput(ns("row_label"), label = "Row label", value = row.label),
labelUI(ns("labeller")),
shiny::checkboxInput(ns("column_label"), label = "Column label", value = TRUE)
)
)
),
shiny::fluidRow(
shiny::column(
width = 12,
shiny::div(id = ns("guide_buttons"),
shiny::actionButton(ns("plot"), "Plot", style = "color: #fff; background-color: #3c8dbc"),
shiny::actionButton(ns("reset"), "Reset", style = "color: #fff; background-color: #3c8dbc"),
shiny::actionButton(ns("guide"), "Launch guide", style = "color: #fff; background-color: #3c8dbc", icon = shiny::icon("question-circle")),
shiny::downloadButton(outputId = ns("download"), label = "Download")
)
)
)
)
)
))
}
#' heatmap module server logic
#'
#' @param input Shiny's input object
#' @param output Shiny's output object
#' @param session Shiny's session object
#' @param clarion A clarion object. See \code{\link[wilson]{Clarion}}. (Supports reactive)
#' @param plot.method Choose which method is used for plotting. Either "static" or "interactive" (Default = "static").
#' @param label.sep Separator used for label merging (Default = ", ").
#' @param width Width of the plot in cm. Defaults to minimal size for readable labels and supports reactive.
#' @param height Height of the plot in cm. Defaults to minimal size for readable labels and supports reactive.
#' @param ppi Pixel per inch. Defaults to 72 and supports reactive.
#' @param scale Scale plot size. Defaults to 1, supports reactive.
#'
#' @return Reactive containing data used for plotting.
#'
#' @export
heatmap <- function(input, output, session, clarion, plot.method = "static", label.sep = ", ", width = "auto", height = "auto", ppi = 72, scale = 1) {
# globals/ initialization #####
# cluster limitation
static <- 11000
interactive <- 3000
# clear plot
clear_plot <- shiny::reactiveVal(FALSE)
# disable downloadButton on init
shinyjs::disable("download")
# input preparation #####
object <- shiny::reactive({
# support reactive
if (shiny::is.reactive(clarion)) {
if (!methods::is(clarion(), "Clarion")) shiny::stopApp("Object of class 'Clarion' needed!")
clarion()$clone(deep = TRUE)
} else {
if (!methods::is(clarion, "Clarion")) shiny::stopApp("Object of class 'Clarion' needed!")
clarion$clone(deep = TRUE)
}
})
# handle reactive sizes
size <- shiny::reactive({
width <- ifelse(shiny::is.reactive(width), width(), width)
height <- ifelse(shiny::is.reactive(height), height(), height)
ppi <- ifelse(shiny::is.reactive(ppi), ppi(), ppi)
scale <- ifelse(shiny::is.reactive(scale), scale(), scale)
if (!is.numeric(width) || width <= 0) {
width <- "auto"
}
if (!is.numeric(height) || height <= 0) {
if (plot.method == "interactive") {
height <- 28
} else {
height <- "auto"
}
}
if (!is.numeric(ppi) || ppi <= 0) {
ppi <- 72
}
list(width = width,
height = height,
ppi = ppi,
scale = scale)
})
# modules/ ui #####
columns <- shiny::callModule(columnSelector, "select", type.columns = shiny::reactive(object()$metadata[level != "feature", intersect(names(object()$metadata), c("key", "level", "label", "sub_label")), with = FALSE]), column.type.label = "Column types to choose from")
transform <- shiny::callModule(transformation, "transform", data = shiny::reactive(as.matrix(object()$data[, columns$selected_columns(), with = FALSE])), pseudocount = shiny::reactive(ifelse(object()$metadata[key == columns$selected_columns()[1]][["level"]] == "contrast", 0, 1)), replaceNA = FALSE)
color <- shiny::callModule(colorPicker, "color", distribution = shiny::reactive(tolower(input$distribution)), winsorize = shiny::reactive(equalize(transform$data())))
custom_label <- shiny::callModule(label, "labeller", data = shiny::reactive(object()$data), label = "Select row label", sep = label.sep, disable = shiny::reactive(!input$row_label))
# automatic unitlabel
shiny::observe({
shiny::updateTextInput(session = session, inputId = "label", value = transform$method())
})
# functionality/ plotting #####
# reset ui
shiny::observeEvent(input$reset, {
log_message("Heatmap: reset", "INFO", token = session$token)
shinyjs::reset("cluster_distance")
shinyjs::reset("cluster_method")
shinyjs::reset("clustering")
shinyjs::reset("distribution")
shinyjs::reset("label")
shinyjs::reset("row_label")
shinyjs::reset("column_label")
columns <<- shiny::callModule(columnSelector, "select", type.columns = shiny::reactive(object()$metadata[level != "feature", intersect(names(object()$metadata), c("key", "level", "label", "sub_label")), with = FALSE]), column.type.label = "Column types to choose from")
transform <<- shiny::callModule(transformation, "transform", data = shiny::reactive(as.matrix(object()$data[, columns$selected_columns(), with = FALSE])), pseudocount = shiny::reactive(ifelse(object()$metadata[key == columns$selected_columns()[1]][["level"]] == "contrast", 0, 1)), replaceNA = FALSE)
color <<- shiny::callModule(colorPicker, "color", distribution = shiny::reactive(tolower(input$distribution)), winsorize = shiny::reactive(equalize(transform$data())))
custom_label <<- shiny::callModule(label, "labeller", data = shiny::reactive(object()$data), label = "Select row label", sep = label.sep, disable = shiny::reactive(!input$row_label))
clear_plot(TRUE)
})
result_data <- shiny::eventReactive(input$plot, {
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Compute data")
processed_data <- data.table::data.table(object()$data[, object()$get_id(), with = FALSE], transform$data())
progress$set(1)
return(processed_data)
})
plot <- shiny::eventReactive(input$plot, {
log_message("Heatmap: computing plot...", "INFO", token = session$token)
# enable downloadButton
shinyjs::enable("download")
clear_plot(FALSE)
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Compute plot")
plot <- create_heatmap(
data = result_data(),
unitlabel = input$label,
row.label = input$row_label,
row.custom.label = custom_label()$label,
column.label = input$column_label,
column.custom.label = make.unique(columns$label()),
clustering = input$clustering,
clustdist = input$cluster_distance,
clustmethod = input$cluster_method,
colors = color()$palette,
width = size()$width,
height = size()$height,
ppi = size()$ppi,
scale = size()$scale,
plot.method = plot.method,
winsorize.colors = color()$winsorize
)
progress$set(1)
log_message("Heatmap: done.", "INFO", token = session$token)
return(plot)
})
# render plot #####
if (plot.method == "interactive") {
output$heatmap <- shiny::renderUI({
shinycssloaders::withSpinner(plotly::plotlyOutput(session$ns("interactive")), proxy.height = "800px")
})
output$interactive <- plotly::renderPlotly({
if (clear_plot()) {
return()
} else {
log_message("Heatmap: render plot interactive", "INFO", token = session$token)
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Render plot")
plot <- plot()$plot
progress$set(1)
return(plot)
}
})
} else {
output$heatmap <- shiny::renderUI({
shinycssloaders::withSpinner(shiny::plotOutput(session$ns("static")), proxy.height = "800px")
})
output$static <- shiny::renderPlot(
width = shiny::reactive(plot()$width * (plot()$ppi / 2.54)),
height = shiny::reactive(plot()$height * (plot()$ppi / 2.54)),
{
if (clear_plot()) {
return()
} else {
log_message("Heatmap: render plot static", "INFO", token = session$token)
# new progress indicator
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(0.2, message = "Render plot")
plot <- plot()$plot
# handle error
if (methods::is(plot, "try-error")) {
# TODO add logging
stop("An error occured! Please try a different dataset.")
}
progress$set(1)
return(ComplexHeatmap::draw(plot, heatmap_legend_side = "bottom"))
}
})
}
# download #####
output$download <- shiny::downloadHandler(filename = "heatmap.zip",
content = function(file) {
log_message("Heatmap: download", "INFO", token = session$token)
download(file = file, filename = "heatmap.zip", plot = plot()$plot, width = plot()$width, height = plot()$height, ppi = plot()$ppi, ui = user_input())
})
user_input <- shiny::reactive({
# format selection
selection <- list(type = columns$type(), selectedColumns = columns$selected_columns())
# format clustering
clustering <- list(
clustering = input$clustering,
distance = input$cluster_distance,
method = input$cluster_method
)
# format options
options <- list(
transformation = list(method = transform$method(), applied = transform$transpose()),
color = list(distribution = input$distribution, scheme = color()$name, reverse = color()$reverse, winsorize = color()$winsorize),
unit_label = input$label,
row_label = input$row_label,
custom_row_label = custom_label()$selected,
column_label = input$column_label
)
# merge all
list(selection = selection, clustering = clustering, options = options)
})
# notifications #####
# enable/ disable plot button
# show warning if disabled
shiny::observe({
shinyjs::disable("plot")
show_warning <- TRUE
# are columns selected?
if (shiny::isTruthy(columns$selected_columns())) {
row_num <- nrow(shiny::isolate(object()$data))
col_num <- length(columns$selected_columns())
# minimal heatmap possible (greater 1x1)?
if (row_num > 1 || col_num > 1) {
# no clustering for single rows or columns
if (row_num == 1 && !is.element(input$clustering, c("both", "row"))) {
show_warning <- FALSE
shinyjs::enable("plot")
} else if (col_num == 1 && !is.element(input$clustering, c("both", "column"))) {
show_warning <- FALSE
shinyjs::enable("plot")
} else if (row_num > 1 && col_num > 1) { # no border case heatmaps
show_warning <- FALSE
shinyjs::enable("plot")
}
}
if (show_warning) {
shiny::showNotification(
ui = "Warning! Insufficient columns/ rows. Either disable the respective clustering or expand the dataset.",
id = session$ns("insuf_data"),
type = "warning"
)
} else {
shiny::removeNotification(session$ns("insuf_data"))
}
# maximum heatmap reached?
if (plot.method == "static" && row_num > static || plot.method == "interactive" && row_num > interactive) {
shinyjs::disable("plot")
}
}
})
# cluster limitation
shiny::observe({
shiny::req(object())
if (shiny::isTruthy(columns$selected_columns())) {
if (input$clustering != "none") { # clustering
if (plot.method == "static" && nrow(object()$data) > static) { # cluster limitation (static)
shiny::showNotification(
paste("Clustering limited to", static, "genes! Please disable clustering or select less genes."),
duration = NULL,
type = "error",
id = session$ns("notification")
)
} else if (plot.method == "interactive" && nrow(object()$data) > interactive) { # cluster limitation (interactive)
shiny::showNotification(
paste("Clustering limited to", interactive, "genes! Please disable clustering or select less genes."),
duration = NULL,
type = "error",
id = session$ns("notification")
)
} else {
shiny::removeNotification(session$ns("notification"))
}
} else if (nrow(object()$data) > 200) { # computation warning
shiny::showNotification(
paste("Caution! You selected", nrow(object()$data), "genes. This will take a while to compute."),
duration = 5,
type = "warning",
id = session$ns("notification")
)
} else {
shiny::removeNotification(session$ns("notification"))
}
} else {
shiny::removeNotification(session$ns("notification"))
}
})
# warning if plot size exceeds limits
shiny::observe({
if (plot()$exceed_size) {
shiny::showNotification(
ui = "Width and/ or height exceed limit. Using 500 cm instead.",
id = session$ns("limit"),
type = "warning"
)
} else {
shiny::removeNotification(session$ns("limit"))
}
})
# Fetch the reactive guide for this module
guide <- heatmapGuide(session)
shiny::observeEvent(input$guide, {
rintrojs::introjs(session, options = list(steps = guide()))
})
return(result_data)
}
#' heatmap module guide
#'
#' @param session The shiny session
#'
#' @return A shiny reactive that contains the texts for the Guide steps.
#'
heatmapGuide <- function(session) {
steps <- list(
"guide_selection" = "<h4>Data selection</h4>
Select a column type for visualization, then select individual columns based on the chosen type.",
"guide_cluster" = "<h4>Row/Column clustering</h4>
Choose where the clustering is applied, then select a clustering distance and method.",
"guide_transformation" = "<h4>Data transformation</h4>
Pick a transformation that you want to apply to your data or leave it as 'None' if no transformation is needed.<br/>
In case of the Z-score transformation, you can additionally choose to apply it to either rows or columns.",
"guide_coloring" = "<h4>Color palettes</h4>
Based on the selected data distribution, available color palettes are either sequential or diverging.<br/>
The selected palette can additionally be reversed.<br/>
Set the limits of the color palette with 'Winsorize to upper/lower'. Out of bounds values will be mapped to the nearest color.",
"guide_options" = "<h4>Additional options</h4>
    You can set a label for the color legend that describes the underlying data unit. Furthermore, you can enable/disable row and column labels.
    Use the input to generate custom row labels. The selected columns will be merged and used as the label.",
    "guide_buttons" = "<h4>Create the plot</h4>
    As a final step, a click on the 'Plot' button will render the plot, while a click on the 'Reset' button will reset the parameters to their defaults."
)
shiny::reactive(data.frame(element = paste0("#", session$ns(names(steps))), intro = unlist(steps)))
}
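# Illustrative sketch (not part of the original module): the guide_coloring step
# above describes "winsorizing" the color limits, i.e. clamping values to the
# chosen bounds so that out-of-range values are mapped to the nearest color.
# Assuming numeric data and user-chosen limits, the idea is simply:
if (FALSE) {
  winsorize <- function(x, lower, upper) pmax(pmin(x, upper), lower)
  winsorize(c(-5, 0, 3, 12), lower = -2, upper = 10) # -> -2  0  3  10
}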
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{normal_approximation}
\alias{normal_approximation}
\title{Normal approximation method}
\usage{
normal_approximation(s, n, alpha, use.backup = FALSE, backup.method, ...)
}
\arguments{
\item{s}{Vector of successes.}
\item{n}{Vector of sample sizes.}
\item{alpha}{The significance level; to calculate a 100(1-\eqn{\alpha})\% lower confidence bound.}
\item{use.backup}{If TRUE, then the specified backup.method will be used for the methods which calculate LCB = 1 in the case of no failures across all components. If FALSE (default), no backup.method is used.}
\item{backup.method}{The backup method which is used for the methods which calculate LCB = 1 in the case of zero failures. Use the function name.}
\item{...}{Additional arguments to be ignored.}
}
\value{
The 100(1-\eqn{\alpha})\% lower confidence bound.
}
\description{
Calculate a binomial series lower confidence bound using a normal approximation with MLE estimates.
}
\examples{
normal_approximation(s=c(35, 97, 59), n=c(35, 100, 60), alpha=.10)
}
|
/man/normal_approximation.Rd
|
no_license
|
cran/serieslcb
|
R
| false | true | 1,131 |
rd
|
library(randomForest)
library(caret)
data(GermanCredit)
#check missing values
GermanCredit[rowSums(is.na(GermanCredit)) > 0,]
inTrain<-createDataPartition(GermanCredit[,10],p=0.8,list=FALSE)
train<-GermanCredit[inTrain,]
test<-GermanCredit[-inTrain,]
train_control<-trainControl(method='cv',number=10)
fit<-randomForest(Class~.,data=train,ntree=10)
pred<-predict(fit,test)
confusionMatrix(pred,test[,10])
save(fit,file='fit.rData')
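# Note: train_control above is created but never used; the randomForest() fit is
# not cross-validated. A hedged sketch of how the same objects could be combined
# through caret::train (settings here are assumptions, not part of the original
# script):
if (FALSE) {
  fit_cv <- train(Class ~ ., data = train, method = "rf",
                  trControl = train_control, ntree = 10)
  print(fit_cv)
  confusionMatrix(predict(fit_cv, test), test$Class)
}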
|
/process.R
|
no_license
|
cleobatista/credit_risk_analysis
|
R
| false | false | 436 |
r
|
library(tidyverse)
library(ape)
library(ggtree)
library(phytools)
marine_families <- c("Balaenidae", "Balaenopteridae", "Delphinidae", "Dugongidae", "Eschrichtiidae", "Iniidae", "Monodontidae", "Neobalaenidae", "Odobenidae", "Otariidae", "Phocidae", "Phocoenidae", "Physeteridae", "Platanistidae", "Trichechidae", "Ziphiidae")
phy <- read_csv("../../Data/PHYLACINE_1.2/Data/Traits/Trait_data.csv", col_type = cols()) %>%
filter(
!Family.1.2 %in% marine_families,
Genus.1.2 != "Homo",
Binomial.1.2 != "Ursus_maritimus",
Order.1.2 != "Chiroptera"
)
forest <- read.nexus("../../Data/PHYLACINE_1.2.0/Data/Phylogenies/Complete_phylogeny.nex")
tree <- forest[[1]]
tree <- keep.tip(tree, phy$Binomial.1.2)
extinct <- sapply(tree$tip.label, function(x)
if((phy %>% filter(Binomial.1.2 == x) %>% pull(IUCN.Status.1.2)) %in% c("EX", "EW", "EP"))
TRUE
else
FALSE
) %>% as_vector()
ggtree(tree)
collapseTree(tree)
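# Hypothetical follow-up (not in the original script): the 'extinct' flags
# computed above are never used; one way they could be displayed is by attaching
# them as tip data and colouring the tips with ggtree:
if (FALSE) {
  tip_data <- data.frame(label = tree$tip.label, extinct = extinct)
  ggtree(tree) %<+% tip_data + geom_tippoint(aes(colour = extinct))
}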
|
/Code/R/tree_figure.R
|
no_license
|
emilio-berti/rewiring-rewilding
|
R
| false | false | 940 |
r
|
testlist <- list(x = c(2.08655633926036e-308, 1.79244985109153e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_geom_mean/AFL_cpp_geom_mean/cpp_geom_mean_valgrind_files/1615839129-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 336 |
r
|
##
## diagram.R
##
## Simple objects for the elements of a diagram (text, arrows etc)
## that are compatible with plot.layered and plot.listof
##
## $Revision: 1.10 $ $Date: 2015/02/17 03:45:04 $
# ......... internal class 'diagramobj' supports other classes .........
diagramobj <- function(X, ...) {
if(inherits(try(Frame(X), silent=TRUE), "try-error"))
stop("X is not a spatial object")
a <- list(...)
if(sum(nzchar(names(a))) != length(a))
stop("All extra arguments must be named")
attributes(X) <- append(attributes(X), a)
class(X) <- c("diagramobj", class(X))
return(X)
}
"[.diagramobj" <- function(x, ...) {
y <- NextMethod("[")
attributes(y) <- attributes(x)
return(y)
}
shift.diagramobj <- function(X, ...) {
y <- NextMethod("shift")
attributes(y) <- attributes(X)
return(y)
}
scalardilate.diagramobj <- function(X, f, ...) {
y <- NextMethod("scalardilate")
attributes(y) <- attributes(X)
return(y)
}
# .............. user-accessible classes ................
# ......... (these only need a creator and a plot method) ......
## ........... text .................
textstring <- function(x, y, txt=NULL, ...) {
if(is.ppp(x) && missing(y)) {
X <- x
Window(X) <- boundingbox(x)
} else {
if(missing(y) && checkfields(x, c("x", "y"))) {
y <- x$y
x <- x$x
stopifnot(length(x) == length(y))
}
X <- ppp(x, y, window=owin(range(x),range(y)))
}
marks(X) <- txt
Y <- diagramobj(X, otherargs=list(...))
class(Y) <- c("textstring", class(Y))
return(Y)
}
plot.textstring <- function(x, ..., do.plot=TRUE) {
txt <- marks(x)
otha <- attr(x, "otherargs")
if(do.plot) do.call.matched(text.default,
resolve.defaults(list(...),
list(x=x$x, y=x$y, labels=txt),
otha),
extrargs=c("srt", "family", "xpd"))
return(invisible(Frame(x)))
}
print.textstring <- function(x, ...) {
splat("Text string object")
txt <- marks(x)
if(npoints(x) == 1) {
splat("Text:", dQuote(txt))
splat("Coordinates:", paren(paste(as.vector(coords(x)), collapse=", ")))
} else {
splat("Text:")
print(txt)
splat("Coordinates:")
print(coords(x))
}
return(invisible(NULL))
}
## ........... 'yardstick' to display scale information ................
yardstick <- function(x0, y0, x1, y1, txt=NULL, ...) {
nomore <- missing(y0) && missing(x1) && missing(y1)
if(is.ppp(x0) && nomore) {
if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points")
X <- x0
} else if(is.psp(x0) && nomore) {
if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment")
X <- endpoints.psp(x0)
} else {
xx <- c(x0, x1)
yy <- c(y0, y1)
B <- boundingbox(list(x=xx, y=yy))
X <- ppp(xx, yy, window=B, check=FALSE)
}
Window(X) <- boundingbox(X)
Y <- diagramobj(X, txt=txt, otherargs=list(...))
class(Y) <- c("yardstick", class(Y))
return(Y)
}
plot.yardstick <- local({
mysegments <- function(x0, y0, x1, y1, ..., moreargs=list()) {
## ignore unrecognised arguments without whingeing
do.call.matched(segments,
resolve.defaults(list(x0=x0, y0=y0, x1=x1, y1=y1),
list(...),
moreargs),
extrargs=c("col", "lty", "lwd", "xpd", "lend"))
}
myarrows <- function(x0, y0, x1, y1, ...,
left=TRUE, right=TRUE,
angle=20, frac=0.25,
main, show.all, add) {
mysegments(x0, y0, x1, y1, ...)
if(left || right) {
ang <- angle * pi/180
co <- cos(ang)
si <- sin(ang)
dx <- x1-x0
dy <- y1-y0
le <- sqrt(dx^2 + dy^2)
rot <- matrix(c(dx, dy, -dy, dx)/le, 2, 2)
arlen <- frac * le
up <- arlen * (rot %*% c(co, si))
lo <- arlen * (rot %*% c(co, -si))
if(left) {
mysegments(x0, y0, x0+up[1], y0+up[2], ...)
mysegments(x0, y0, x0+lo[1], y0+lo[2], ...)
}
if(right) {
mysegments(x1, y1, x1-up[1], y1-up[2], ...)
mysegments(x1, y1, x1-lo[1], y1-lo[2], ...)
}
}
return(invisible(NULL))
}
plot.yardstick <- function(x, ...,
angle=20,
frac=1/8,
split=FALSE,
shrink=1/4,
pos=NULL,
txt.args=list(),
txt.shift=c(0,0),
do.plot=TRUE) {
if(do.plot) {
txt <- attr(x, "txt")
argh <- resolve.defaults(list(...), attr(x, "otherargs"))
A <- as.numeric(coords(x)[1,])
B <- as.numeric(coords(x)[2,])
M <- (A+B)/2
if(!split) {
## double-headed arrow
myarrows(A[1], A[2], B[1], y1=B[2],
angle=angle, frac=frac, moreargs=argh)
if(is.null(pos) && !("adj" %in% names(txt.args)))
pos <- if(abs(A[1] - B[1]) < abs(A[2] - B[2])) 4 else 3
} else {
## two single-headed arrows with text
dM <- (shrink/2) * (B - A)
AM <- M - dM
BM <- M + dM
newfrac <- frac/((1-shrink)/2)
myarrows(AM[1], AM[2], A[1], A[2],
angle=angle, frac=newfrac, left=FALSE, moreargs=argh)
myarrows(BM[1], BM[2], B[1], B[2],
angle=angle, frac=newfrac, left=FALSE, moreargs=argh)
}
if(is.null(txt.shift)) txt.shift <- rep(0, 2) else
txt.shift <- ensure2vector(unlist(txt.shift))
do.call.matched(text.default,
resolve.defaults(list(x=M[1] + txt.shift[1],
y=M[2] + txt.shift[2]),
txt.args,
list(labels=txt, pos=pos),
argh,
.MatchNull=FALSE),
extrargs=c("srt", "family", "xpd"))
}
return(invisible(Window(x)))
}
plot.yardstick
})
print.yardstick <- function(x, ...) {
splat("Yardstick")
if(!is.null(txt <- attr(x, "txt")))
splat("Text:", txt)
ui <- summary(unitname(x))
splat("Length:", pairdist(x)[1,2], ui$plural, ui$explain)
splat("Midpoint:",
paren(paste(signif(c(mean(x$x), mean(x$y)), 3), collapse=", ")))
dx <- diff(range(x$x))
dy <- diff(range(x$y))
orient <- if(dx == 0) "vertical" else
if(dy == 0) "horizontal" else
paste(atan2(dy, dx) * 180/pi, "degrees")
splat("Orientation:", orient)
return(invisible(NULL))
}
## code to draw a decent-looking arrow in spatstat diagrams
## (works in layered objects)
## The name 'onearrow' is used because R contains
## hidden functions [.arrow, length.arrow
onearrow <- function(x0, y0, x1, y1, txt=NULL, ...) {
nomore <- missing(y0) && missing(x1) && missing(y1)
if(is.ppp(x0) && nomore) {
if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points")
X <- x0
} else if(is.psp(x0) && nomore) {
if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment")
X <- endpoints.psp(x0)
} else {
xx <- c(x0, x1)
yy <- c(y0, y1)
B <- boundingbox(list(x=xx, y=yy))
X <- ppp(xx, yy, window=B, check=FALSE)
}
Window(X) <- boundingbox(X)
Y <- diagramobj(X, txt=txt, otherargs=list(...))
class(Y) <- c("onearrow", class(Y))
return(Y)
}
print.onearrow <- function(x, ...) {
cat("Single arrow", fill=TRUE)
if(!is.null(txt <- attr(x, "txt")))
cat("Text:", txt, fill=TRUE)
NextMethod("print")
}
plot.onearrow <- function(x, ...,
add=FALSE,
main="",
retract=0.05,
headfraction=0.25,
headangle=12, # degrees
headnick=0.1, # fraction of head length
col.head=NA,
lwd.head=lwd,
lwd=1,
col=1,
zap=FALSE,
zapfraction=0.07,
pch=1, cex=1,
do.plot=TRUE,
do.points=FALSE,
show.all=!add) {
result <- plot.ppp(x, main=main, add=add,
pch=pch, cex=cex,
do.plot=do.plot && do.points,
show.all=show.all)
if(do.plot) {
if(!do.points && !add)
plot(Frame(x), main="", type="n")
txt <- attr(x, "txt")
argh <- resolve.defaults(list(...), attr(x, "otherargs"))
A <- as.numeric(coords(x)[1,])
B <- as.numeric(coords(x)[2,])
V <- B - A
AR <- A + retract * V
BR <- B - retract * V
H <- B - headfraction * V
HN <- H + headnick * headfraction * V
headlength <- headfraction * sqrt(sum(V^2))
halfwidth <- headlength * tan((headangle/2) * pi/180)
alpha <- atan2(V[2], V[1]) + pi/2
U <- c(cos(alpha), sin(alpha))
HL <- H + halfwidth * U
HR <- H - halfwidth * U
Head <- rbind(HN, HL, BR, HR, HN)
if(!is.na(col.head))
do.call.matched(polygon,
resolve.defaults(list(x=Head),
argh,
list(col=col.head, lwd=lwd.head)))
if(!zap) {
Tail <- AR
} else {
M <- (AR+HN)/2
dM <- (zapfraction/2) * (1-headfraction) * V
dM <- dM + c(-dM[2], dM[1])
ML <- M + dM
MR <- M - dM
Tail <- rbind(AR, ML, MR)
}
do.call.matched(lines,
resolve.defaults(list(x=rbind(Tail, Head)),
argh,
list(col=col, lwd=lwd)),
extrargs=c("col", "lwd", "lty", "xpd", "lend"))
if(!is.null(txt <- attr(x, "txt"))) {
H <- (A+B)/2
do.call.matched(text.default,
resolve.defaults(
list(x=H[1], y=H[2]),
argh,
list(labels=txt, pos=3 + (V[2] != 0))),
extrargs=c("srt", "family", "xpd"))
}
}
return(invisible(result))
}
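## Illustrative usage sketch (not part of the original source): the creator
## functions above are normally added to an existing plot; this assumes the
## surrounding spatstat package (owin, Frame, plot methods) is available.
if(FALSE) {
  ys <- yardstick(0, 0, 1, 1, txt = "1 unit")
  plot(Frame(ys), main = "")                       # a window to draw into
  plot(ys, add = TRUE)
  plot(onearrow(0.1, 0.9, 0.9, 0.1, txt = "flow"), add = TRUE)
}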
|
/R/diagram.R
|
no_license
|
jmetz/spatstat
|
R
| false | false | 10,362 |
r
|
#! /usr/local/bin/Rscript
data_merge <- read.table("clean_data/clean_data.txt", header = TRUE)
# plot of biom1 vs. biom 2
png("figs/fig2.png")
plot(y = data_merge$biom1, x = data_merge$biom2,
xlab = "Biomarker2", ylab = "Biomarker 1")
dev.off()
|
/lectures/07_make/exercise_soln/R/make_fig2.R
|
no_license
|
rossmck94/open-science-course
|
R
| false | false | 251 |
r
|
#-------------------------------------------------------------------------------
# create_df_variables_desc :
# Read features.txt which contains the description of the features given in
# the test and training sets.
#
# The features file contains two columns :
# - V1 : the column number of the feature in the training and test sets
# - V2 : the name of the feature
# V1 and V2 are renamed to feat_id and feat_name, respectively.
#
# For this project we are only interested in the mean and std measures so
# we filter the values, keeping only features with "-mean()" or "-std()" in
# their name.
#
# The name of the resulting features is cleaned :
# - the string "()" is removed from the name
# - the name is translated to lower case
#-------------------------------------------------------------------------------
create_df_variables_desc <- function() {
tbl_df(read.table("data/external/UCI HAR Dataset/features.txt")) %>%
rename(feat_id = V1, feat_name = V2) %>%
filter(grepl("(-mean|-std)\\(\\)", feat_name)) %>%
mutate(feat_name = gsub("\\(\\)", "", feat_name)) %>%
mutate(feat_name = tolower(feat_name))
}
# create a constant with all the features
DF_FEATURES <- create_df_variables_desc()
#-------------------------------------------------------------------------------
# create_df_activity_labels :
# Read activity_labels.txt which contains the codes of the different activities
# and their corresponding labels.
#
# The activity_labels file contains two columns :
# - V1 : the code of the activity used in the training and test sets
# - V2 : the label of the activity
# V1 and V2 are renamed to activity_id and activity_label, respectively.
#-------------------------------------------------------------------------------
create_df_activity_labels <- function() {
tbl_df(read.table("data/external/UCI HAR Dataset/activity_labels.txt")) %>%
rename(activity_id = V1, activity_label = V2)
}
# create a constant describing the list of activities and their label
DF_ACTIVITIES <- create_df_activity_labels()
#-------------------------------------------------------------------------------
# transform_set :
# Create a data set from three files :
# - variable_file contains the measurements (one line per measurement)
# - activity_file contains the activity for each measurement
# - subject_file contains the subject for each measurement
#
# The variable file is filtered : only the features contained in DF_FEATURES
# are kept.
# The activity and the subject are added to the variable set, and the activity id
# is replaced by its label (using a merge with DF_ACTIVITIES).
#
# Finally, the names of the columns are replaced by a better description.
#
# The resulting data set is returned by the function
#-------------------------------------------------------------------------------
transform_set <- function(subject_file, activity_file, variable_file) {
df_subjects <- tbl_df(read.table(subject_file)) %>%
rename(subject = V1)
df_activity <- tbl_df(read.table(activity_file)) %>%
rename(activity_id = V1)
df_var <- tbl_df(read.table(variable_file)) %>%
select(paste0("V", DF_FEATURES$feat_id)) %>%
bind_cols(df_activity, df_subjects)
df_var <- merge(df_var, DF_ACTIVITIES, by = "activity_id") %>%
rename(activity = activity_label) %>%
select(-activity_id)
names(df_var) <- c(DF_FEATURES$feat_name, "subject", "activity")
df_var
}
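#-------------------------------------------------------------------------------
# Usage sketch (not part of the original file): assuming the UCI HAR layout
# used above, the test set could be assembled with transform_set as follows.
#-------------------------------------------------------------------------------
if (FALSE) {
  df_test <- transform_set(
    "data/external/UCI HAR Dataset/test/subject_test.txt",
    "data/external/UCI HAR Dataset/test/y_test.txt",
    "data/external/UCI HAR Dataset/test/X_test.txt"
  )
  str(df_test)
}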
|
/final_work_utils.R
|
no_license
|
dbod/GetAndCleanFinalWork
|
R
| false | false | 3,776 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_micaps.R
\name{read_micaps_3}
\alias{read_micaps_3}
\title{Read micaps type 3 file (general scatter point).}
\usage{
read_micaps_3(filename)
}
\arguments{
\item{filename}{: file name}
}
\value{
a list including the datetime, a data.frame (ID, lon, lat, alt, V1, ...) and so on.
If reading fails, NULL is returned.
}
\description{
Read micaps type 3 file (general scatter point).
}
\examples{
dataV <- read_micaps_3("Z:\\\\data\\\\surface\\\\jiany_rr\\\\r20\\\\17012108.000")
}
|
/man/read_micaps_3.Rd
|
permissive
|
nmcdev/nmcMetIO
|
R
| false | true | 543 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trace_coverage.R
\name{trace_coverage}
\alias{trace_coverage}
\alias{trace_coverage.eventlog}
\alias{trace_coverage.grouped_eventlog}
\title{Metric: Trace coverage}
\usage{
trace_coverage(eventlog, level, append, ...)
\method{trace_coverage}{eventlog}(eventlog, level = c("log", "trace", "case"),
append = F, ...)
\method{trace_coverage}{grouped_eventlog}(eventlog, level = c("log", "trace",
"case"), append = F, ...)
}
\arguments{
\item{eventlog}{The dataset to be used. Should be a (grouped) eventlog object.}
\item{level}{Level of granularity for the analysis: log, trace or case.
For more information, see \code{vignette("metrics", "edeaR")}}
\item{append}{Logical, indicating whether to append results to original event log. Ignored when level is log or trace.}
\item{...}{Deprecated arguments}
}
\description{
Analyses the structuredness of an event log by use of trace frequencies. Applicable at log, case and trace level.
}
\details{
\itemize{
\item Trace: The absolute and relative frequency of each trace is returned
\item Case: for each case, the coverage of the corresponding trace is returned
\item Log: Summary statistics of the coverage of traces is returned.
}
}
\section{Methods (by class)}{
\itemize{
\item \code{eventlog}: Trace coverage metric for eventlog
\item \code{grouped_eventlog}: Trace coverage metric for grouped eventlog
}}
\references{
Swennen, M. (2018). Using Event Log Knowledge to Support Operational Excellence Techniques (Doctoral dissertation). Hasselt University.
}
|
/man/trace_coverage.Rd
|
no_license
|
heoa/edeaR
|
R
| false | true | 1,644 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{prompt_file}
\alias{prompt_file}
\title{Opens a file selection dialog box}
\usage{
prompt_file(
default = "",
caption = "Select files",
multi = TRUE,
filters = NULL,
index = 1
)
}
\value{
The selected file, or NULL if cancelled
}
\description{
Opens a file selection dialog box
}
|
/man/prompt_file.Rd
|
permissive
|
Cyclic3/c3.263
|
R
| false | true | 381 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bulma-menu.R
\name{bulmaMenu}
\alias{bulmaMenu}
\title{bulma Menu}
\usage{
bulmaMenu(...)
}
\arguments{
\item{...}{Slot for bulmaMenuItem and bulmaMenuLabel.}
}
\description{
Create a menu \url{https://bulma.io/documentation/components/menu/}.
}
\note{
You can include as many bulmaMenuLabel as you want. It is better to wrap a bulmaMenu in a bulmaContainer
as well as bulmaColumns (see example).
}
\examples{
if(interactive()){
library(shiny)
shinyApp(
ui = bulmaPage(
bulmaTitle("Hello Bulma"),
bulmaContainer(
bulmaColumns(
bulmaColumn(
width = 4,
bulmaMenu(
# section 1
bulmaMenuLabel(menu_label = "Title 1", target = "title1"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 1"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 2"),
# section 2
bulmaMenuLabel(menu_label = "Title 2", target = "title5"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 1"),
bulmaMenuItem(
active = TRUE,
item_label = "Subtitle 2",
bulmaMenuSubItem(subitem_label = "Subsubtitle 1"),
bulmaMenuSubItem(subitem_label = "Subsubtitle 2"),
bulmaMenuSubItem(subitem_label = "Subsubtitle 3")
),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 3"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 4"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 5"),
# section 3
bulmaMenuLabel(menu_label = "Title 3", target = "title10"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 6"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 7"),
bulmaMenuItem(active = FALSE, item_label = "Subtitle 8")
)
),
bulmaColumn(
width = 8,
bulmaTitle("Only title 1, title 2 and title 3 have html anchors."),
bulmaRadioInput(
"select",
c("Miles per galon" = "mpg", "Rear axle ratio" = "drat"),
selected = "mpg"
),
lapply(1:10, FUN = function(i) {
list(
bulmaTitle(paste0("Title", i), id = paste0("title", i)),
plotOutput(paste0("plot", i))
)
})
)
)
)
),
server = function(input, output) {
lapply(1:10, FUN = function(i) {
output[[paste0("plot", i)]] <- renderPlot(
plot(1:nrow(mtcars), mtcars[[input$select]])
)
})
}
)
}
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
/man/bulmaMenu.Rd
|
no_license
|
PaulC91/shinybulma
|
R
| false | true | 2,868 |
rd
|
## Main function. Create a tidy dataset.
## path variable is path to file where dataset will be saved.
get_data <- function(path = "dataset.txt") {
## Read train data
train_data <- read_data("train")
## Read test data
test_data <- read_data("test")
## Merge data
data <- rbind(train_data, test_data)
data[["Activity"]] <- name_activity(data[["Activity"]])
len <- ncol(data)
    ## Divide into groups by activity and subject
ans_data <- aggregate(data[1 : (len - 2)],
by = list(Activity = data[["Activity"]],
Subject = data[["Subject"]]),
FUN = mean)
## Write data
    write.table(ans_data, path, row.names = FALSE, sep = ",")
}
## Read and subset the data
## dataset should be either "train" or "test".
read_data <- function(dataset) {
##Read data
x_data <- read.table(paste(dataset, "/X_", dataset, ".txt", sep = ""),
header = FALSE, sep = "")
y_data <- read.table(paste(dataset, "/y_", dataset, ".txt", sep = ""),
header = FALSE, sep = "")
subject_data <- read.table(paste(dataset, "/subject_", dataset, ".txt", sep = ""),
header = FALSE)
features <- read.table("features.txt", header = FALSE, sep = "")[[2]]
names(x_data) <- features
## Subset data. List of column has been made manually.
x_data <- x_data[c(1:6, 41:46, 81:86, 121:126, 161:166, 201:202, 214, 215, 227:228,
240:241, 253:254, 266:271, 294:296, 345:350, 373:375, 424:429, 452:454,
503:504, 513, 516:517, 526, 529, 530, 539, 542, 543, 552)]
data <- cbind(x_data, y_data, subject_data)
names(data) <- c(names(x_data), "Activity", "Subject")
data
}
activity_vector <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
"SITTING", "STANDING", "LAYING")
## Name activity
name_activity <- function(x) unlist(lapply(x, function(y) activity_vector[y]))
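## Usage sketch (not part of the original script): with the UCI HAR files
## ("train/", "test/", "features.txt") in the working directory, the tidy
## averaged data set is written to the default path via:
if (FALSE) {
  get_data("dataset.txt")
}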
|
/run_analysis.R
|
no_license
|
pavelgein/GetData2
|
R
| false | false | 2,067 |
r
|
stopifnot(require("testthat"),
require("selfisher"))
data(haddock)
dat <- transform(haddock, tot=nfine+nwide, prop=nwide/(nfine+nwide))
context("Very basic selfisher fitting")
m0 <- selfisher(prop~Lengths, pformula=~0, total=tot, psplit = TRUE, dat)
m1 <- selfisher(prop~Lengths, pformula=~1, total=tot, psplit = TRUE, dat)
m2 <- selfisher(prop~Lengths, total=tot, dat, psplit=TRUE)
test_that("Fixed psplit=0.5", {
expect_equal(unname(fixef(m0)$p), 0)
expect_equal(unname(fixef(m0)$r), c(-36.314353,1.233999), tol=1e-3)
})
test_that("Default is p=~1", {
expect_equal(fixef(m1), fixef(m2))
expect_equal(L50SR(m1), L50SR(m2))
expect_is(m0, "selfisher")
expect_is(m1, "selfisher")
})
test_that("AICtab is working", {
expect_equal(unname(summary(m0)$AICtab), c(119.94059, 122.29669, -57.97029, 36.02903, 34.68329, 22.00000), tol=1e-3)
})
|
/selfisher/tests/testthat/test-basics.R
|
no_license
|
mebrooks/selfisher
|
R
| false | false | 860 |
r
|
stopifnot(require("testthat"),
require("selfisher"))
data(haddock)
dat <- transform(haddock, tot=nfine+nwide, prop=nwide/(nfine+nwide))
context("Very basic selfisher fitting")
m0 <- selfisher(prop~Lengths, pformula=~0, total=tot, psplit = TRUE, dat)
m1 <- selfisher(prop~Lengths, pformula=~1, total=tot, psplit = TRUE, dat)
m2 <- selfisher(prop~Lengths, total=tot, dat, psplit=TRUE)
test_that("Fixed psplit=0.5", {
expect_equal(unname(fixef(m0)$p), 0)
expect_equal(unname(fixef(m0)$r), c(-36.314353,1.233999), tol=1e-3)
})
test_that("Default is p=~1", {
expect_equal(fixef(m1), fixef(m2))
expect_equal(L50SR(m1), L50SR(m2))
expect_is(m0, "selfisher")
expect_is(m1, "selfisher")
})
test_that("AICtab is working", {
expect_equal(unname(summary(m0)$AICtab), c(119.94059, 122.29669, -57.97029, 36.02903, 34.68329, 22.00000), tol=1e-3)
})
|
library(mazeGen)
### Name: gridTwentyLeft
### Title: Grid Twenty Left
### Aliases: gridTwentyLeft
### Keywords: datasets
### ** Examples
## Not run:
##D
##D # Returns a Grid with rank = 20
##D data(gridTwentyLeft)
##D coordinates <- gridTwentyLeft
##D
## End(Not run)
|
/data/genthat_extracted_code/mazeGen/examples/gridTwentyLeft.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 279 |
r
|
#!/bin/env Rscript
args <- commandArgs(trailingOnly=TRUE)
if (is.na(args[2])){
stop("inadequate number of arguments")
}
dfmfilelist <- args[1]
outpdf <- args[2]
dfmfiles <- readLines(dfmfilelist)
dfmlist <- list()
for(i in 1:length(dfmfiles)){
d <- read.table(dfmfiles[i], header = TRUE)
if (nrow(d) > 0){
tname <- gsub(".pos.*$","",basename(dfmfiles[i]))
tname <- gsub("^.*-","",tname)
d$tname <- tname
dfmlist[[i]] <- d
}
}
dfm <- do.call(rbind, dfmlist)
library(ggplot2)
if (!is.null(dfm)){
    p1 <- ggplot(dfm, aes(x = ID, y = tname)) +
        geom_tile(aes(fill = -log10(COND.P))) +
        theme(axis.text.x = element_text(angle = 90, hjust = 1), text = element_text(size=10)) +
        scale_fill_gradient(low = "white", high = "red")
    # save explicitly from p1 (and only when it exists) rather than relying on
    # last_plot(), which is only set once a plot has been printed
    ggsave(outpdf, plot = p1, width = 20, height = 8)
}
|
/modules/eqtl/fusion/fusion.heatmap.drop.R
|
no_license
|
drveera/genie
|
R
| false | false | 841 |
r
|
#' Quantile estimation of a composite extreme value distribution
#'
#' @param p a scalar giving the quantile of the distribution sought
#' @param loc a scalar, vector or matrix giving the location parameter
#' @param scale as above, but scale parameter
#' @param shape as above, but shape parameter
#' @param m a scalar giving the number of values per return period unit, e.g. 365 for daily data giving annual return levels
#' @param alpha a scalar, vector or matrix of weights if within-block variables not identically distributed and of different frequencies
#' @param theta a scalar, vector or matrix of extremal index values
#' @param family a character string giving the family for which return levels sought
#' @param tau a scalar, vector or matrix of values giving the threshold quantile for the GPD (i.e. 1 - probability of exceedance)
#' @param start a 2-vector giving starting values that bound the return level
#'
#' @details
#'
#' If \eqn{F} is the generalised extreme value or generalised Pareto
#' distribution, \code{qev} solves
#' \deqn{\prod_{j=1}^n \big\{F(z)\}^{m \alpha_j \theta_j} = p.}
#'
#' For both distributions, location, scale and shape parameters
#' are given by \code{loc}, \code{scale} and \code{shape}. The
#' generalised Pareto distribution, for \eqn{\xi \neq 0} and \eqn{z > u},
#' is parameterised as \eqn{1 - (1 - \tau) [1 + \xi (z - u) / \psi_u]^{-1/\xi}},
#' where \eqn{u}, \eqn{\psi_u} and \eqn{\xi} are its location, scale and shape
#' parameters, respectively, and \eqn{\tau} corresponds to argument \code{tau}.
#'
#' @examples
#'
#' qev(0.9, c(1, 2), c(1, 1.1), .1, family="gev")
#' qev(0.99, c(1, 2), c(1, 1.1), .1, family="gpd", tau=0.9)
#'
#' @return A scalar or vector of estimates of \code{p}
#'
#' @export
#'
qev <- function(p, loc, scale, shape, m=1, alpha=1, theta=1, family, tau=0, start=NULL) {
if (!(family %in% c("gev", "gpd"))) stop("Invalid family")
loc <- as.matrix(loc)
scale <- as.matrix(scale)
shape <- as.matrix(shape)
nr <- max(nrow(loc), nrow(scale), nrow(shape))
if (length(theta) == 1) theta <- rep(theta, nr)
if (length(alpha) == 1) alpha <- rep(alpha, nr)
alpha <- alpha / sum(alpha)
weights <- m * alpha * theta
nc <- max(ncol(loc), ncol(scale), ncol(shape))
loc <- matrix(loc, nr, nc)
scale <- matrix(scale, nr, nc)
shape <- matrix(shape, nr, nc)
tau <- matrix(tau, nr, nc)
theta <- matrix(theta, nr, nc)
weights <- matrix(weights, nr, nc)
out <- numeric(nc)
for (i in seq_len(nc)) {
out[i] <- .rlvec(p, loc[,i], scale[,i], shape[,i], m, nr, weights[,i], family, tau[,i], theta[,i], start)
}
out
}
.rlvec <- function(p, loc, scale, shape, m, n, weights, family, tau, theta, start=NULL) {
if (is.null(start)) {
if (family == "gev") {
start <- range(.qgev(p^(1/n), loc, scale, shape))
} else {
start <- range(.qgpd(p, loc, scale, shape, 1 - tau, theta, m))
}
dstart <- diff(start)
nullstart <- FALSE
} else {
nullstart <- TRUE
}
while(.rlroot(start[1], loc, scale, shape, tau, weights, p, TRUE, family) > 0) {
if (nullstart) stop("Invalid `start' values given")
start[1] <- start[1] - .2 * dstart
}
while(.rlroot(start[2], loc, scale, shape, tau, weights, p, TRUE, family) < 0) {
if (nullstart) stop("Invalid `start' values given")
start[2] <- start[2] + .2 * dstart
}
opt <- uniroot(.rlroot, start, loc=loc, scale=scale, shape=shape, tau=tau, weights=weights, p=p, TRUE, family)
opt$root
}
.pmax <- function(x, loc, scale, shape, tau, weights, log, family) {
if (family == "gpd") {
out <- .pgpd(x, loc, scale, shape, tau, FALSE, TRUE)
} else {
out <- .pgev(x, loc, scale, shape, FALSE, TRUE)
}
out <- sum(weights * out)
if (!log) out <- exp(out)
out
}
.rlroot <- function(x, loc, scale, shape, tau, weights, p, log, family) {
if (log) p <- log(p)
.pmax(x, loc, scale, shape, tau, weights, log, family) - p
}
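# Sanity-check sketch (not part of the original file): for a single
# identically distributed block (one set of parameters, m = 1,
# alpha = theta = 1), qev() should reduce to the ordinary GEV quantile
# z_p = mu + sigma * ((-log p)^(-xi) - 1) / xi.
if (FALSE) {
  p <- 0.9; mu <- 1; sigma <- 1; xi <- 0.1
  qev(p, mu, sigma, xi, family = "gev")
  mu + sigma * ((-log(p))^(-xi) - 1) / xi   # should agree closely
}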
|
/evgam/R/retlev.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 3,816 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Violations}
\alias{Violations}
\alias{ViolationCodes}
\alias{Cuisines}
\title{NYC Restaurant Health Violations}
\format{
A data frame with 480,621 observations on the following 16 variables.
\describe{
\item{\code{camis}}{unique identifier}
\item{\code{dba}}{full name doing business as}
\item{\code{boro}}{borough of New York}
\item{\code{building}}{building name}
\item{\code{street}}{street address}
\item{\code{zipcode}}{zipcode}
\item{\code{phone}}{phone number}
\item{\code{inspection_date}}{inspection date}
\item{\code{action}}{action taken}
\item{\code{violation_code}}{violation code, see \code{\link{ViolationCodes}}}
\item{\code{score}}{inspection score}
\item{\code{grade}}{inspection grade}
\item{\code{grade_date}}{grade date}
\item{\code{record_date}}{recording date}
  \item{\code{inspection_type}}{inspection type}
\item{\code{cuisine_code}}{cuisine code, see \code{\link{Cuisines}}}
}
}
\source{
NYC Open Data, \url{https://data.cityofnewyork.us/Health/DOHMH-New-York-City-Restaurant-Inspection-Results/xx67-kt59}
}
\usage{
Violations
ViolationCodes
Cuisines
}
\description{
NYC Restaurant Health Violations
}
\examples{
data(Violations)
Violations \%>\%
inner_join(Cuisines, by = "cuisine_code") \%>\%
filter(cuisine_description == "American") \%>\%
arrange(grade_date) \%>\%
head()
}
\seealso{
\code{\link{ViolationCodes}}, \code{\link{Cuisines}}
}
\keyword{datasets}
|
/man/Violations.Rd
|
no_license
|
pathebah/mdsr
|
R
| false | true | 1,519 |
rd
|
local({
# the requested version of renv
version <- "0.17.0"
# the project directory
project <- getwd()
# figure out whether the autoloader is enabled
enabled <- local({
# first, check config option
override <- getOption("renv.config.autoloader.enabled")
if (!is.null(override))
return(override)
# next, check environment variables
# TODO: prefer using the configuration one in the future
envvars <- c(
"RENV_CONFIG_AUTOLOADER_ENABLED",
"RENV_AUTOLOADER_ENABLED",
"RENV_ACTIVATE_PROJECT"
)
for (envvar in envvars) {
envval <- Sys.getenv(envvar, unset = NA)
if (!is.na(envval))
return(tolower(envval) %in% c("true", "t", "1"))
}
# enable by default
TRUE
})
if (!enabled)
return(FALSE)
# avoid recursion
if (identical(getOption("renv.autoloader.running"), TRUE)) {
warning("ignoring recursive attempt to run renv autoloader")
return(invisible(TRUE))
}
# signal that we're loading renv during R startup
options(renv.autoloader.running = TRUE)
on.exit(options(renv.autoloader.running = NULL), add = TRUE)
# signal that we've consented to use renv
options(renv.consent = TRUE)
# load the 'utils' package eagerly -- this ensures that renv shims, which
# mask 'utils' packages, will come first on the search path
library(utils, lib.loc = .Library)
# unload renv if it's already been loaded
if ("renv" %in% loadedNamespaces())
unloadNamespace("renv")
# load bootstrap tools
`%||%` <- function(x, y) {
if (is.environment(x) || length(x)) x else y
}
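# e.g. NULL %||% "fallback" returns "fallback", while "value" %||% "fallback"
# returns "value"; environments always count as non-empty here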
bootstrap <- function(version, library) {
# attempt to download renv
tarball <- tryCatch(renv_bootstrap_download(version), error = identity)
if (inherits(tarball, "error"))
stop("failed to download renv ", version)
# now attempt to install
status <- tryCatch(renv_bootstrap_install(version, tarball, library), error = identity)
if (inherits(status, "error"))
stop("failed to install renv ", version)
}
renv_bootstrap_tests_running <- function() {
getOption("renv.tests.running", default = FALSE)
}
renv_bootstrap_repos <- function() {
# check for repos override
repos <- Sys.getenv("RENV_CONFIG_REPOS_OVERRIDE", unset = NA)
if (!is.na(repos))
return(repos)
# check for lockfile repositories
repos <- tryCatch(renv_bootstrap_repos_lockfile(), error = identity)
if (!inherits(repos, "error") && length(repos))
return(repos)
# if we're testing, re-use the test repositories
if (renv_bootstrap_tests_running()) {
repos <- getOption("renv.tests.repos")
if (!is.null(repos))
return(repos)
}
# retrieve current repos
repos <- getOption("repos")
# ensure @CRAN@ entries are resolved
repos[repos == "@CRAN@"] <- getOption(
"renv.repos.cran",
"https://cloud.r-project.org"
)
# add in renv.bootstrap.repos if set
default <- c(FALLBACK = "https://cloud.r-project.org")
extra <- getOption("renv.bootstrap.repos", default = default)
repos <- c(repos, extra)
# remove duplicates that might've snuck in
dupes <- duplicated(repos) | duplicated(names(repos))
repos[!dupes]
}
renv_bootstrap_repos_lockfile <- function() {
lockpath <- Sys.getenv("RENV_PATHS_LOCKFILE", unset = "renv.lock")
if (!file.exists(lockpath))
return(NULL)
lockfile <- tryCatch(renv_json_read(lockpath), error = identity)
if (inherits(lockfile, "error")) {
warning(lockfile)
return(NULL)
}
repos <- lockfile$R$Repositories
if (length(repos) == 0)
return(NULL)
keys <- vapply(repos, `[[`, "Name", FUN.VALUE = character(1))
vals <- vapply(repos, `[[`, "URL", FUN.VALUE = character(1))
names(vals) <- keys
return(vals)
}
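# e.g. a lockfile whose R$Repositories field is
#   list(list(Name = "CRAN", URL = "https://cloud.r-project.org"))
# yields the named vector c(CRAN = "https://cloud.r-project.org")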
renv_bootstrap_download <- function(version) {
# if the renv version number has 4 components, assume it must
# be retrieved via github
nv <- numeric_version(version)
components <- unclass(nv)[[1]]
# if this appears to be a development version of 'renv', we'll
# try to restore from github
dev <- length(components) == 4L
# begin collecting different methods for finding renv
methods <- c(
renv_bootstrap_download_tarball,
if (dev)
renv_bootstrap_download_github
else c(
renv_bootstrap_download_cran_latest,
renv_bootstrap_download_cran_archive
)
)
for (method in methods) {
path <- tryCatch(method(version), error = identity)
if (is.character(path) && file.exists(path))
return(path)
}
stop("failed to download renv ", version)
}
renv_bootstrap_download_impl <- function(url, destfile) {
mode <- "wb"
# https://bugs.r-project.org/bugzilla/show_bug.cgi?id=17715
fixup <-
Sys.info()[["sysname"]] == "Windows" &&
substring(url, 1L, 5L) == "file:"
if (fixup)
mode <- "w+b"
args <- list(
url = url,
destfile = destfile,
mode = mode,
quiet = TRUE
)
if ("headers" %in% names(formals(utils::download.file)))
args$headers <- renv_bootstrap_download_custom_headers(url)
do.call(utils::download.file, args)
}
renv_bootstrap_download_custom_headers <- function(url) {
headers <- getOption("renv.download.headers")
if (is.null(headers))
return(character())
if (!is.function(headers))
stopf("'renv.download.headers' is not a function")
headers <- headers(url)
if (length(headers) == 0L)
return(character())
if (is.list(headers))
headers <- unlist(headers, recursive = FALSE, use.names = TRUE)
ok <-
is.character(headers) &&
is.character(names(headers)) &&
all(nzchar(names(headers)))
if (!ok)
stop("invocation of 'renv.download.headers' did not return a named character vector")
headers
}
renv_bootstrap_download_cran_latest <- function(version) {
spec <- renv_bootstrap_download_cran_latest_find(version)
type <- spec$type
repos <- spec$repos
message("* Downloading renv ", version, " ... ", appendLF = FALSE)
baseurl <- utils::contrib.url(repos = repos, type = type)
ext <- if (identical(type, "source"))
".tar.gz"
else if (Sys.info()[["sysname"]] == "Windows")
".zip"
else
".tgz"
name <- sprintf("renv_%s%s", version, ext)
url <- paste(baseurl, name, sep = "/")
destfile <- file.path(tempdir(), name)
status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)
if (inherits(status, "condition")) {
message("FAILED")
return(FALSE)
}
# report success and return
message("OK (downloaded ", type, ")")
destfile
}
renv_bootstrap_download_cran_latest_find <- function(version) {
# check whether binaries are supported on this system
binary <-
getOption("renv.bootstrap.binary", default = TRUE) &&
!identical(.Platform$pkgType, "source") &&
!identical(getOption("pkgType"), "source") &&
Sys.info()[["sysname"]] %in% c("Darwin", "Windows")
types <- c(if (binary) "binary", "source")
# iterate over types + repositories
for (type in types) {
for (repos in renv_bootstrap_repos()) {
# retrieve package database
db <- tryCatch(
as.data.frame(
utils::available.packages(type = type, repos = repos),
stringsAsFactors = FALSE
),
error = identity
)
if (inherits(db, "error"))
next
# check for compatible entry
entry <- db[db$Package %in% "renv" & db$Version %in% version, ]
if (nrow(entry) == 0)
next
# found it; return spec to caller
spec <- list(entry = entry, type = type, repos = repos)
return(spec)
}
}
# if we got here, we failed to find renv
fmt <- "renv %s is not available from your declared package repositories"
stop(sprintf(fmt, version))
}
renv_bootstrap_download_cran_archive <- function(version) {
name <- sprintf("renv_%s.tar.gz", version)
repos <- renv_bootstrap_repos()
urls <- file.path(repos, "src/contrib/Archive/renv", name)
destfile <- file.path(tempdir(), name)
message("* Downloading renv ", version, " ... ", appendLF = FALSE)
for (url in urls) {
status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)
if (identical(status, 0L)) {
message("OK")
return(destfile)
}
}
message("FAILED")
return(FALSE)
}
renv_bootstrap_download_tarball <- function(version) {
# if the user has provided the path to a tarball via
# an environment variable, then use it
tarball <- Sys.getenv("RENV_BOOTSTRAP_TARBALL", unset = NA)
if (is.na(tarball))
return()
# allow directories
if (dir.exists(tarball)) {
name <- sprintf("renv_%s.tar.gz", version)
tarball <- file.path(tarball, name)
}
# bail if it doesn't exist
if (!file.exists(tarball)) {
# let the user know we weren't able to honour their request
fmt <- "* RENV_BOOTSTRAP_TARBALL is set (%s) but does not exist."
msg <- sprintf(fmt, tarball)
warning(msg)
# bail
return()
}
fmt <- "* Bootstrapping with tarball at path '%s'."
msg <- sprintf(fmt, tarball)
message(msg)
tarball
}
renv_bootstrap_download_github <- function(version) {
enabled <- Sys.getenv("RENV_BOOTSTRAP_FROM_GITHUB", unset = "TRUE")
if (!identical(enabled, "TRUE"))
return(FALSE)
# prepare download options
pat <- Sys.getenv("GITHUB_PAT")
if (nzchar(Sys.which("curl")) && nzchar(pat)) {
fmt <- "--location --fail --header \"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "curl", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
} else if (nzchar(Sys.which("wget")) && nzchar(pat)) {
fmt <- "--header=\"Authorization: token %s\""
extra <- sprintf(fmt, pat)
saved <- options("download.file.method", "download.file.extra")
options(download.file.method = "wget", download.file.extra = extra)
on.exit(do.call(base::options, saved), add = TRUE)
}
message("* Downloading renv ", version, " from GitHub ... ", appendLF = FALSE)
url <- file.path("https://api.github.com/repos/rstudio/renv/tarball", version)
name <- sprintf("renv_%s.tar.gz", version)
destfile <- file.path(tempdir(), name)
status <- tryCatch(
renv_bootstrap_download_impl(url, destfile),
condition = identity
)
if (!identical(status, 0L)) {
message("FAILED")
return(FALSE)
}
message("OK")
return(destfile)
}
renv_bootstrap_install <- function(version, tarball, library) {
# attempt to install it into project library
message("* Installing renv ", version, " ... ", appendLF = FALSE)
dir.create(library, showWarnings = FALSE, recursive = TRUE)
# invoke using system2 so we can capture and report output
bin <- R.home("bin")
exe <- if (Sys.info()[["sysname"]] == "Windows") "R.exe" else "R"
r <- file.path(bin, exe)
args <- c(
"--vanilla", "CMD", "INSTALL", "--no-multiarch",
"-l", shQuote(path.expand(library)),
shQuote(path.expand(tarball))
)
output <- system2(r, args, stdout = TRUE, stderr = TRUE)
message("Done!")
# check for successful install
status <- attr(output, "status")
if (is.numeric(status) && !identical(status, 0L)) {
header <- "Error installing renv:"
lines <- paste(rep.int("=", nchar(header)), collapse = "")
text <- c(header, lines, output)
writeLines(text, con = stderr())
}
status
}
renv_bootstrap_platform_prefix <- function() {
# construct version prefix
version <- paste(R.version$major, R.version$minor, sep = ".")
prefix <- paste("R", numeric_version(version)[1, 1:2], sep = "-")
# include SVN revision for development versions of R
# (to avoid sharing platform-specific artefacts with released versions of R)
devel <-
identical(R.version[["status"]], "Under development (unstable)") ||
identical(R.version[["nickname"]], "Unsuffered Consequences")
if (devel)
prefix <- paste(prefix, R.version[["svn rev"]], sep = "-r")
# build list of path components
components <- c(prefix, R.version$platform)
# include prefix if provided by user
prefix <- renv_bootstrap_platform_prefix_impl()
if (!is.na(prefix) && nzchar(prefix))
components <- c(prefix, components)
# build prefix
paste(components, collapse = "/")
}
renv_bootstrap_platform_prefix_impl <- function() {
# if an explicit prefix has been supplied, use it
prefix <- Sys.getenv("RENV_PATHS_PREFIX", unset = NA)
if (!is.na(prefix))
return(prefix)
# if the user has requested an automatic prefix, generate it
auto <- Sys.getenv("RENV_PATHS_PREFIX_AUTO", unset = NA)
if (auto %in% c("TRUE", "True", "true", "1"))
return(renv_bootstrap_platform_prefix_auto())
# empty string on failure
""
}
renv_bootstrap_platform_prefix_auto <- function() {
prefix <- tryCatch(renv_bootstrap_platform_os(), error = identity)
if (inherits(prefix, "error") || prefix %in% "unknown") {
msg <- paste(
"failed to infer current operating system",
"please file a bug report at https://github.com/rstudio/renv/issues",
sep = "; "
)
warning(msg)
}
prefix
}
renv_bootstrap_platform_os <- function() {
sysinfo <- Sys.info()
sysname <- sysinfo[["sysname"]]
# handle Windows + macOS up front
if (sysname == "Windows")
return("windows")
else if (sysname == "Darwin")
return("macos")
# check for os-release files
for (file in c("/etc/os-release", "/usr/lib/os-release"))
if (file.exists(file))
return(renv_bootstrap_platform_os_via_os_release(file, sysinfo))
# check for redhat-release files
if (file.exists("/etc/redhat-release"))
return(renv_bootstrap_platform_os_via_redhat_release())
"unknown"
}
renv_bootstrap_platform_os_via_os_release <- function(file, sysinfo) {
# read /etc/os-release
release <- utils::read.table(
file = file,
sep = "=",
quote = c("\"", "'"),
col.names = c("Key", "Value"),
comment.char = "#",
stringsAsFactors = FALSE
)
vars <- as.list(release$Value)
names(vars) <- release$Key
# get os name
os <- tolower(sysinfo[["sysname"]])
# read id
id <- "unknown"
for (field in c("ID", "ID_LIKE")) {
if (field %in% names(vars) && nzchar(vars[[field]])) {
id <- vars[[field]]
break
}
}
# read version
version <- "unknown"
for (field in c("UBUNTU_CODENAME", "VERSION_CODENAME", "VERSION_ID", "BUILD_ID")) {
if (field %in% names(vars) && nzchar(vars[[field]])) {
version <- vars[[field]]
break
}
}
# join together
paste(c(os, id, version), collapse = "-")
}
renv_bootstrap_platform_os_via_redhat_release <- function() {
# read /etc/redhat-release
contents <- readLines("/etc/redhat-release", warn = FALSE)
# infer id
id <- if (grepl("centos", contents, ignore.case = TRUE))
"centos"
else if (grepl("redhat", contents, ignore.case = TRUE))
"redhat"
else
"unknown"
# try to find a version component (very hacky)
version <- "unknown"
parts <- strsplit(contents, "[[:space:]]")[[1L]]
for (part in parts) {
nv <- tryCatch(numeric_version(part), error = identity)
if (inherits(nv, "error"))
next
version <- nv[1, 1]
break
}
paste(c("linux", id, version), collapse = "-")
}
renv_bootstrap_library_root_name <- function(project) {
# use project name as-is if requested
asis <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT_ASIS", unset = "FALSE")
if (asis)
return(basename(project))
# otherwise, disambiguate based on project's path
id <- substring(renv_bootstrap_hash_text(project), 1L, 8L)
paste(basename(project), id, sep = "-")
}
renv_bootstrap_library_root <- function(project) {
prefix <- renv_bootstrap_profile_prefix()
path <- Sys.getenv("RENV_PATHS_LIBRARY", unset = NA)
if (!is.na(path))
return(paste(c(path, prefix), collapse = "/"))
path <- renv_bootstrap_library_root_impl(project)
if (!is.null(path)) {
name <- renv_bootstrap_library_root_name(project)
return(paste(c(path, prefix, name), collapse = "/"))
}
renv_bootstrap_paths_renv("library", project = project)
}
renv_bootstrap_library_root_impl <- function(project) {
root <- Sys.getenv("RENV_PATHS_LIBRARY_ROOT", unset = NA)
if (!is.na(root))
return(root)
type <- renv_bootstrap_project_type(project)
if (identical(type, "package")) {
userdir <- renv_bootstrap_user_dir()
return(file.path(userdir, "library"))
}
}
renv_bootstrap_validate_version <- function(version) {
loadedversion <- utils::packageDescription("renv", fields = "Version")
if (version == loadedversion)
return(TRUE)
# assume four-component versions are from GitHub;
# three-component versions are from CRAN
components <- strsplit(loadedversion, "[.-]")[[1]]
remote <- if (length(components) == 4L)
paste("rstudio/renv", loadedversion, sep = "@")
else
paste("renv", loadedversion, sep = "@")
fmt <- paste(
"renv %1$s was loaded from project library, but this project is configured to use renv %2$s.",
"Use `renv::record(\"%3$s\")` to record renv %1$s in the lockfile.",
"Use `renv::restore(packages = \"renv\")` to install renv %2$s into the project library.",
sep = "\n"
)
msg <- sprintf(fmt, loadedversion, version, remote)
warning(msg, call. = FALSE)
FALSE
}
renv_bootstrap_hash_text <- function(text) {
hashfile <- tempfile("renv-hash-")
on.exit(unlink(hashfile), add = TRUE)
writeLines(text, con = hashfile)
tools::md5sum(hashfile)
}
renv_bootstrap_load <- function(project, libpath, version) {
# try to load renv from the project library
if (!requireNamespace("renv", lib.loc = libpath, quietly = TRUE))
return(FALSE)
# warn if the version of renv loaded does not match
renv_bootstrap_validate_version(version)
# execute renv load hooks, if any
hooks <- getHook("renv::autoload")
for (hook in hooks)
if (is.function(hook))
tryCatch(hook(), error = warning)
# load the project
renv::load(project)
TRUE
}
renv_bootstrap_profile_load <- function(project) {
# if RENV_PROFILE is already set, just use that
profile <- Sys.getenv("RENV_PROFILE", unset = NA)
if (!is.na(profile) && nzchar(profile))
return(profile)
# check for a profile file (nothing to do if it doesn't exist)
path <- renv_bootstrap_paths_renv("profile", profile = FALSE, project = project)
if (!file.exists(path))
return(NULL)
# read the profile, and set it if it exists
contents <- readLines(path, warn = FALSE)
if (length(contents) == 0L)
return(NULL)
# set RENV_PROFILE
profile <- contents[[1L]]
if (!profile %in% c("", "default"))
Sys.setenv(RENV_PROFILE = profile)
profile
}
renv_bootstrap_profile_prefix <- function() {
profile <- renv_bootstrap_profile_get()
if (!is.null(profile))
return(file.path("profiles", profile, "renv"))
}
renv_bootstrap_profile_get <- function() {
profile <- Sys.getenv("RENV_PROFILE", unset = "")
renv_bootstrap_profile_normalize(profile)
}
renv_bootstrap_profile_set <- function(profile) {
profile <- renv_bootstrap_profile_normalize(profile)
if (is.null(profile))
Sys.unsetenv("RENV_PROFILE")
else
Sys.setenv(RENV_PROFILE = profile)
}
renv_bootstrap_profile_normalize <- function(profile) {
if (is.null(profile) || profile %in% c("", "default"))
return(NULL)
profile
}
renv_bootstrap_path_absolute <- function(path) {
substr(path, 1L, 1L) %in% c("~", "/", "\\") || (
substr(path, 1L, 1L) %in% c(letters, LETTERS) &&
substr(path, 2L, 3L) %in% c(":/", ":\\")
)
}
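# e.g. "~/renv", "/opt/renv" and "C:/renv" are treated as absolute here, while
# a bare "renv" is relative and is resolved against the project directory below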
renv_bootstrap_paths_renv <- function(..., profile = TRUE, project = NULL) {
renv <- Sys.getenv("RENV_PATHS_RENV", unset = "renv")
root <- if (renv_bootstrap_path_absolute(renv)) NULL else project
prefix <- if (profile) renv_bootstrap_profile_prefix()
components <- c(root, renv, prefix, ...)
paste(components, collapse = "/")
}
renv_bootstrap_project_type <- function(path) {
descpath <- file.path(path, "DESCRIPTION")
if (!file.exists(descpath))
return("unknown")
desc <- tryCatch(
read.dcf(descpath, all = TRUE),
error = identity
)
if (inherits(desc, "error"))
return("unknown")
type <- desc$Type
if (!is.null(type))
return(tolower(type))
package <- desc$Package
if (!is.null(package))
return("package")
"unknown"
}
renv_bootstrap_user_dir <- function() {
dir <- renv_bootstrap_user_dir_impl()
path.expand(chartr("\\", "/", dir))
}
renv_bootstrap_user_dir_impl <- function() {
# use local override if set
override <- getOption("renv.userdir.override")
if (!is.null(override))
return(override)
# use R_user_dir if available
tools <- asNamespace("tools")
if (is.function(tools$R_user_dir))
return(tools$R_user_dir("renv", "cache"))
# try using our own backfill for older versions of R
envvars <- c("R_USER_CACHE_DIR", "XDG_CACHE_HOME")
for (envvar in envvars) {
root <- Sys.getenv(envvar, unset = NA)
if (!is.na(root))
return(file.path(root, "R/renv"))
}
# use platform-specific default fallbacks
if (Sys.info()[["sysname"]] == "Windows")
file.path(Sys.getenv("LOCALAPPDATA"), "R/cache/R/renv")
else if (Sys.info()[["sysname"]] == "Darwin")
"~/Library/Caches/org.R-project.R/R/renv"
else
"~/.cache/R/renv"
}
renv_json_read <- function(file = NULL, text = NULL) {
jlerr <- NULL
# if jsonlite is loaded, use that instead
if ("jsonlite" %in% loadedNamespaces()) {
json <- catch(renv_json_read_jsonlite(file, text))
if (!inherits(json, "error"))
return(json)
jlerr <- json
}
# otherwise, fall back to the default JSON reader
json <- catch(renv_json_read_default(file, text))
if (!inherits(json, "error"))
return(json)
# report an error
if (!is.null(jlerr))
stop(jlerr)
else
stop(json)
}
renv_json_read_jsonlite <- function(file = NULL, text = NULL) {
text <- paste(text %||% read(file), collapse = "\n")
jsonlite::fromJSON(txt = text, simplifyVector = FALSE)
}
renv_json_read_default <- function(file = NULL, text = NULL) {
# find strings in the JSON
text <- paste(text %||% read(file), collapse = "\n")
pattern <- '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]'
locs <- gregexpr(pattern, text, perl = TRUE)[[1]]
# if any are found, replace them with placeholders
replaced <- text
strings <- character()
replacements <- character()
if (!identical(c(locs), -1L)) {
# get the string values
starts <- locs
ends <- locs + attr(locs, "match.length") - 1L
strings <- substring(text, starts, ends)
# only keep those requiring escaping
strings <- grep("[[\\]{}:]", strings, perl = TRUE, value = TRUE)
# compute replacements
replacements <- sprintf('"\032%i\032"', seq_along(strings))
# replace the strings
mapply(function(string, replacement) {
replaced <<- sub(string, replacement, replaced, fixed = TRUE)
}, strings, replacements)
}
# transform the JSON into something the R parser understands
transformed <- replaced
transformed <- gsub("{}", "`names<-`(list(), character())", transformed, fixed = TRUE)
transformed <- gsub("[[{]", "list(", transformed, perl = TRUE)
transformed <- gsub("[]}]", ")", transformed, perl = TRUE)
transformed <- gsub(":", "=", transformed, fixed = TRUE)
text <- paste(transformed, collapse = "\n")
# parse it
json <- parse(text = text, keep.source = FALSE, srcfile = NULL)[[1L]]
# construct map between source strings, replaced strings
map <- as.character(parse(text = strings))
names(map) <- as.character(parse(text = replacements))
# convert to list
map <- as.list(map)
# remap strings in object
remapped <- renv_json_remap(json, map)
# evaluate
eval(remapped, envir = baseenv())
}
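# e.g. the JSON text '{"a": [1, 2]}' is rewritten to 'list("a"= list(1, 2))'
# before parsing; quoted strings containing any of [ ] { } : are swapped for
# placeholders first so the rewrite cannot corrupt their contents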
renv_json_remap <- function(json, map) {
# fix names
if (!is.null(names(json))) {
lhs <- match(names(json), names(map), nomatch = 0L)
rhs <- match(names(map), names(json), nomatch = 0L)
names(json)[rhs] <- map[lhs]
}
# fix values
if (is.character(json))
return(map[[json]] %||% json)
# handle true, false, null
if (is.name(json)) {
text <- as.character(json)
if (text == "true")
return(TRUE)
else if (text == "false")
return(FALSE)
else if (text == "null")
return(NULL)
}
# recurse
if (is.recursive(json)) {
for (i in seq_along(json)) {
json[i] <- list(renv_json_remap(json[[i]], map))
}
}
json
}
# load the renv profile, if any
renv_bootstrap_profile_load(project)
# construct path to library root
root <- renv_bootstrap_library_root(project)
# construct library prefix for platform
prefix <- renv_bootstrap_platform_prefix()
# construct full libpath
libpath <- file.path(root, prefix)
# attempt to load
if (renv_bootstrap_load(project, libpath, version))
return(TRUE)
# load failed; inform user we're about to bootstrap
prefix <- paste("# Bootstrapping renv", version)
postfix <- paste(rep.int("-", 77L - nchar(prefix)), collapse = "")
header <- paste(prefix, postfix)
message(header)
# perform bootstrap
bootstrap(version, libpath)
# exit early if we're just testing bootstrap
if (!is.na(Sys.getenv("RENV_BOOTSTRAP_INSTALL_ONLY", unset = NA)))
return(TRUE)
# try again to load
if (requireNamespace("renv", lib.loc = libpath, quietly = TRUE)) {
message("* Successfully installed and loaded renv ", version, ".")
return(renv::load())
}
# failed to download or load renv; warn the user
msg <- c(
"Failed to find an renv installation: the project will not be loaded.",
"Use `renv::activate()` to re-initialize the project."
)
warning(paste(msg, collapse = "\n"), call. = FALSE)
})
|
/book/renv/activate.R
|
permissive
|
mlr-org/mlr3book
|
R
| false | false | 27,691 |
r
|
#prepare to find object type
install.packages("pryr")
library(pryr)
#download data set
library(MASS)
df=data("USArrests")
#test for generic function
mean(USArrests[["Murder"]])
#note: UseMethod() can only be called from inside a generic function's body,
#so calling it at the top level is an error; mean() already dispatches on the
#class of its argument (here via mean.default())
#UseMethod(mean, "Murder")
#determine object type
otype(data("USArrests"))
otype(USArrests["Murder"])
isS4(USArrests)
isS4("Murder")
#explore creating an S4 object
state = setClass("state", slots=list(name="character",murder="numeric"))
s=new("state", murder=13.2)
otype(s)
#call an S4 object
s@murder
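#--- illustrative extension (not part of the original assignment) ---
#an S4 show() method controls how objects of the class print; it uses only
#the slots defined for the state class above (name, murder)
setMethod("show", "state", function(object) {
  cat("state:", object@name, "- murder rate:", object@murder, "\n")
})
s2 = new("state", name="North Carolina", murder=13.0)
s2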
|
/assignment7.R
|
no_license
|
cnmwebb/assignment7
|
R
| false | false | 514 |
r
|
##### set functions -----
# refresh current year tournament results
update_tourneys <- function() {
library(rvest)
library(tidyverse)
library(lubridate)
library(jsonlite)
library(janitor)
##### get tournament schedule and IDs ----
grab_id <- "[[:digit:]]+"
grab_tournament_name <- "(?<=>).*(?=</option)"
grab_date <- "(?<=>).*(?=</span)"
tournament_ids <- integer() #empty vector to pull tournament ids for season
tournament_names <- character() ##empty vector to pull tournament names for season
tournament_dates <- character()
tournament_year <- character()
tournament_sites <- character()
url <- paste0('https://www.espn.com/golf/leaderboard/_/tournamentId/401242997/')
webpage <- read_html(url)
tournament_id_data <- webpage %>%
html_nodes('optgroup') %>%
html_nodes('option')
#loop id_data and grab tournament ids and names for individual seasons
for(i in 1:length(tournament_id_data)){
to_string <- toString(tournament_id_data[i])
#tournament ids
tournament_id <- str_extract(to_string, grab_id)
tournament_ids <- c(tournament_ids, tournament_id)
#tournament names
tournament_name <- str_extract(to_string, grab_tournament_name)
tournament_names <- c(tournament_names, tournament_name)
#tournament dates
url <- paste0('https://www.espn.com/golf/leaderboard/_/tournamentId/', tournament_id)
webpage <- read_html(url)
tournament_date <- webpage %>%
html_nodes('span.Leaderboard__Event__Date.n7') %>%
toString() %>%
str_extract(grab_date)
tournament_dates <- c(tournament_dates, tournament_date)
tournament_site <- webpage %>%
html_nodes('div.Leaderboard__Course__Location.n8') %>%
toString() %>%
str_extract("(?<=>).*(?=<!-- --> -)")
tournament_sites <- c(tournament_sites, tournament_site)
}
tournaments <- data.frame(tournament_ids, tournament_year = 2021, tournament_sites, tournament_names, tournament_dates, stringsAsFactors = F) %>%
mutate(start_date = gsub(" - .+,", ",", tournament_dates),
start_date = gsub("Oct ", "October ", start_date),
start_date = as.Date(start_date, format = "%B %d, %Y"))
saveRDS(tournaments, here::here("tournament_list_curyr.rData"))
##### pull leaderboard data for each tournament ----
#establish empty dataframe
scores <- tibble(pos = numeric(),
player = character(),
to_par = numeric(),
r1 = numeric(),
r2 = numeric(),
r3 = numeric(),
r4 = numeric(),
tot = numeric(),
earnings = numeric(),
fedex_pts = numeric())
ignore_list <- c(401251634)
tournament_ids <- tournaments %>%
filter(!tournament_ids %in% ignore_list,
start_date <= (Sys.Date() - 4)) %>%
pull(tournament_ids)
for( t in 1:length(tournament_ids)){
url <- paste0('https://www.espn.com/golf/leaderboard/_/tournamentId/', tournament_ids[t])
webpage <- read_html(url)
id_data <- html_nodes(webpage, 'a.AnchorLink.leaderboard_player_name') #links with player ids
grab_id <- "[[:digit:]]+" #reg expression to pull id from player links
ids <- integer() #empty vector to put player ids
#loop id_data and grab only player ids
for(i in 1:length(id_data)){
to_string <- toString(id_data[i])
id <- str_extract(to_string, grab_id)
ids <- c(ids, id)
}
node_list <- webpage %>%
html_nodes("table")
if(length(node_list) == 1){
leaderboard <- node_list %>%
html_table(fill = TRUE) %>%
.[[1]] %>%
clean_names() %>%
mutate(player_id = ids)
} else if(length(node_list) == 2) {
leaderboard <- node_list %>%
.[2] %>%
html_table(fill = TRUE) %>%
.[[1]] %>%
clean_names() %>%
mutate(player_id = ids)
} else {
leaderboard <- tibble(pos = numeric(),
player = character(),
to_par = numeric(),
r1 = numeric(),
r2 = numeric(),
r3 = numeric(),
r4 = numeric(),
tot = numeric(),
earnings = numeric(),
fedex_pts = numeric(),
player_id = character())
}
leaderboard <- leaderboard %>%
mutate(tournament_id = tournament_ids[t])
if(ncol(leaderboard) < 12) {
leaderboard$r4 <- NA
}
scores <- rbind(scores, leaderboard)
}
saveRDS(scores, here::here("leaderboard_scrape_curyr.rData"))
}
# used in preparing historic score data; converts round scores into a standardized format
Norm_Fun <- function(col) {
(col - mean(col, na.rm = T))/sd(col, na.rm = T)
}
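# e.g. Norm_Fun(c(68, 70, 72)) returns -1, 0, 1: each score expressed as a
# z-score relative to the vector's own mean and standard deviation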
# used in prep; cleans the names of the imported tournament field to match with historic results
clean_field <- function(db) {
return(db %>%
mutate(player = gsub("(^.+), (.+$)", "\\2 \\1", name_raw),
player = gsub(",", "", player),
player = gsub("Tyler Strafaci", "ty strafaci (a)", player),
player = gsub("Byeong Hun", "Byeong-Hun", player),
player = gsub("Mark Anderson", "Mark D. Anderson", player),
player = gsub("Fabián Gómez", "Fabian Gomez", player),
player = gsub("Nelson Ledesma", "Nelson Lauta Ledesma", player),
player = gsub("Seung-Yul Noh", "Seung-yul Noh", player),
player = gsub("Alex Noren", "Alexander Noren", player),
player = gsub("Sebastián Muñoz", "Sebastian Munoz", player),
player = gsub("Sangmoon", "Sang-Moon", player),
player = gsub("Rafa ", "Rafael ", player),
player = tolower(player),
player = gsub("sebasti.n mu.oz", "sebastian munoz", player),
check = player %in% player_data$player)
)
}
# used in modeling; given a random number and a player, the function returns the predicted round score
find_score <- function(player, score) {
max(Score_Matrix$Score[Score_Matrix$player == player & Score_Matrix$Score.Prob < score])
}
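# in effect this is inverse-transform sampling from an empirical score table:
# draw u ~ Uniform(0, 1), then take the largest Score whose cumulative
# probability (Score.Prob) is still below u. Sketch with a made-up table
# (Score_Matrix itself is assumed to be built elsewhere in the workflow):
# toy <- data.frame(player = "a", Score = 68:71, Score.Prob = c(0, .25, .6, .9))
# u <- runif(1)
# max(toy$Score[toy$player == "a" & toy$Score.Prob < u])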
# used in cleaning odds reports; returns the break even probability for a given odds
Prob_Calc <- function(odds) {
return(
ifelse(odds > 0, 100/(odds + 100), abs(odds)/(abs(odds) + 100))
)
}
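# e.g. Prob_Calc(150) = 100/(150 + 100) = 0.40 and Prob_Calc(-200) = 200/300 ~ 0.667,
# i.e. the break-even win probability implied by American odds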
# used in cleaning odds reports; converts names to be able to align to other datasets
Clean_Players <- function(data) {
temp <- data
# temp <- gsub("varner", "varner iii", temp)
temp <- gsub("[[:punct:]]", "", temp)
temp <- gsub("kyoung.*hoon", "kyoung-hoon", temp)
temp <- gsub("byeonghun", "byeong hun", temp)
temp <- gsub(".+holmes$", "j.b. holmes", temp)
temp <- gsub("mark anderson", "mark d. anderson", temp)
temp <- gsub(".+choi$", "k.j. choi", temp)
temp <- gsub(".+spaun$", "j.j. spaun", temp)
temp <- gsub("nelson ledesma", "nelson lauta ledesma", temp)
temp <- gsub("ted potter jr", "ted potter jr.", temp)
temp <- gsub("seung.*yul noh", "seung-yul noh", temp)
temp <- gsub("sebastion", "sebastian", temp)
temp <- gsub("c[[:space:]]*t[[:space:]]*pan$", "c.t. pan", temp)
temp <- gsub("^brooke", "brooks", temp)
temp <- gsub("ohair$", "o'hair", temp)
temp <- gsub("hao tong", "haotong", temp)
temp <- gsub("alex ", "alexander ", temp)
temp <- gsub("^.+poston", "j.t. poston", temp)
temp <- gsub("byeong hun an","byeong-hun an", temp)
temp <- gsub("cabrerabello", "cabrera bello", temp)
temp <- gsub("rafael", "rafa", temp)
# temp <- gsub("russel", "russell", temp)
temp <- gsub("schaufflele", "schauffele", temp)
temp <- gsub("^(.)[[:space:]](.) ", "\\1\\2 ", temp)
return(temp)
}
# used in cleaning odds reports; splits apart names and odds from imported Excel data (may not be needed if web scraping)
Clean_Odds <- function(db) {
return(
db %>%
setNames("temp") %>%
mutate(temp = gsub("EVEN", "+100", temp),
Player = tolower(gsub("(^.+)[[:punct:]][[:digit:]]+$", "\\1", temp)),
Player = gsub("byeong hun an","byeong-hun an", Player),
Player = gsub("alex ", "alexander ", Player),
Player = gsub("mark anderson", "mark d. anderson", Player),
Player = gsub("kyoung hoon", "kyoung-hoon", Player),
Player = gsub("sebastion", "sebastian", Player),
Player = gsub("nelson ledesma", "nelson lauta ledesma", Player),
Player = gsub("ted potter jr", "ted potter jr.", Player),
Player = gsub("rafa campos", "rafael campos", Player),
Player = gsub("j. b. holmes","j.b. holmes", Player),
Player = gsub("jt poston", "j.t. poston", Player),
Player = gsub("j. j. spaun", "j.j. spaun", Player),
Player = gsub("kj choi", "k.j. choi", Player),
Player = gsub("rafael", "rafa", Player),
Player = gsub("j b", "jb", Player),
Odds = as.numeric(gsub("^.+([[:punct:]][[:digit:]]+$)", "\\1", temp)),
Prob = Prob_Calc(Odds))
)
}
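# Illustrative use (hypothetical odds strings; the real input is the pasted
# odds report, and dplyr must be attached). Not run when this file is sourced.
if (FALSE) {
  Clean_Odds(data.frame(raw = c("Jon Rahm+900", "Dustin Johnson+1000")))
  # -> Player "jon rahm" / "dustin johnson", Odds 900 / 1000, Prob 0.100 / 0.091
}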
# used in analyzing outputs; compares the round scoring projections for a group of players
Player_Spread <- function(p) {
temp <- filter(Rounds_Long, player %in% p)
ggplot(temp, aes(x = Score, fill = player)) +
geom_density(alpha = 0.25) +
theme_bw()
}
# used in analyzing outputs; compares the tournament result projections for a group of players
Player_Results <- function(p) {
temp <- filter(Ongoing_Tally, player %in% p)
ggplot(temp, aes(x = Final.Rank, fill = player)) +
geom_density(stat = "count", alpha = 0.25) +
theme_bw()
}
# used in analyzing outputs; returns a report that consists of historic scoring results and either the round or tournament projection graphic
Player_Report <- function(cat,tag, var = "round", site = NA) {
#browser()
if(!is.na(site)) {s <- site} else {s <- c("365", "bodog")}
p <- value_df$player[value_df$Tag == tag & value_df$category == cat & value_df$site %in% s]
p <- p[p != "tie"]
report <- player_prep %>%
filter(player %in% p) %>%
arrange(player) %>%
pivot_wider(names_from = "Round", values_from = "Score")
if(var == "round") {
g <- Player_Spread(p)
} else {
g <- Player_Results(p)
}
list(View(report), g)
}
|
/predict_script_functions.R
|
no_license
|
mhogel/golf_analysis
|
R
| false | false | 10,621 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_evaluation.R
\name{g_tests}
\alias{g_tests}
\title{Graph-based two-sample tests}
\usage{
g_tests(df1, df2, method = "euclidean")
}
\arguments{
\item{df1, df2}{data frames}
\item{method}{distance measure. Must be one of "euclidean" (default),
"maximum", "manhattan", "canberra", "binary" or "minkowski". See `method`
in `?dist`.}
}
\description{
Test if `df1` and `df2` come from the same distribution. This is a wrapper
of `g.tests` in which the edge matrix of the similarity graph is based on a
minimum spanning tree of the combined data frame.
}
\examples{
## Expect failure to reject null.
g_tests(data.frame(x = rnorm(100)), data.frame(x = rnorm(100)))
## Expect rejection of the null.
g_tests(data.frame(x = rnorm(100)), data.frame(x = runif(100)))
}
\references{
Friedman J. and Rafsky L. Multivariate generalizations of the
Wald-Wolfowitz and Smirnov two-sample tests. The Annals of Statistics,
7(4):697-717, 1979.
Chen, H. and Friedman, J. H. A new graph-based two-sample test for
multivariate and object data. Journal of the American Statistical
Association, 2016.
Chen, H., Chen, X. and Su, Y. A weighted edge-count two sample test for
multivariate and object data. arXiv:1604.06515.
}
|
/man/g_tests.Rd
|
no_license
|
kcf-jackson/glmGraph
|
R
| false | true | 1,283 |
rd
|
|
#' Generate scatter plots for a particular country
#'
#' This function generates a scatter plot of the dependent variable against every explanatory variable
#' @param i_alloc allocation data frame (one row per country and year, containing iso, year, tnode, the dependent variable and the explanatory variables)
#' @param i_nodeInfo regression-tree node information (currently only used by the commented-out segment code)
#' @param i_iso ISO code(s) of the country or countries to highlight
#' @param i_var_dep name of the dependent-variable column
#' @param PLOT_LABELS logical; if TRUE, label the highlighted points with "year (terminal node)"
#' @keywords plot, scatter, regression tree
#' @export
#'
#'
plot_ScatterCountry_allVars <- function(i_alloc, i_nodeInfo, i_iso, i_var_dep, PLOT_LABELS=FALSE) {
p_shift_x = 0.00
p_shift_y = 0.03
# Initialisation
# Get X and Y limits
# xlim = c(min(i_alloc[,i_var_x]), max(i_alloc[,i_var_x]))
# ylim = c(min(i_alloc[,i_var_y]), max(i_alloc[,i_var_y]))
# xlim = c(min(i_alloc[which(i_alloc$iso %in% i_iso),i_var_x]), max(i_alloc[which(i_alloc$iso %in% i_iso),i_var_x]))
# ylim = c(min(i_alloc[which(i_alloc$iso %in% i_iso),i_var_y]), max(i_alloc[which(i_alloc$iso %in% i_iso),i_var_y]))
tmp = i_alloc %>%
gather(variable_x, value_x, -iso, -year, -log_E_CC, -tnode)
# Plot all data points
p = ggplot(data=tmp %>% filter(iso %in% i_iso) %>% mutate(iso = factor(iso, levels=i_iso))) +
geom_point(aes_string(x="value_x", y=i_var_dep, fill=i_var_dep, colour=i_var_dep), data=tmp %>% select(-iso), pch=21, alpha=0.33)
# Compute segment information of intermediate nodes
# tmp <- i_nodeInfo[which(!duplicated(i_nodeInfo$nodeID)),] %>% #i_nodeInfo$nodeType == "Intermediate node" &
# select(nodeID, variable, value) %>%
# filter(variable %in% c(i_var_x, i_var_y)) %>%
# mutate(nx = ifelse(variable == i_var_x, value, xlim[1])) %>%
# mutate(nxend = ifelse(variable == i_var_x, value, xlim[2] + 0.05*(xlim[2]-xlim[1]) + p_shift_x*2*(xlim[2]-xlim[1]))) %>%
# mutate(ny = ifelse(variable == i_var_y, value, ylim[1] - 0.05*(ylim[2]-ylim[1]) - p_shift_y*2*(ylim[2]-ylim[1]))) %>%
# mutate(nyend = ifelse(variable == i_var_y, value, ylim[2])) %>%
# group_by(variable) %>%
# arrange(value) %>%
# mutate(nxlab = ifelse(variable == i_var_x, value, xlim[2] + 0.05*(xlim[2]-xlim[1]) + p_shift_x*row_number()%%3*(xlim[2]-xlim[1]))) %>%
# mutate(nylab = ifelse(variable == i_var_y, value, ylim[1] - 0.05*(ylim[2]-ylim[1]) - p_shift_y*row_number()%%3*(ylim[2]-ylim[1]))) %>%
# ungroup()
# Plot segments
# p = p +
# geom_segment(aes(x=nx, xend=nxend, y=ny, yend=nyend, group=nodeID), data=tmp, linetype=2) +
# geom_label(aes(x=nxlab, y=nylab, label=nodeID), colour="black", data=tmp)
# Plot country data (transition)
p = p +
geom_path(aes_string(x="value_x", y=i_var_dep), data=tmp %>% filter(iso %in% i_iso) %>% arrange(year), colour="black", size=1.25) +
geom_point(aes_string(x="value_x", y=i_var_dep, fill=i_var_dep), data=tmp %>% filter(iso %in% i_iso) , colour="black", size=4, pch=21) +
facet_grid(variable_x~iso, scales = "free")
if (PLOT_LABELS) {
p = p +
geom_label_repel(aes_string(x="value_x", y=i_var_dep, fill=i_var_dep, label="year.tnode"),
data=tmp %>% filter(iso %in% i_iso) %>% mutate(year.tnode = paste0(year, " (", tnode, ")")),
colour="white", fontface = "bold")
}
# # Plot options and cosmetics
# xlim[2] = xlim[2] + 0.05*(xlim[2]-xlim[1]) + p_shift_x*2*(xlim[2]-xlim[1])
# ylim[1] = ylim[1] - 0.05*(ylim[2]-ylim[1]) - p_shift_y*2*(ylim[2]-ylim[1])
p = p +
theme_bw() +
scale_colour_gradient(low = "#F5F5DC", high = "#8B0000", space = "Lab", na.value = "grey50", guide = "colourbar") +
scale_fill_gradient(low = "#F5F5DC", high = "#8B0000", space = "Lab", na.value = "grey50", guide = "colourbar") +
theme(legend.position="none")
print(p)
return(p)
}
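# Hypothetical usage sketch (not from the original project): build a toy
# allocation table with two explanatory variables and plot it for two
# countries. Assumes dplyr, tidyr and ggplot2 are attached; i_nodeInfo is only
# needed by the commented-out segment code, so NULL is passed here.
if (FALSE) {
  library(dplyr); library(tidyr); library(ggplot2)
  toy_alloc <- data.frame(
    iso      = rep(c("USA", "CHN"), each = 5),
    year     = rep(2000:2004, times = 2),
    gdp_pc   = c(36, 37, 38, 39, 40, 1.0, 1.2, 1.5, 1.9, 2.4),
    urban    = c(79, 79, 80, 80, 81, 36, 37, 39, 40, 42),
    log_E_CC = c(2.3, 2.3, 2.4, 2.4, 2.5, 0.9, 1.0, 1.1, 1.3, 1.5),
    tnode    = rep(c(1, 2), each = 5)
  )
  plot_ScatterCountry_allVars(toy_alloc, i_nodeInfo = NULL,
                              i_iso = c("USA", "CHN"),
                              i_var_dep = "log_E_CC", PLOT_LABELS = FALSE)
}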
|
/R/plot_ScatterCountry_allVars.R
|
no_license
|
jhilaire/guidr
|
R
| false | false | 3,583 |
r
|
|
# Load libraries
library("shiny")
library("ggplot2")
library("plotly")
library("dplyr")   # pipes and data verbs (group_by, filter, mutate, summarize) used below
library("tidyr")   # drop_na() used below
library("leaflet") # leafletOutput() used below
hiv_df <- read.csv("./data/art_coverage_by_country_clean.csv",
stringsAsFactors = FALSE
)
bar_chart_df <- hiv_df %>%
group_by(WHO.Region) %>%
select(
Country, WHO.Region,
Estimated.ART.coverage.among.people.living.with.HIV...._median
) %>%
filter(Estimated.ART.coverage.among.people.living.with.HIV...._median
!= "NA") %>%
rename(
M_ART_coverage_among_living_HIV =
Estimated.ART.coverage.among.people.living.with.HIV...._median
)
intro <- tabPanel(
"Intro",
HTML(
" <head>
<meta charset='utf-8'>
<link rel='preconnect' href='https://fonts.gstatic.com'>
<link href='https://fonts.googleapis.com/css2?family=Roboto&display=swap'rel
='stylesheet'>
<link rel='stylesheet' href='styles.css'>
<title>HIV/AIDS and ART Treatment Project</title>
</head>
<body>
<header>
<h1>HIV/AIDS and ART Treatment Dataset Introduction</h1>
</header>
<main>
<img id='main' src='hiv.jpg' alt='hand holding up hiv ribbon'>
<!-- Image taken from https://indianexpress.com/article/lifestyle/
life-style/we-need-
attention-too-the-plight-of-the-hiv-positive-community-in-india-4405035/
-->
<section id='intro'>
<h2>Purpose and Importance of Our Project</h2>
<p>
            The purpose of our project is to view the extent of treatment coverage
            for HIV/AIDS in countries globally, examine ART treatment's impact on
            the number of people living with HIV, and compare the number of people
            living with HIV by region.
</p>
<h2>Leading Questions of Our Project</h2>
<p>
We have included several graphics of the data,
            which aim to answer a few questions about global HIV rates and
ART treatment:
<ol>
<li>How do ART coverage rates for those living with HIV differ between
countries within certain regions?</li>
            <li>What are the incidence rates and numbers of deaths caused by HIV in
countries during selected time periods?</li>
<li>What are the estimated total number of cases of HIV, the total
coverage, and proportion of coverage to cases in countries
globally?</li>
</ol>
</p>
</section>
<section id='sources'>
<h2>Sources of Dataset</h2>
<p>
The data was collected and generated by WHO and UNESCO from their
public records on the number of people living with HIV/AIDS around the
            world. Devakumar cleaned that data from WHO and UNESCO and put the data
            into a CSV file that focuses on treatment statistics for those
living with HIV/AIDS by country.
<a href='https://www.kaggle.com/imdevskp/hiv-aids-dataset'>HIV/AIDS
and ART Treatment</a>
</p>
<p>
            We added another dataset to add latitude and longitude for all the
countries:
<a href='https://www.kaggle.com/paultimothymooney/
latitude-and-longitude-for-every-country-and-state'>
Latitude and longitude</a>
            This dataset was created by Paul Mooney and updated one year ago,
            ensuring that the data we present is still relevant and recent. Paul
            Mooney utilized a countries CSV that was published to the Google
            Public Data Explorer in the Dataset Publishing Language format.
</p>
<p>
            We added another dataset that tracks the number of deaths from HIV
from 1990 to 2017 to see how the availability of ART
(antiretroviral therapy)
has impacted death rates over time:
<a href='https://ourworldindata.org/hiv-aids'>Death rates</a>
This dataset, published by Our World in Data, includes numbers that
were sourced from studies done by the Global Burden of Disease
Collaborative Network.
</p>
<br>
</section>
</main>
<br>
<footer>
<p>
Created by INFO201 Group F1 ©2021
All other attributions cited in page source.
</p>
</footer>
<br>
</body>"
)
)
conclusion <- tabPanel(
"Conclusion",
HTML(
"<head>
<meta charset='utf-8'>
<link rel='preconnect' href='https://fonts.gstatic.com'>
<link href='https://fonts.googleapis.com/css2?family=Roboto&display=swap'
rel='stylesheet'>
<link rel='stylesheet' href='styles.css'>
<title>HIV/AIDS and ART Treatment Dataset Introduction</title>
</head>
<body>
<header>
<h1>Project Takeaways</h1>
</header>
<main>
<img id='globe' src='world.png' alt='globe and hiv ribbon'>
<!-- Image taken from https://images.theconversation.com/files/111819/
original/image-20160217
-19239-zivpuc.png?ixlib=rb-1.1.0&q=45&auto=format&w=926&fit=clip -->
<section id='conclusion'>
<p>
          Through organizing, wrangling and visualizing our datasets regarding
          global HIV and ART coverage rates, we have been able to draw several
          major conclusions.
          <br>
          <br>
          Based on the first bar chart on page one, we have
          found that in the Eastern Mediterranean, the country with the highest
          ART coverage among people with HIV is Jordan. In Africa, the country
          with the highest coverage is Namibia, in the Americas it is Peru, in
          Europe it is Italy, in the Western Pacific it is Australia, and lastly,
          in South-East Asia it is Thailand. Overall, the Americas has the lowest
          ART coverage for those with HIV and Africa and Europe have the highest.
          <br>
          <br>
          The map visual on the second page gave us insight into the proportion of
          ART coverage to cases of HIV within regions globally. Through this map
          we were able to recognize that many of the regions (Africa, the Americas,
          and the Western Pacific) have very similar proportions, around 0.62-0.63.
          We also saw that the Eastern Mediterranean has the lowest proportion, at
          0.21, and Southeast Asia has the second lowest at 0.47. Europe has the
          highest proportion at 0.71. Since the Eastern Mediterranean
          has the lowest proportion of ART coverage to HIV cases, countries
          within this region need to prioritize getting ART and making it more
          accessible to those suffering from HIV. From this map we can also see
          that the Eastern Mediterranean has the lowest total number of HIV cases
          of all the regions, which may be why it does not prioritize
          ART coverage as much as regions such as Africa that have an incredibly
          high number of cases of HIV.
          <br>
          <br>
          Lastly, the line chart on page three shows the trends in incidence of
          HIV, death rates from HIV, and prevalence of HIV in countries globally
          between 1990 and 2017. We can infer from these charts that, for each country,
          while there have not been decreases in the incidence or prevalence of
          HIV over time, there has been a stabilization and even a decrease in
          deaths for most countries across the globe. This trend is most likely due
          to ART, which helps people with HIV maintain longer and healthier
          lives, even though HIV still cannot be fully cured.
</p>
</section>
</main>
<br>
<footer>
<p>
Created by INFO201 Group F1 ©2021
All other attributions cited in page source.
</p>
</footer>
<br>
</body>"
)
)
footer <- HTML(
"<br>
<footer class='footer'>
<p>
Created by INFO201 Group F1 ©2021
All other attributions cited in page source.
</p>
</footer>
<br>"
)
# Bar chart stuff
bar_chart_page <- tabPanel(
"Bar Chart",
h2("Regional Median Estimated ART Coverage Among People Living with HIV"),
h4("Select a Region"),
selectInput(
inputId = "regionselect",
label = "Region",
selected = "Americas",
choices = unique(bar_chart_df$WHO.Region)
),
plotlyOutput(outputId = "region_chart"),
h3("Chart Summary:"),
textOutput(outputId = "chartexplanation"),
footer
)
### Map stuff
# Data for country coordinates
country_coordinates <- read.csv("./data/world_country_coordinate.csv",
stringsAsFactors = FALSE
)
# combine data
hiv_country <- merge(hiv_df, country_coordinates,
by = "Country",
all.x = TRUE
)
# group_by region
hiv_region <- hiv_country %>%
drop_na() %>%
select(
Country, Estimated.number.of.people.living.with.HIV_median,
Reported.number.of.people.receiving.ART, WHO.Region, Latitude,
Longitude
) %>%
filter(Reported.number.of.people.receiving.ART != "Nodata") %>%
mutate(Reported_receiving_art = strtoi(
Reported.number.of.people.receiving.ART
)) %>%
group_by(WHO.Region) %>%
summarize(
total_cases = sum(
Estimated.number.of.people.living.with.HIV_median,
na.rm = TRUE
),
total_coverage = sum(Reported_receiving_art, na.rm = TRUE),
coverage_prop = sum(Reported_receiving_art, na.rm = TRUE) /
sum(Estimated.number.of.people.living.with.HIV_median,
na.rm = TRUE
),
avg_lat = mean(Latitude, na.rm = TRUE),
avg_long = mean(Longitude, na.rm = TRUE)
)
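# Sanity-check sketch (hypothetical mini input, not run by the app): the same
# aggregation idea -- coverage_prop is total ART recipients divided by total
# estimated cases within each region.
if (FALSE) {
  toy <- data.frame(WHO.Region = c("A", "A", "B"),
                    cases = c(100, 300, 50),
                    art   = c(60, 180, 40))
  toy %>%
    group_by(WHO.Region) %>%
    summarize(coverage_prop = sum(art) / sum(cases))
  # A: 240 / 400 = 0.60   B: 40 / 50 = 0.80
}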
map_page <- tabPanel(
"Map",
h2("View Regional data on the map")
)
region_selection <- selectInput(
"region",
label = h3("Choose a WHO Region"),
choices = hiv_region$WHO.Region
)
map_chart_page <- tabPanel(
"Map Chart",
map_page,
region_selection,
h5("Click the marker that appears to view data about your selected region"),
leafletOutput("region_map"),
h3("Map Summary"),
tags$div(
id = "content-wrap",
textOutput("map_explanation")
),
footer
)
### Line Chart stuff
death_rate_df <- read.csv("./data/deaths-and-new-cases-of-hiv.csv")
death_rate_df <- death_rate_df %>%
group_by(death_rate_df$Entity)
country_selection <- selectInput(
"country",
label = h3("Choose a Country"),
choices = unique(death_rate_df$Entity)
)
years_selection <- sliderInput(
"years",
label = h3("Year Range"),
min = min(death_rate_df$Year, na.rm = TRUE),
max = max(death_rate_df$Year, na.rm = TRUE),
value = c(
min(death_rate_df$Year, na.rm = TRUE),
max(death_rate_df$Year, na.rm = TRUE)
),
sep = ""
)
line_chart_page <- tabPanel(
"Line Chart",
h2("See How Selected Countries Have Been Dealing with HIV/AIDS"),
country_selection,
years_selection,
plotOutput("linechart"),
h3("Chart Summary:"),
textOutput(outputId = "linechartexplanation"),
footer
)
# UI
ui <-
fluidPage(
includeCSS("styles.CSS"),
navbarPage(
tags$div(
id = "navbar",
"HIV Statistics"
),
intro,
bar_chart_page,
map_chart_page,
line_chart_page,
conclusion
)
)
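# Hypothetical server-side sketch (the real logic lives in the project's server
# file; object and output names below are assumptions). It shows how the
# "region_chart" and "chartexplanation" outputs declared above could be filled
# from bar_chart_df. Not run when this file is sourced.
if (FALSE) {
  demo_server <- function(input, output) {
    output$region_chart <- renderPlotly({
      plot_df <- bar_chart_df %>%
        filter(WHO.Region == input$regionselect)
      p <- ggplot(plot_df,
                  aes(x = Country, y = M_ART_coverage_among_living_HIV)) +
        geom_col() +
        coord_flip() +
        labs(x = "Country", y = "Median estimated ART coverage")
      ggplotly(p)
    })
    output$chartexplanation <- renderText({
      paste("Median estimated ART coverage for countries in the",
            input$regionselect, "region.")
    })
  }
  shinyApp(ui = ui, server = demo_server)
}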
|
/app_ui.R
|
permissive
|
info-201a-sp21/project-ryanzhao-pog
|
R
| false | false | 10,839 |
r
|
|
# Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
make.json.input.format =
function(key.class = rmr2:::qw(list, vector, data.frame, matrix),
value.class = rmr2:::qw(list, vector, data.frame, matrix), #leave the pkg qualifier in here
nrows = 10^4) {
key.class = match.arg(key.class)
value.class = match.arg(value.class)
cast =
function(class)
switch(
class,
list = identity,
vector = as.vector,
data.frame = function(x) do.call(data.frame, x),
matrix = function(x) do.call(rbind, x))
process.field =
function(field, class)
cast(class)(fromJSON(field, asText = TRUE))
function(con) {
lines = readLines(con, nrows)
if (length(lines) == 0) NULL
else {
splits = strsplit(lines, "\t")
c.keyval(
lapply(splits,
function(x)
if(length(x) == 1)
keyval(NULL, process.field(x[1], value.class))
else
keyval(process.field(x[1], key.class), process.field(x[2], value.class))))}}}
make.json.output.format =
function(write.size = 10^4)
function(kv, con) {
ser =
function(k, v)
paste(
gsub(
"\n",
"",
toJSON(k, .escapeEscapes=TRUE, collapse = "")),
gsub("\n", "", toJSON(v, .escapeEscapes=TRUE, collapse = "")),
sep = "\t")
out = reduce.keyval(kv, ser, write.size)
writeLines(paste(out, collapse = "\n"), sep = "\n", con = con)}
make.text.input.format =
function(nrows = 10^4)
function(con) {
lines = readLines(con, nrows)
if (length(lines) == 0) NULL
else keyval(NULL, lines)}
text.output.format =
function(kv, con) {
ser = function(k, v) paste(k, v, collapse = "", sep = "\t")
out = reduce.keyval(kv, ser, length.keyval(kv))
writeLines(as.character(out), con = con)}
make.csv.input.format =
function(..., nrows = 10^4) {
optlist = list(...)
function(con) {
df =
tryCatch(
do.call(read.table, c(list(file = con, header = FALSE, nrows = nrows), optlist)),
error =
function(e) {
if(e$message != "no lines available in input")
stop(e$message)
NULL})
if(is.null(df) || dim(df)[[1]] == 0) NULL
else keyval(NULL, df)}}
make.csv.output.format =
function(...) function(kv, con) {
k = keys(kv)
v = values(kv)
write.table(file = con,
x = if(is.null(k)) v else cbind(k, v),
...,
row.names = FALSE,
col.names = FALSE)}
typedbytes.reader =
function(data) {
if(is.null(data)) NULL
else
.Call("typedbytes_reader", data, PACKAGE = "rmr2")}
typedbytes.writer =
function(objects, con, native) {
writeBin(
.Call("typedbytes_writer", objects, native, PACKAGE = "rmr2"),
con)}
rmr.coerce =
function(x, template) {
if(is.atomic(template))
switch(
class(template),
factor = factor(unlist(x)),
Date = as.Date(unlist(x), origin = "1970-1-1"),
as(unlist(x), class(template)))
else
I(splat(c)(x))}
to.data.frame =
function(x, template){
x = t.list(x)
y =
lapply(
seq_along(template),
function(i)
rmr.coerce(x[[i]], template[[i]]))
names(y) = names(template)
df = data.frame(y, stringsAsFactors = FALSE)
candidate.names = make.unique(rmr.coerce(x[[length(x)]], character()))
rownames(df) = make.unique(ifelse(is.na(candidate.names), "NA", candidate.names))
df}
from.list =
function (x, template) {
switch(
class(template),
NULL = NULL,
list = splat(c)(x),
matrix = splat(rbind)(x),
data.frame = to.data.frame(x, template),
factor = factor(unlist(x)),
Date = unsplit(x, seq_along(x)),
unlist(x))}
make.typedbytes.input.format =
function(read.size = 10^7, native = FALSE) {
obj.buffer = list()
obj.buffer.rmr.length = 0
raw.buffer = raw()
template = NULL
function(con) {
while(length(obj.buffer) < 2 || (native && is.null(template))) {
raw.buffer <<- c(raw.buffer, readBin(con, raw(), read.size))
if(length(raw.buffer) == 0) break;
parsed = typedbytes.reader(raw.buffer)
if(is.null(template) && !is.null(parsed$template))
template <<- parsed$template
if(parsed$starting.template)
obj.buffer <<- obj.buffer[-length(obj.buffer)]
obj.buffer <<- c(obj.buffer, parsed$objects)
if(parsed$length != 0) raw.buffer <<- raw.buffer[-(1:parsed$length)]}
straddler = list()
retval = {
if(length(obj.buffer) == 0) NULL
else {
if(length(obj.buffer)%%2 ==1) {
straddler = obj.buffer[length(obj.buffer)]
obj.buffer <<- obj.buffer[-length(obj.buffer)]}
kk = odd(obj.buffer)
vv = even(obj.buffer)
if(native) {
stopifnot(!is.null(template))
kk = rep(
kk,
if(is.data.frame(template[[2]]))
sapply.rmr.length.lossy.data.frame(vv)
else
sapply.rmr.length(vv))
keyval(
from.list(kk, template[[1]]),
from.list(vv, template[[2]]))}
else
keyval(kk, vv)}}
obj.buffer <<- straddler
retval}}
make.native.input.format = Curry(make.typedbytes.input.format, native = TRUE)
to.list =
function(x) {
if (is.null(x))
list(NULL)
else {
if (is.matrix(x)) x = as.data.frame(x)
if (is.data.frame(x))
unname(
t.list(
lapply(
x,
function(x) if(is.factor(x)) as.character(x) else x)))
else
as.list(if(is.factor(x)) as.character(x) else x)}}
intersperse =
function(a.list, another.list, every.so.many)
c(
another.list[1],
splat(c)(
mapply(
split(a.list, ceiling(seq_along(a.list)/every.so.many), drop = TRUE),
lapply(another.list, list),
FUN = c,
SIMPLIFY = FALSE)))
intersperse.one =
function(a.list, an.element, every.so.many)
c(
splat(c)(
lapply(
split(a.list, ceiling(seq_along(a.list)/every.so.many)),
function(y) c(list(an.element), y))),
list(an.element))
delevel =
function(x) {
if(is.factor(x)) factor(x)
else{
if(is.data.frame(x))
structure(
data.frame(lapply(x, delevel), stringsAsFactors = FALSE),
row.names = row.names(x))
else x}}
make.native.or.typedbytes.output.format =
function(native, write.size = 10^6) {
template = NULL
function(kv, con){
if(length.keyval(kv) != 0) {
k = keys(kv)
v = values(kv)
kvs = {
if(native)
split.keyval(kv, write.size, TRUE)
else
keyval(to.list(k), to.list(v))}
if(is.null(k)) {
if(!native) stop("Can't handle NULL in typedbytes")
ks = rep(list(NULL), length.keyval(kvs)) }
else
ks = keys(kvs)
vs = values(kvs)
if(native) {
if(is.null(template)) {
template <<-
list(
key = delevel(rmr.slice(k, 0)),
val = delevel(rmr.slice(v, 0)))}
N = {
if(length(vs) < 100) 1
else {
r = ceiling((object.size(ks) + object.size(vs))/10^6)
if (r < 100) length(vs) / 100
else r}}
ks = intersperse(ks, sample(ks, ceiling(length(ks)/N)), N)
vs = intersperse.one(vs, structure(template, rmr.template = TRUE), N)}
typedbytes.writer(
interleave(ks, vs),
con,
native)}}}
make.native.output.format =
Curry(make.native.or.typedbytes.output.format, native = TRUE)
make.typedbytes.output.format =
Curry(make.native.or.typedbytes.output.format, native = FALSE)
pRawToChar =
function(rl)
.Call("raw_list_to_character", rl, PACKAGE="rmr2")
hbase.rec.to.data.frame =
function(
source,
atomic,
dense,
key.deserialize = pRawToChar,
cell.deserialize =
function(x, column, family) pRawToChar(x)) {
filler = replicate(length(unlist(source))/2, NULL)
dest =
list(
key = filler,
family = filler,
column = filler,
cell = filler)
tmp =
.Call(
"hbase_to_df",
source,
dest,
PACKAGE="rmr2")
retval = data.frame(
key =
I(
key.deserialize(
tmp$data.frame$key[1:tmp$nrows])),
family =
pRawToChar(
tmp$data.frame$family[1:tmp$nrows]),
column =
pRawToChar(
tmp$data.frame$column[1:tmp$nrows]),
cell =
I(
cell.deserialize(
tmp$data.frame$cell[1:tmp$nrows],
tmp$data.frame$family[1:tmp$nrows],
tmp$data.frame$column[1:tmp$nrows])))
if(atomic)
retval =
as.data.frame(
lapply(
retval,
function(x) if(is.factor(x)) x else unclass(x)))
if(dense) retval = dcast(retval, key ~ family + column)
retval}
make.hbase.input.format =
function(dense, atomic, key.deserialize, cell.deserialize, read.size) {
deserialize.opt =
function(deser) {
if(is.null(deser)) deser = "raw"
if(is.character(deser))
deser =
switch(
deser,
native =
function(x, family = NULL, column = NULL) lapply(x, unserialize),
typedbytes =
function(x, family = NULL, column = NULL)
typedbytes.reader(
do.call(c, x)),
raw = function(x, family = NULL, column = NULL) pRawToChar(x))
deser}
key.deserialize = deserialize.opt(key.deserialize)
cell.deserialize = deserialize.opt(cell.deserialize)
tif = make.typedbytes.input.format(read.size)
if(is.null(dense)) dense = FALSE
function(con) {
rec = tif(con)
if(is.null(rec)) NULL
else {
df = hbase.rec.to.data.frame(rec, atomic, dense, key.deserialize, cell.deserialize)
keyval(NULL, df)}}}
data.frame.to.nested.map =
function(x, ind) {
if(length(ind)>0 && nrow(x) > 0) {
spl = split(x, x[, ind[1]])
lapply(x[, ind[1]], function(y) keyval(as.character(y), data.frame.to.nested.map(spl[[y]], ind[-1])))}
else x$value}
hbdf.to.m3 = Curry(data.frame.to.nested.map, ind = c("key", "family", "column"))
# I/O
open.stdinout =
function(mode, is.read) {
if(mode == "text") {
if(is.read)
file("stdin", "r") #not stdin() which is parsed by the interpreter
else
stdout()}
else { # binary
cat = {
if(.Platform$OS.type == "windows")
paste(
"\"",
system.file(
package="rmr2",
"bin",
.Platform$r_arch,
"catwin.exe"),
"\"",
sep="")
else
"cat"}
pipe(cat, ifelse(is.read, "rb", "wb"))}}
make.keyval.readwriter =
function(fname, format, is.read) {
con = {
if(is.null(fname))
open.stdinout(format$mode, is.read)
else
file(
fname,
paste(
if(is.read) "r" else "w",
if(format$mode == "text") "" else "b",
sep = ""))}
if (is.read) {
function()
format$format(con)}
else {
function(kv)
format$format(kv, con)}}
make.keyval.reader = Curry(make.keyval.readwriter, is.read = TRUE)
make.keyval.writer = Curry(make.keyval.readwriter, is.read = FALSE)
paste.fromJSON =
function(...)
tryCatch(
rjson::fromJSON(paste("[", paste(..., sep = ", "), "]")),
error =
function(e){
if(is.element(e$message, paste0("unexpected character", c(" 'N'", " 'I'", ": I"), "\n")))
e$message = ("Found unexpected character, try updating Avro to 1.7.7 or trunk")
stop(e$message)})
make.avro.input.format.function =
function(schema.file, ..., read.size = 10^5) {
if(!require("ravro"))
stop("Package ravro needs to be installed before using this format")
schema = ravro:::avro_get_schema(file = schema.file)
function(con) {
lines =
readLines(con = con, n = read.size)
if (length(lines) == 0) NULL
else {
x = splat(paste.fromJSON)(lines)
y = ravro:::parse_avro(x, schema, encoded_unions=FALSE, ...)
keyval(NULL, y)}}}
IO.formats = c("text", "json", "csv", "native",
"sequence.typedbytes", "hbase",
"pig.hive", "avro")
make.input.format =
function(
format = "native",
mode = c("binary", "text"),
streaming.format = NULL,
backend.parameters = NULL,
...) {
mode = match.arg(mode)
backend.parameters = NULL
optlist = list(...)
if(is.character(format)) {
format = match.arg(format, IO.formats)
switch(
format,
text = {
format = make.text.input.format(...)
mode = "text"},
json = {
format = make.json.input.format(...)
mode = "text"},
csv = {
format = make.csv.input.format(...)
mode = "text"},
native = {
format = make.native.input.format(...)
mode = "binary"},
sequence.typedbytes = {
format = make.typedbytes.input.format(...)
mode = "binary"},
pig.hive = {
format =
make.csv.input.format(
sep = "\001",
comment.char = "",
fill = TRUE,
flush = TRUE,
quote = "")
mode = "text"},
hbase = {
format =
make.hbase.input.format(
default(optlist$dense, FALSE),
default(optlist$atomic, FALSE),
default(optlist$key.deserialize, "raw"),
default(optlist$cell.deserialize, "raw"),
default(optlist$read.size, 10^6))
mode = "binary"
streaming.format =
"com.dappervision.hbase.mapred.TypedBytesTableInputFormat"
family.columns = optlist$family.columns
start.row = optlist$start.row
stop.row = optlist$stop.row
regex.row.filter=optlist$regex.row.filter
backend.parameters =
list(
hadoop =
c(
list(
D =
paste(
"hbase.mapred.tablecolumnsb64=",
paste(
sapply(
names(family.columns),
function(fam)
paste(
sapply(
1:length(family.columns[[fam]]),
function(i)
base64encode(
paste(
fam,
":",
family.columns[[fam]][i],
sep = "",
collapse = ""))),
sep = "",
collapse = " ")),
collapse = " "),
sep = "")),
if(!is.null(start.row))
list(
D =
paste(
"hbase.mapred.startrowb64=",
base64encode(start.row),
sep = "")),
if(!is.null(stop.row))
list(
D =
paste(
"hbase.mapred.stoprowb64=",
base64encode(stop.row),
sep = "")),
if(!is.null(regex.row.filter))
list(
D =
paste(
"hbase.mapred.rowfilter=",
regex.row.filter,
sep = "")),
list(
libjars = system.file(package = "rmr2", "hadoopy_hbase.jar"))))},
avro = {
format = make.avro.input.format.function(...)
mode = "text"
streaming.format = "org.apache.avro.mapred.AvroAsTextInputFormat"
backend.parameters =
list(
hadoop =
list(
libjars =
gsub(
if(.Platform$OS.type == "windows")
";"
else
":",
", ", Sys.getenv("AVRO_LIBS"))))})}
if(is.null(streaming.format) && mode == "binary")
streaming.format = "org.apache.hadoop.streaming.AutoInputFormat"
list(mode = mode,
format = format,
streaming.format = streaming.format,
backend.parameters = backend.parameters)}
set.separator.options =
function(sep) {
if(!is.null(sep))
list(
hadoop =
list(
D =
paste(
"mapred.textoutputformat.separator=",
sep,
sep = ""),
D =
paste(
"stream.map.output.field.separator=",
sep,
sep = ""),
D =
paste(
"stream.reduce.output.field.separator=",
sep,
sep = "")))}
make.output.format =
function(
format = "native",
mode = c("binary", "text"),
streaming.format = NULL,
backend.parameters = NULL,
...) {
mode = match.arg(mode)
optlist = list(...)
if(is.character(format)) {
format = match.arg(format, IO.formats)
switch(
format,
text = {
format = text.output.format
mode = "text"
streaming.format = NULL},
json = {
format = make.json.output.format(...)
mode = "text"
streaming.format = NULL},
csv = {
format = make.csv.output.format(...)
mode = "text"
streaming.format = NULL
backend.parameters = set.separator.options(optlist$sep)},
pig.hive = {
format =
make.csv.output.format(
sep = "\001",
quote = FALSE)
mode = "text"
streaming.format = NULL},
native = {
format = make.native.output.format(...)
mode = "binary"
streaming.format = "org.apache.hadoop.mapred.SequenceFileOutputFormat"},
sequence.typedbytes = {
format = make.typedbytes.output.format(...)
mode = "binary"
streaming.format = "org.apache.hadoop.mapred.SequenceFileOutputFormat"},
hbase = {
stop("hbase output format not implemented yet")
format = make.typedbytes.output.format(...)
mode = "binary"
streaming.format = "com.dappervision.mapreduce.TypedBytesTableOutputFormat"
backend.parameters =
list(
hadoop =
list(
D = paste(
"hbase.mapred.tablecolumnsb64=",
optlist$family,
":",
optlist$column,
sep = ""),
libjars = system.file(package = "rmr2", "java/hadoopy_hbase.jar")))})}
mode = match.arg(mode)
list(
mode = mode,
format = format,
streaming.format = streaming.format,
backend.parameters = backend.parameters)}
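# Illustrative usage (typical rmr2 workflow; file paths and column names are
# hypothetical). A format built here is normally handed to mapreduce() or
# from.dfs() via their format arguments, for example:
#   csv.input  = make.input.format("csv", sep = ",", col.names = c("id", "x", "y"))
#   csv.output = make.output.format("csv", sep = ",")
#   from.dfs("/tmp/some-hdfs-path", format = csv.input)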
|
/pkg/R/IO.R
|
no_license
|
beedata-analytics/rmr2
|
R
| false | false | 20,520 |
r
|
# Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
make.json.input.format =
function(key.class = rmr2:::qw(list, vector, data.frame, matrix),
value.class = rmr2:::qw(list, vector, data.frame, matrix), #leave the pkg qualifier in here
nrows = 10^4) {
key.class = match.arg(key.class)
value.class = match.arg(value.class)
cast =
function(class)
switch(
class,
list = identity,
vector = as.vector,
data.frame = function(x) do.call(data.frame, x),
matrix = function(x) do.call(rbind, x))
process.field =
function(field, class)
cast(class)(fromJSON(field, asText = TRUE))
function(con) {
lines = readLines(con, nrows)
if (length(lines) == 0) NULL
else {
splits = strsplit(lines, "\t")
c.keyval(
lapply(splits,
function(x)
if(length(x) == 1)
keyval(NULL, process.field(x[1], value.class))
else
keyval(process.field(x[1], key.class), process.field(x[2], value.class))))}}}
make.json.output.format =
function(write.size = 10^4)
function(kv, con) {
ser =
function(k, v)
paste(
gsub(
"\n",
"",
toJSON(k, .escapeEscapes=TRUE, collapse = "")),
gsub("\n", "", toJSON(v, .escapeEscapes=TRUE, collapse = "")),
sep = "\t")
out = reduce.keyval(kv, ser, write.size)
writeLines(paste(out, collapse = "\n"), sep = "\n", con = con)}
make.text.input.format =
function(nrows = 10^4)
function(con) {
lines = readLines(con, nrows)
if (length(lines) == 0) NULL
else keyval(NULL, lines)}
text.output.format =
function(kv, con) {
ser = function(k, v) paste(k, v, collapse = "", sep = "\t")
out = reduce.keyval(kv, ser, length.keyval(kv))
writeLines(as.character(out), con = con)}
make.csv.input.format =
function(..., nrows = 10^4) {
optlist = list(...)
function(con) {
df =
tryCatch(
do.call(read.table, c(list(file = con, header = FALSE, nrows = nrows), optlist)),
error =
function(e) {
if(e$message != "no lines available in input")
stop(e$message)
NULL})
if(is.null(df) || dim(df)[[1]] == 0) NULL
else keyval(NULL, df)}}
make.csv.output.format =
function(...) function(kv, con) {
k = keys(kv)
v = values(kv)
write.table(file = con,
x = if(is.null(k)) v else cbind(k, v),
...,
row.names = FALSE,
col.names = FALSE)}
typedbytes.reader =
function(data) {
if(is.null(data)) NULL
else
.Call("typedbytes_reader", data, PACKAGE = "rmr2")}
typedbytes.writer =
function(objects, con, native) {
writeBin(
.Call("typedbytes_writer", objects, native, PACKAGE = "rmr2"),
con)}
rmr.coerce =
function(x, template) {
if(is.atomic(template))
switch(
class(template),
factor = factor(unlist(x)),
Date = as.Date(unlist(x), origin = "1970-1-1"),
as(unlist(x), class(template)))
else
I(splat(c)(x))}
to.data.frame =
function(x, template){
x = t.list(x)
y =
lapply(
seq_along(template),
function(i)
rmr.coerce(x[[i]], template[[i]]))
names(y) = names(template)
df = data.frame(y, stringsAsFactors = FALSE)
candidate.names = make.unique(rmr.coerce(x[[length(x)]], character()))
rownames(df) = make.unique(ifelse(is.na(candidate.names), "NA", candidate.names))
df}
from.list =
function (x, template) {
switch(
class(template),
NULL = NULL,
list = splat(c)(x),
matrix = splat(rbind)(x),
data.frame = to.data.frame(x, template),
factor = factor(unlist(x)),
Date = unsplit(x, seq_along(x)),
unlist(x))}
make.typedbytes.input.format =
function(read.size = 10^7, native = FALSE) {
obj.buffer = list()
obj.buffer.rmr.length = 0
raw.buffer = raw()
template = NULL
function(con) {
while(length(obj.buffer) < 2 || (native && is.null(template))) {
raw.buffer <<- c(raw.buffer, readBin(con, raw(), read.size))
if(length(raw.buffer) == 0) break;
parsed = typedbytes.reader(raw.buffer)
if(is.null(template) && !is.null(parsed$template))
template <<- parsed$template
if(parsed$starting.template)
obj.buffer <<- obj.buffer[-length(obj.buffer)]
obj.buffer <<- c(obj.buffer, parsed$objects)
if(parsed$length != 0) raw.buffer <<- raw.buffer[-(1:parsed$length)]}
straddler = list()
retval = {
if(length(obj.buffer) == 0) NULL
else {
if(length(obj.buffer)%%2 ==1) {
straddler = obj.buffer[length(obj.buffer)]
obj.buffer <<- obj.buffer[-length(obj.buffer)]}
kk = odd(obj.buffer)
vv = even(obj.buffer)
if(native) {
stopifnot(!is.null(template))
kk = rep(
kk,
if(is.data.frame(template[[2]]))
sapply.rmr.length.lossy.data.frame(vv)
else
sapply.rmr.length(vv))
keyval(
from.list(kk, template[[1]]),
from.list(vv, template[[2]]))}
else
keyval(kk, vv)}}
obj.buffer <<- straddler
retval}}
make.native.input.format = Curry(make.typedbytes.input.format, native = TRUE)
to.list =
function(x) {
if (is.null(x))
list(NULL)
else {
if (is.matrix(x)) x = as.data.frame(x)
if (is.data.frame(x))
unname(
t.list(
lapply(
x,
function(x) if(is.factor(x)) as.character(x) else x)))
else
as.list(if(is.factor(x)) as.character(x) else x)}}
intersperse =
function(a.list, another.list, every.so.many)
c(
another.list[1],
splat(c)(
mapply(
split(a.list, ceiling(seq_along(a.list)/every.so.many), drop = TRUE),
lapply(another.list, list),
FUN = c,
SIMPLIFY = FALSE)))
intersperse.one =
function(a.list, an.element, every.so.many)
c(
splat(c)(
lapply(
split(a.list, ceiling(seq_along(a.list)/every.so.many)),
function(y) c(list(an.element), y))),
list(an.element))
delevel =
function(x) {
if(is.factor(x)) factor(x)
else{
if(is.data.frame(x))
structure(
data.frame(lapply(x, delevel), stringsAsFactors = FALSE),
row.names = row.names(x))
else x}}
make.native.or.typedbytes.output.format =
function(native, write.size = 10^6) {
template = NULL
function(kv, con){
if(length.keyval(kv) != 0) {
k = keys(kv)
v = values(kv)
kvs = {
if(native)
split.keyval(kv, write.size, TRUE)
else
keyval(to.list(k), to.list(v))}
if(is.null(k)) {
if(!native) stop("Can't handle NULL in typedbytes")
ks = rep(list(NULL), length.keyval(kvs)) }
else
ks = keys(kvs)
vs = values(kvs)
if(native) {
if(is.null(template)) {
template <<-
list(
key = delevel(rmr.slice(k, 0)),
val = delevel(rmr.slice(v, 0)))}
N = {
if(length(vs) < 100) 1
else {
r = ceiling((object.size(ks) + object.size(vs))/10^6)
if (r < 100) length(vs) / 100
else r}}
ks = intersperse(ks, sample(ks, ceiling(length(ks)/N)), N)
vs = intersperse.one(vs, structure(template, rmr.template = TRUE), N)}
typedbytes.writer(
interleave(ks, vs),
con,
native)}}}
make.native.output.format =
Curry(make.native.or.typedbytes.output.format, native = TRUE)
make.typedbytes.output.format =
Curry(make.native.or.typedbytes.output.format, native = FALSE)
pRawToChar =
function(rl)
.Call("raw_list_to_character", rl, PACKAGE="rmr2")
hbase.rec.to.data.frame =
function(
source,
atomic,
dense,
key.deserialize = pRawToChar,
cell.deserialize =
function(x, column, family) pRawToChar(x)) {
filler = replicate(length(unlist(source))/2, NULL)
dest =
list(
key = filler,
family = filler,
column = filler,
cell = filler)
tmp =
.Call(
"hbase_to_df",
source,
dest,
PACKAGE="rmr2")
retval = data.frame(
key =
I(
key.deserialize(
tmp$data.frame$key[1:tmp$nrows])),
family =
pRawToChar(
tmp$data.frame$family[1:tmp$nrows]),
column =
pRawToChar(
tmp$data.frame$column[1:tmp$nrows]),
cell =
I(
cell.deserialize(
tmp$data.frame$cell[1:tmp$nrows],
tmp$data.frame$family[1:tmp$nrows],
tmp$data.frame$column[1:tmp$nrows])))
if(atomic)
retval =
as.data.frame(
lapply(
retval,
function(x) if(is.factor(x)) x else unclass(x)))
if(dense) retval = dcast(retval, key ~ family + column)
retval}
make.hbase.input.format =
function(dense, atomic, key.deserialize, cell.deserialize, read.size) {
deserialize.opt =
function(deser) {
if(is.null(deser)) deser = "raw"
if(is.character(deser))
deser =
switch(
deser,
native =
function(x, family = NULL, column = NULL) lapply(x, unserialize),
typedbytes =
function(x, family = NULL, column = NULL)
typedbytes.reader(
do.call(c, x)),
raw = function(x, family = NULL, column = NULL) pRawToChar(x))
deser}
key.deserialize = deserialize.opt(key.deserialize)
cell.deserialize = deserialize.opt(cell.deserialize)
tif = make.typedbytes.input.format(read.size)
if(is.null(dense)) dense = FALSE
function(con) {
rec = tif(con)
if(is.null(rec)) NULL
else {
df = hbase.rec.to.data.frame(rec, atomic, dense, key.deserialize, cell.deserialize)
keyval(NULL, df)}}}
data.frame.to.nested.map =
function(x, ind) {
if(length(ind)>0 && nrow(x) > 0) {
spl = split(x, x[, ind[1]])
lapply(x[, ind[1]], function(y) keyval(as.character(y), data.frame.to.nested.map(spl[[y]], ind[-1])))}
else x$value}
hbdf.to.m3 = Curry(data.frame.to.nested.map, ind = c("key", "family", "column"))
# I/O
open.stdinout =
function(mode, is.read) {
if(mode == "text") {
if(is.read)
file("stdin", "r") #not stdin() which is parsed by the interpreter
else
stdout()}
else { # binary
cat = {
if(.Platform$OS.type == "windows")
paste(
"\"",
system.file(
package="rmr2",
"bin",
.Platform$r_arch,
"catwin.exe"),
"\"",
sep="")
else
"cat"}
pipe(cat, ifelse(is.read, "rb", "wb"))}}
make.keyval.readwriter =
function(fname, format, is.read) {
con = {
if(is.null(fname))
open.stdinout(format$mode, is.read)
else
file(
fname,
paste(
if(is.read) "r" else "w",
if(format$mode == "text") "" else "b",
sep = ""))}
if (is.read) {
function()
format$format(con)}
else {
function(kv)
format$format(kv, con)}}
make.keyval.reader = Curry(make.keyval.readwriter, is.read = TRUE)
make.keyval.writer = Curry(make.keyval.readwriter, is.read = FALSE)
paste.fromJSON =
function(...)
tryCatch(
rjson::fromJSON(paste("[", paste(..., sep = ", "), "]")),
error =
function(e){
if(is.element(e$message, paste0("unexpected character", c(" 'N'", " 'I'", ": I"), "\n")))
e$message = ("Found unexpected character, try updating Avro to 1.7.7 or trunk")
stop(e$message)})
make.avro.input.format.function =
function(schema.file, ..., read.size = 10^5) {
if(!require("ravro"))
stop("Package ravro needs to be installed before using this format")
schema = ravro:::avro_get_schema(file = schema.file)
function(con) {
lines =
readLines(con = con, n = read.size)
if (length(lines) == 0) NULL
else {
x = splat(paste.fromJSON)(lines)
y = ravro:::parse_avro(x, schema, encoded_unions=FALSE, ...)
keyval(NULL, y)}}}
IO.formats = c("text", "json", "csv", "native",
"sequence.typedbytes", "hbase",
"pig.hive", "avro")
make.input.format =
function(
format = "native",
mode = c("binary", "text"),
streaming.format = NULL,
backend.parameters = NULL,
...) {
mode = match.arg(mode)
backend.parameters = NULL
optlist = list(...)
if(is.character(format)) {
format = match.arg(format, IO.formats)
switch(
format,
text = {
format = make.text.input.format(...)
mode = "text"},
json = {
format = make.json.input.format(...)
mode = "text"},
csv = {
format = make.csv.input.format(...)
mode = "text"},
native = {
format = make.native.input.format(...)
mode = "binary"},
sequence.typedbytes = {
format = make.typedbytes.input.format(...)
mode = "binary"},
pig.hive = {
format =
make.csv.input.format(
sep = "\001",
comment.char = "",
fill = TRUE,
flush = TRUE,
quote = "")
mode = "text"},
hbase = {
format =
make.hbase.input.format(
default(optlist$dense, FALSE),
default(optlist$atomic, FALSE),
default(optlist$key.deserialize, "raw"),
default(optlist$cell.deserialize, "raw"),
default(optlist$read.size, 10^6))
mode = "binary"
streaming.format =
"com.dappervision.hbase.mapred.TypedBytesTableInputFormat"
family.columns = optlist$family.columns
start.row = optlist$start.row
stop.row = optlist$stop.row
regex.row.filter=optlist$regex.row.filter
backend.parameters =
list(
hadoop =
c(
list(
D =
paste(
"hbase.mapred.tablecolumnsb64=",
paste(
sapply(
names(family.columns),
function(fam)
paste(
sapply(
1:length(family.columns[[fam]]),
function(i)
base64encode(
paste(
fam,
":",
family.columns[[fam]][i],
sep = "",
collapse = ""))),
sep = "",
collapse = " ")),
collapse = " "),
sep = "")),
if(!is.null(start.row))
list(
D =
paste(
"hbase.mapred.startrowb64=",
base64encode(start.row),
sep = "")),
if(!is.null(stop.row))
list(
D =
paste(
"hbase.mapred.stoprowb64=",
base64encode(stop.row),
sep = "")),
if(!is.null(regex.row.filter))
list(
D =
paste(
"hbase.mapred.rowfilter=",
regex.row.filter,
sep = "")),
list(
libjars = system.file(package = "rmr2", "hadoopy_hbase.jar"))))},
avro = {
format = make.avro.input.format.function(...)
mode = "text"
streaming.format = "org.apache.avro.mapred.AvroAsTextInputFormat"
backend.parameters =
list(
hadoop =
list(
libjars =
gsub(
if(.Platform$OS.type == "windows")
";"
else
":",
", ", Sys.getenv("AVRO_LIBS"))))})}
if(is.null(streaming.format) && mode == "binary")
streaming.format = "org.apache.hadoop.streaming.AutoInputFormat"
list(mode = mode,
format = format,
streaming.format = streaming.format,
backend.parameters = backend.parameters)}
set.separator.options =
function(sep) {
if(!is.null(sep))
list(
hadoop =
list(
D =
paste(
"mapred.textoutputformat.separator=",
sep,
sep = ""),
D =
paste(
"stream.map.output.field.separator=",
sep,
sep = ""),
D =
paste(
"stream.reduce.output.field.separator=",
sep,
sep = "")))}
make.output.format =
function(
format = "native",
mode = c("binary", "text"),
streaming.format = NULL,
backend.parameters = NULL,
...) {
mode = match.arg(mode)
optlist = list(...)
if(is.character(format)) {
format = match.arg(format, IO.formats)
switch(
format,
text = {
format = text.output.format
mode = "text"
streaming.format = NULL},
json = {
format = make.json.output.format(...)
mode = "text"
streaming.format = NULL},
csv = {
format = make.csv.output.format(...)
mode = "text"
streaming.format = NULL
backend.parameters = set.separator.options(optlist$sep)},
pig.hive = {
format =
make.csv.output.format(
sep = "\001",
quote = FALSE)
mode = "text"
streaming.format = NULL},
native = {
format = make.native.output.format(...)
mode = "binary"
streaming.format = "org.apache.hadoop.mapred.SequenceFileOutputFormat"},
sequence.typedbytes = {
format = make.typedbytes.output.format(...)
mode = "binary"
streaming.format = "org.apache.hadoop.mapred.SequenceFileOutputFormat"},
hbase = {
stop("hbase output format not implemented yet")
format = make.typedbytes.output.format(...)
mode = "binary"
streaming.format = "com.dappervision.mapreduce.TypedBytesTableOutputFormat"
backend.parameters =
list(
hadoop =
list(
D = paste(
"hbase.mapred.tablecolumnsb64=",
optlist$family,
":",
optlist$column,
sep = ""),
libjars = system.file(package = "rmr2", "java/hadoopy_hbase.jar")))})}
mode = match.arg(mode)
list(
mode = mode,
format = format,
streaming.format = streaming.format,
backend.parameters = backend.parameters)}
|
## Extract reads corresponding to each peak region
setMethod(
f="extractReads",
signature="MosaicsPeak",
definition=function( object, chipFile=NULL, chipFileFormat=NULL,
chipPET=FALSE, chipFragLen=200,
controlFile=NULL, controlFileFormat=NULL,
controlPET=FALSE, controlFragLen=200, keepReads=FALSE,
parallel=FALSE, nCore=8, tempDir=NULL, perl="perl" )
{
# summarize peak info
peakList <- print(object)
nPeak <- nrow(peakList)
chrCommon <- unique(peakList[,1])
chrCommon <- sort(chrCommon)
# process read files
message( "Info: Loading and processing ChIP sample file..." )
outChIP <- .loadReadData( object=object, readfile=chipFile,
fileFormat=chipFileFormat, PET=chipPET, fragLen=chipFragLen, keepReads=keepReads,
parallel=parallel, nCore=nCore, tempDir=tempDir, perl=perl )
if ( !is.null(controlFile) ) {
message( "Info: Loading and processing matched control sample file..." )
outInput <- .loadReadData( object=object, readfile=controlFile,
fileFormat=controlFileFormat, PET=controlPET, fragLen=controlFragLen, keepReads=keepReads,
parallel=parallel, nCore=nCore, tempDir=tempDir, perl=perl )
}
    # rearrange results: seqDepth
if ( !is.null(controlFile) ) {
seqDepth <- c( outChIP$seqDepth, outInput$seqDepth )
} else {
seqDepth <- c( outChIP$seqDepth, NA )
}
    # rearrange results: stackedFragment
stackedFragment <- vector( "list", nPeak )
for ( i in 1:nPeak ) {
stackedFragment[[i]] <- vector( "list", 2 )
if( !is.na(outChIP$stackedFragment[[i]][[1]]) ) {
stackedFragment[[i]]$ChIP <- outChIP$stackedFragment[[i]]
} else {
stackedFragment[[i]]$ChIP <- vector( "list", 3 )
stackedFragment[[i]]$ChIP[[1]] <- stackedFragment[[i]]$ChIP[[2]] <-
stackedFragment[[i]]$ChIP[[3]] <- NA
}
        if ( !is.null(controlFile) && !is.null(outInput$stackedFragment[[i]][[1]]) ) {
stackedFragment[[i]]$Input <- outInput$stackedFragment[[i]]
} else {
stackedFragment[[i]]$Input <- vector( "list", 3 )
stackedFragment[[i]]$Input[[1]] <- stackedFragment[[i]]$Input[[2]] <-
stackedFragment[[i]]$Input[[3]] <- NA
}
}
names(stackedFragment) <- paste( peakList[,1], ":", peakList[,2], "-", peakList[,3], sep="" )
    # rearrange results: fragSet
if ( keepReads == TRUE ) {
fragSet <- vector( "list", nPeak )
for ( i in 1:nPeak ) {
fragSet[[i]] <- vector( "list", 2 )
if( !is.na(outChIP$stackedFragment[[i]][[1]]) ) {
fragSet[[i]]$ChIP <- outChIP$fragSet[[i]]
} else {
fragSet[[i]]$ChIP <- GRanges()
}
        if ( !is.null(controlFile) && !is.null(outInput$stackedFragment[[i]][[1]]) ) {
fragSet[[i]]$Input <- outInput$fragSet[[i]]
} else {
fragSet[[i]]$Input <- GRanges()
}
}
names(fragSet) <- paste( peakList[,1], ":", peakList[,2], "-", peakList[,3], sep="" )
} else {
fragSet <- list()
}
    # rearrange results: numReads
numReads <- matrix( NA, nPeak, 2 )
numReads[,1] <- outChIP$numReads
if ( !is.null(controlFile) ) {
numReads[,2] <- outInput$numReads
}
rownames(numReads) <- paste( peakList[,1], ":", peakList[,2], "-", peakList[,3], sep="" )
colnames(numReads) <- c( "ChIP", "Control" )
# info about preprocessing
cat( "------------------------------------------------------------\n" )
cat( "Info: Preprocessing summary\n" )
cat( "------------------------------------------------------------\n" )
cat( "Number of chromosomes: ",length(chrCommon),"\n", sep="" )
cat( "Number of peaks: ",nPeak,"\n", sep="" )
sumRead <- sum(numReads[,1])
medNumRead <- median(numReads[,1])
cat( "ChIP sample:\n" )
cat( "\tTag type: ",ifelse(chipPET,"PET","SET"),"\n", sep="" )
cat( "\tSequencing depth: ",seqDepth[1],"\n", sep="" )
cat( "\tNumber of utilized reads: ",sumRead,"\n", sep="" )
cat( "\tMedian number of reads in each peak: ",medNumRead,"\n", sep="" )
if ( !is.null(controlFile) ) {
sumRead <- sum(numReads[,2])
medNumRead <- median(numReads[,2])
cat( "Matched control sample:\n" )
cat( "\tTag type: ",ifelse(controlPET,"PET","SET"),"\n", sep="" )
cat( "\tSequencing depth: ",seqDepth[2],"\n", sep="" )
cat( "\tNumber of utilized reads: ",sumRead,"\n", sep="" )
cat( "\tMedian number of reads in each peak: ",medNumRead,"\n", sep="" )
}
cat( "------------------------------------------------------------\n" )
# update object
object@tagLoaded <- TRUE
#object@tagData <- new( "TagData",
# read=fragSet, coverage=stackedFragment, seqDepth=seqDepth )
object@tagData <- new( "TagData",
read=fragSet, numReads=numReads, coverage=stackedFragment, keepReads=keepReads )
object@seqDepth <- seqDepth
return(object)
}
)
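## Illustrative call (editor's sketch, not from the original source): the peak object,
## the file names, and the "bam" format value are assumptions; an actual workflow would
## first produce a MosaicsPeak object via the package's peak-calling steps.
## peak <- extractReads( peak, chipFile = "chip.bam", chipFileFormat = "bam",
##     controlFile = "input.bam", controlFileFormat = "bam",
##     parallel = TRUE, nCore = 4 )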
|
/R/extractReads.R
|
no_license
|
jdavisturak/mosaics
|
R
| false | false | 5,142 |
r
|
## Extract reads corresponding to each peak region
setMethod(
f="extractReads",
signature="MosaicsPeak",
definition=function( object, chipFile=NULL, chipFileFormat=NULL,
chipPET=FALSE, chipFragLen=200,
controlFile=NULL, controlFileFormat=NULL,
controlPET=FALSE, controlFragLen=200, keepReads=FALSE,
parallel=FALSE, nCore=8, tempDir=NULL, perl="perl" )
{
# summarize peak info
peakList <- print(object)
nPeak <- nrow(peakList)
chrCommon <- unique(peakList[,1])
chrCommon <- sort(chrCommon)
# process read files
message( "Info: Loading and processing ChIP sample file..." )
outChIP <- .loadReadData( object=object, readfile=chipFile,
fileFormat=chipFileFormat, PET=chipPET, fragLen=chipFragLen, keepReads=keepReads,
parallel=parallel, nCore=nCore, tempDir=tempDir, perl=perl )
if ( !is.null(controlFile) ) {
message( "Info: Loading and processing matched control sample file..." )
outInput <- .loadReadData( object=object, readfile=controlFile,
fileFormat=controlFileFormat, PET=controlPET, fragLen=controlFragLen, keepReads=keepReads,
parallel=parallel, nCore=nCore, tempDir=tempDir, perl=perl )
}
    # rearrange results: seqDepth
if ( !is.null(controlFile) ) {
seqDepth <- c( outChIP$seqDepth, outInput$seqDepth )
} else {
seqDepth <- c( outChIP$seqDepth, NA )
}
    # rearrange results: stackedFragment
stackedFragment <- vector( "list", nPeak )
for ( i in 1:nPeak ) {
stackedFragment[[i]] <- vector( "list", 2 )
if( !is.na(outChIP$stackedFragment[[i]][[1]]) ) {
stackedFragment[[i]]$ChIP <- outChIP$stackedFragment[[i]]
} else {
stackedFragment[[i]]$ChIP <- vector( "list", 3 )
stackedFragment[[i]]$ChIP[[1]] <- stackedFragment[[i]]$ChIP[[2]] <-
stackedFragment[[i]]$ChIP[[3]] <- NA
}
        if ( !is.null(controlFile) && !is.null(outInput$stackedFragment[[i]][[1]]) ) {
stackedFragment[[i]]$Input <- outInput$stackedFragment[[i]]
} else {
stackedFragment[[i]]$Input <- vector( "list", 3 )
stackedFragment[[i]]$Input[[1]] <- stackedFragment[[i]]$Input[[2]] <-
stackedFragment[[i]]$Input[[3]] <- NA
}
}
names(stackedFragment) <- paste( peakList[,1], ":", peakList[,2], "-", peakList[,3], sep="" )
    # rearrange results: fragSet
if ( keepReads == TRUE ) {
fragSet <- vector( "list", nPeak )
for ( i in 1:nPeak ) {
fragSet[[i]] <- vector( "list", 2 )
if( !is.na(outChIP$stackedFragment[[i]][[1]]) ) {
fragSet[[i]]$ChIP <- outChIP$fragSet[[i]]
} else {
fragSet[[i]]$ChIP <- GRanges()
}
        if ( !is.null(controlFile) && !is.null(outInput$stackedFragment[[i]][[1]]) ) {
fragSet[[i]]$Input <- outInput$fragSet[[i]]
} else {
fragSet[[i]]$Input <- GRanges()
}
}
names(fragSet) <- paste( peakList[,1], ":", peakList[,2], "-", peakList[,3], sep="" )
} else {
fragSet <- list()
}
    # rearrange results: numReads
numReads <- matrix( NA, nPeak, 2 )
numReads[,1] <- outChIP$numReads
if ( !is.null(controlFile) ) {
numReads[,2] <- outInput$numReads
}
rownames(numReads) <- paste( peakList[,1], ":", peakList[,2], "-", peakList[,3], sep="" )
colnames(numReads) <- c( "ChIP", "Control" )
# info about preprocessing
cat( "------------------------------------------------------------\n" )
cat( "Info: Preprocessing summary\n" )
cat( "------------------------------------------------------------\n" )
cat( "Number of chromosomes: ",length(chrCommon),"\n", sep="" )
cat( "Number of peaks: ",nPeak,"\n", sep="" )
sumRead <- sum(numReads[,1])
medNumRead <- median(numReads[,1])
cat( "ChIP sample:\n" )
cat( "\tTag type: ",ifelse(chipPET,"PET","SET"),"\n", sep="" )
cat( "\tSequencing depth: ",seqDepth[1],"\n", sep="" )
cat( "\tNumber of utilized reads: ",sumRead,"\n", sep="" )
cat( "\tMedian number of reads in each peak: ",medNumRead,"\n", sep="" )
if ( !is.null(controlFile) ) {
sumRead <- sum(numReads[,2])
medNumRead <- median(numReads[,2])
cat( "Matched control sample:\n" )
cat( "\tTag type: ",ifelse(controlPET,"PET","SET"),"\n", sep="" )
cat( "\tSequencing depth: ",seqDepth[2],"\n", sep="" )
cat( "\tNumber of utilized reads: ",sumRead,"\n", sep="" )
cat( "\tMedian number of reads in each peak: ",medNumRead,"\n", sep="" )
}
cat( "------------------------------------------------------------\n" )
# update object
object@tagLoaded <- TRUE
#object@tagData <- new( "TagData",
# read=fragSet, coverage=stackedFragment, seqDepth=seqDepth )
object@tagData <- new( "TagData",
read=fragSet, numReads=numReads, coverage=stackedFragment, keepReads=keepReads )
object@seqDepth <- seqDepth
return(object)
}
)
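## Illustrative call (editor's sketch, not from the original source): the peak object,
## the file names, and the "bam" format value are assumptions; an actual workflow would
## first produce a MosaicsPeak object via the package's peak-calling steps.
## peak <- extractReads( peak, chipFile = "chip.bam", chipFileFormat = "bam",
##     controlFile = "input.bam", controlFileFormat = "bam",
##     parallel = TRUE, nCore = 4 )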
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/instrumental_forest.R
\name{instrumental_forest}
\alias{instrumental_forest}
\title{Instrumental forest}
\usage{
instrumental_forest(X, Y, W, Z, Y.hat = NULL, W.hat = NULL, Z.hat = NULL,
sample.fraction = 0.5, mtry = NULL, num.trees = 2000,
num.threads = NULL, min.node.size = NULL, honesty = TRUE,
ci.group.size = 2, reduced.form.weight = 0, alpha = 0.05,
imbalance.penalty = 0, stabilize.splits = TRUE, seed = NULL,
clusters = NULL, samples_per_cluster = NULL)
}
\arguments{
\item{X}{The covariates used in the instrumental regression.}
\item{Y}{The outcome.}
\item{W}{The treatment assignment (may be binary or real).}
\item{Z}{The instrument (may be binary or real).}
\item{Y.hat}{Estimates of the expected responses E[Y | Xi], marginalizing
over treatment. If Y.hat = NULL, these are estimated using
a separate regression forest.}
\item{W.hat}{Estimates of the treatment propensities E[W | Xi]. If W.hat = NULL,
these are estimated using a separate regression forest.}
\item{Z.hat}{Estimates of the instrument propensities E[Z | Xi]. If Z.hat = NULL,
these are estimated using a separate regression forest.}
\item{sample.fraction}{Fraction of the data used to build each tree.
Note: If honesty is used, these subsamples will
further be cut in half.}
\item{mtry}{Number of variables tried for each split.}
\item{num.trees}{Number of trees grown in the forest. Note: Getting accurate
confidence intervals generally requires more trees than
getting accurate predictions.}
\item{num.threads}{Number of threads used in training. If set to NULL, the software
automatically selects an appropriate amount.}
\item{min.node.size}{A target for the minimum number of observations in each tree leaf. Note that nodes
with size smaller than min.node.size can occur, as in the original randomForest package.}
\item{honesty}{Whether or not honest splitting (i.e., sub-sample splitting) should be used.}
\item{ci.group.size}{The forest will grow ci.group.size trees on each subsample.
In order to provide confidence intervals, ci.group.size must
be at least 2.}
\item{reduced.form.weight}{Whether splits should be regularized towards a naive
splitting criterion that ignores the instrument (and
instead emulates a causal forest).}
\item{alpha}{A tuning parameter that controls the maximum imbalance of a split.}
\item{imbalance.penalty}{A tuning parameter that controls how harshly imbalanced splits are penalized.}
\item{stabilize.splits}{Whether or not the instrument should be taken into account when
determining the imbalance of a split (experimental).}
\item{seed}{The seed for the C++ random number generator.}
\item{clusters}{Vector of integers or factors specifying which cluster each observation corresponds to.}
\item{samples_per_cluster}{If sampling by cluster, the number of observations to be sampled from
each cluster. Must be less than the size of the smallest cluster. If set to NULL,
the software will set this value to the size of the smallest cluster.}
}
\value{
A trained instrumental forest object.
}
\description{
Trains an instrumental forest that can be used to estimate
conditional local average treatment effects tau(X) identified
using instruments. Formally, the forest estimates
tau(X) = Cov[Y, Z | X = x] / Cov[W, Z | X = x].
Note that when the instrument Z and treatment assignment W
coincide, an instrumental forest is equivalent to a causal forest.
}
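% Editor's note: the original page has no \examples section. The sketch below is an
% illustrative, untested outline on simulated data; the simulation and the call to the
% package's generic predict() method are assumptions, not taken from the source.
\examples{
\dontrun{
# Simulated data: a binary instrument Z shifts a binary treatment W.
n <- 2000; p <- 5
X <- matrix(rnorm(n * p), n, p)
Z <- rbinom(n, 1, 0.5)
W <- rbinom(n, 1, 0.2 + 0.6 * Z)
Y <- pmax(X[, 1], 0) * W + X[, 2] + rnorm(n)
iv.forest <- instrumental_forest(X, Y, W, Z, num.trees = 500)
tau.hat <- predict(iv.forest)$predictions
}
}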
|
/grf/man/instrumental_forest.Rd
|
no_license
|
gannawag/grf_custom
|
R
| false | true | 3,471 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/instrumental_forest.R
\name{instrumental_forest}
\alias{instrumental_forest}
\title{Instrumental forest}
\usage{
instrumental_forest(X, Y, W, Z, Y.hat = NULL, W.hat = NULL, Z.hat = NULL,
sample.fraction = 0.5, mtry = NULL, num.trees = 2000,
num.threads = NULL, min.node.size = NULL, honesty = TRUE,
ci.group.size = 2, reduced.form.weight = 0, alpha = 0.05,
imbalance.penalty = 0, stabilize.splits = TRUE, seed = NULL,
clusters = NULL, samples_per_cluster = NULL)
}
\arguments{
\item{X}{The covariates used in the instrumental regression.}
\item{Y}{The outcome.}
\item{W}{The treatment assignment (may be binary or real).}
\item{Z}{The instrument (may be binary or real).}
\item{Y.hat}{Estimates of the expected responses E[Y | Xi], marginalizing
over treatment. If Y.hat = NULL, these are estimated using
a separate regression forest.}
\item{W.hat}{Estimates of the treatment propensities E[W | Xi]. If W.hat = NULL,
these are estimated using a separate regression forest.}
\item{Z.hat}{Estimates of the instrument propensities E[Z | Xi]. If Z.hat = NULL,
these are estimated using a separate regression forest.}
\item{sample.fraction}{Fraction of the data used to build each tree.
Note: If honesty is used, these subsamples will
further be cut in half.}
\item{mtry}{Number of variables tried for each split.}
\item{num.trees}{Number of trees grown in the forest. Note: Getting accurate
confidence intervals generally requires more trees than
getting accurate predictions.}
\item{num.threads}{Number of threads used in training. If set to NULL, the software
automatically selects an appropriate amount.}
\item{min.node.size}{A target for the minimum number of observations in each tree leaf. Note that nodes
with size smaller than min.node.size can occur, as in the original randomForest package.}
\item{honesty}{Whether or not honest splitting (i.e., sub-sample splitting) should be used.}
\item{ci.group.size}{The forest will grow ci.group.size trees on each subsample.
In order to provide confidence intervals, ci.group.size must
be at least 2.}
\item{reduced.form.weight}{Whether splits should be regularized towards a naive
splitting criterion that ignores the instrument (and
instead emulates a causal forest).}
\item{alpha}{A tuning parameter that controls the maximum imbalance of a split.}
\item{imbalance.penalty}{A tuning parameter that controls how harshly imbalanced splits are penalized.}
\item{stabilize.splits}{Whether or not the instrument should be taken into account when
determining the imbalance of a split (experimental).}
\item{seed}{The seed for the C++ random number generator.}
\item{clusters}{Vector of integers or factors specifying which cluster each observation corresponds to.}
\item{samples_per_cluster}{If sampling by cluster, the number of observations to be sampled from
each cluster. Must be less than the size of the smallest cluster. If set to NULL,
the software will set this value to the size of the smallest cluster.}
}
\value{
A trained instrumental forest object.
}
\description{
Trains an instrumental forest that can be used to estimate
conditional local average treatment effects tau(X) identified
using instruments. Formally, the forest estimates
tau(X) = Cov[Y, Z | X = x] / Cov[W, Z | X = x].
Note that when the instrument Z and treatment assignment W
coincide, an instrumental forest is equivalent to a causal forest.
}
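% Editor's note: the original page has no \examples section. The sketch below is an
% illustrative, untested outline on simulated data; the simulation and the call to the
% package's generic predict() method are assumptions, not taken from the source.
\examples{
\dontrun{
# Simulated data: a binary instrument Z shifts a binary treatment W.
n <- 2000; p <- 5
X <- matrix(rnorm(n * p), n, p)
Z <- rbinom(n, 1, 0.5)
W <- rbinom(n, 1, 0.2 + 0.6 * Z)
Y <- pmax(X[, 1], 0) * W + X[, 2] + rnorm(n)
iv.forest <- instrumental_forest(X, Y, W, Z, num.trees = 500)
tau.hat <- predict(iv.forest)$predictions
}
}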
|
library(readxl)
library(tidyverse)
trade_years <- list()
available_countries <- NULL
for(year in as.character(1964:1976)) {
trade_year <- readxl::read_excel(here::here("data-raw", "dyadic-compact.xls"),
sheet = year)
trade_year <- as.matrix(trade_year[, 5:ncol(trade_year)])
trade_year[is.na(trade_year)] <- 0
trade_year[trade_year > 0] <- 1
rownames(trade_year) <- colnames(trade_year)
if(is.null(available_countries)) {
available_countries <- colnames(trade_year)
} else {
keep <- available_countries %in% colnames(trade_year)
available_countries <- available_countries[keep]
}
trade_years[[year]] <- trade_year
}
# filter down countries and make a 3d array
n_countries <- length(available_countries)
Y <- array(0, dim = c(n_countries, n_countries, 13))
t <- 1
for(year in as.character(1964:1976)) {
Y[, , t] <- trade_years[[year]][available_countries, available_countries]
t <- t + 1
}
world_trade <- list(Y = Y, country_names = available_countries)
devtools::use_data(world_trade, overwrite = TRUE)
|
/data-raw/world-trade.R
|
permissive
|
petershan1119/lsmdn
|
R
| false | false | 1,113 |
r
|
library(readxl)
library(tidyverse)
trade_years <- list()
available_countries <- NULL
for(year in as.character(1964:1976)) {
trade_year <- readxl::read_excel(here::here("data-raw", "dyadic-compact.xls"),
sheet = year)
trade_year <- as.matrix(trade_year[, 5:ncol(trade_year)])
trade_year[is.na(trade_year)] <- 0
trade_year[trade_year > 0] <- 1
rownames(trade_year) <- colnames(trade_year)
if(is.null(available_countries)) {
available_countries <- colnames(trade_year)
} else {
keep <- available_countries %in% colnames(trade_year)
available_countries <- available_countries[keep]
}
trade_years[[year]] <- trade_year
}
# filter down countries and make a 3d array
n_countries <- length(available_countries)
Y <- array(0, dim = c(n_countries, n_countries, 13))
t <- 1
for(year in as.character(1964:1976)) {
Y[, , t] <- trade_years[[year]][available_countries, available_countries]
t <- t + 1
}
world_trade <- list(Y = Y, country_names = available_countries)
devtools::use_data(world_trade, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{plotExpNoise}
\alias{plotExpNoise}
\title{Noise-expression scatter plot}
\usage{
plotExpNoise(g, object, noise, set = NULL, ps = 0.1, norm = TRUE, ...)
}
\arguments{
\item{g}{Valid gene ID with available expression and noise estimates.}
\item{object}{\pkg{RaceID} \code{SCseq} object.}
\item{noise}{List object returned by the \code{compTBNoise} function.}
\item{set}{Set of valid cluster numbers. Default is \code{NULL} and data are plotted for cells from all clusters.}
\item{ps}{Real number. Pseudo-count added to noise and expression estimates. Default is 0.1.}
\item{norm}{logical. If \code{FALSE}, then noise is plotted versus non-normalized expression. Default is \code{TRUE} and noise is plotted against normalized expression.}
\item{...}{Additional arguments of \code{plot} function.}
}
\value{
None.
}
\description{
Plotting noise (epsilon) as a function of normalized or non-normalized expression for a given gene.
}
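% Editor's note: the original page has no \examples section. The call below is an
% illustrative, untested sketch; `sc` and `tbnoise` are hypothetical objects assumed to
% come from an earlier RaceID/VarID workflow (an SCseq object and compTBNoise output),
% and the gene name is a placeholder.
\examples{
\dontrun{
# Noise versus normalized expression of one gene, restricted to clusters 1 and 2.
plotExpNoise("Mki67", sc, tbnoise, set = c(1, 2), ps = 0.1, norm = TRUE)
}
}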
|
/man/plotExpNoise.Rd
|
no_license
|
dgrun/RaceID3_StemID2_package
|
R
| false | true | 1,036 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{plotExpNoise}
\alias{plotExpNoise}
\title{Noise-expression scatter plot}
\usage{
plotExpNoise(g, object, noise, set = NULL, ps = 0.1, norm = TRUE, ...)
}
\arguments{
\item{g}{Valid gene ID with available expression and noise estimates.}
\item{object}{\pkg{RaceID} \code{SCseq} object.}
\item{noise}{List object returned by the \code{compTBNoise} function.}
\item{set}{Set of valid cluster numbers. Default is \code{NULL} and data are plotted for cells from all clusters.}
\item{ps}{Real number. Pseudo-count added to noise and expression estimates. Default is 0.1.}
\item{norm}{logical. If \code{FALSE}, then noise is plotted versus non-normalized expression. Default is \code{TRUE} and noise is plotted against normalized expression.}
\item{...}{Additional arguments of \code{plot} function.}
}
\value{
None.
}
\description{
Plotting noise (epsilon) as a function of normalized or non-normalized expression for a given gene.
}
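% Editor's note: the original page has no \examples section. The call below is an
% illustrative, untested sketch; `sc` and `tbnoise` are hypothetical objects assumed to
% come from an earlier RaceID/VarID workflow (an SCseq object and compTBNoise output),
% and the gene name is a placeholder.
\examples{
\dontrun{
# Noise versus normalized expression of one gene, restricted to clusters 1 and 2.
plotExpNoise("Mki67", sc, tbnoise, set = c(1, 2), ps = 0.1, norm = TRUE)
}
}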
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{native_cpp_obj_fun_}
\alias{native_cpp_obj_fun_}
\title{Native object function.}
\usage{
native_cpp_obj_fun_(name, y_vec, x_mat, b_vec)
}
\arguments{
\item{name}{.}
\item{y_vec}{.}
\item{x_mat}{.}
\item{b_vec}{.}
}
\value{
.
}
\description{
This is an internal-use-only function. The manual is left blank on purpose.
}
|
/man/native_cpp_obj_fun_.Rd
|
no_license
|
cran/ncpen
|
R
| false | true | 433 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{native_cpp_obj_fun_}
\alias{native_cpp_obj_fun_}
\title{Native object function.}
\usage{
native_cpp_obj_fun_(name, y_vec, x_mat, b_vec)
}
\arguments{
\item{name}{.}
\item{y_vec}{.}
\item{x_mat}{.}
\item{b_vec}{.}
}
\value{
.
}
\description{
This is an internal-use-only function. The manual is left blank on purpose.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source.R
\name{source_python}
\alias{source_python}
\title{Read and evaluate a Python script}
\usage{
source_python(file, envir = parent.frame(), convert = TRUE)
}
\arguments{
\item{file}{Source file}
\item{envir}{The environment to assign Python objects into
(for example, \code{parent.frame()} or \code{globalenv()}). Specify \code{NULL} to
not assign Python objects.}
\item{convert}{\code{TRUE} to automatically convert Python objects to their R
equivalent. If you pass \code{FALSE} you can do manual conversion using the
\code{\link[=py_to_r]{py_to_r()}} function.}
}
\description{
Evaluate a Python script and make created Python objects available within R.
}
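% Editor's note: the original page has no \examples section. The sketch below is
% illustrative and untested; "flights.py" and the read_flights() function it is assumed
% to define are hypothetical, not part of the reticulate package itself.
\examples{
\dontrun{
library(reticulate)
# Suppose flights.py defines a Python function read_flights(file).
# Sourcing it makes read_flights() callable from R in the calling environment.
source_python("flights.py")
flights <- read_flights("flights.csv")
}
}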
|
/man/source_python.Rd
|
permissive
|
aespar21/reticulate
|
R
| false | true | 745 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source.R
\name{source_python}
\alias{source_python}
\title{Read and evaluate a Python script}
\usage{
source_python(file, envir = parent.frame(), convert = TRUE)
}
\arguments{
\item{file}{Source file}
\item{envir}{The environment to assign Python objects into
(for example, \code{parent.frame()} or \code{globalenv()}). Specify \code{NULL} to
not assign Python objects.}
\item{convert}{\code{TRUE} to automatically convert Python objects to their R
equivalent. If you pass \code{FALSE} you can do manual conversion using the
\code{\link[=py_to_r]{py_to_r()}} function.}
}
\description{
Evaluate a Python script and make created Python objects available within R.
}
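% Editor's note: the original page has no \examples section. The sketch below is
% illustrative and untested; "flights.py" and the read_flights() function it is assumed
% to define are hypothetical, not part of the reticulate package itself.
\examples{
\dontrun{
library(reticulate)
# Suppose flights.py defines a Python function read_flights(file).
# Sourcing it makes read_flights() callable from R in the calling environment.
source_python("flights.py")
flights <- read_flights("flights.csv")
}
}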
|
## best.R is a function that takes as arguments the state and a certain pathology.
## The function returns the hospital that has the lowest mortality rates in a given state
## and for a certain pathology
best <- function(state, pathology) {
## Read outcome/pathology data
data <- read.csv("/Users/BertCarlier/Documents/R Working Directory/Coursera-Course-Work/Assignment 3 Hospital Quality/rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv", colClasses = "character",na.strings="Not Available")
## Check that state and pathology are valid
validPathology = c("heart attack","heart failure","pneumonia")
if (!pathology %in% validPathology) { stop("invalid pathology")}
validState = unique(data[,7])
if (!state %in% validState) stop("invalid state")
## convert pathology name into column name
fullColName <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack", "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure", "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
colName <- fullColName[match(pathology,validPathology)]
## Return hospital name in that state with lowest 30-day death rate
data.state <- data[data$State==state,]
idx <- which.min(as.double(data.state[,colName]))
data.state[idx,"Hospital.Name"]
}
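## Illustrative usage (editor's sketch, not part of the original script; the state and
## pathology values are examples, and results depend on the local CSV path above):
## best("TX", "heart attack")  # hospital with lowest 30-day heart attack mortality in Texas
## best("MD", "pneumonia")     # hospital with lowest 30-day pneumonia mortality in Maryland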
|
/best.R
|
no_license
|
s0159140/ProgrammingAssignment3
|
R
| false | false | 1,291 |
r
|
## best.R is a function that takes as arguments the state and a certain pathology.
## The function returns the hospital that has the lowest mortality rates in a given state
## and for a certain pathology
best <- function(state, pathology) {
## Read outcome/pathology data
data <- read.csv("/Users/BertCarlier/Documents/R Working Directory/Coursera-Course-Work/Assignment 3 Hospital Quality/rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv", colClasses = "character",na.strings="Not Available")
## Check that state and pathology are valid
validPathology = c("heart attack","heart failure","pneumonia")
if (!pathology %in% validPathology) { stop("invalid pathology")}
validState = unique(data[,7])
if (!state %in% validState) stop("invalid state")
## convert pathology name into column name
fullColName <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack", "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure", "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
colName <- fullColName[match(pathology,validPathology)]
## Return hospital name in that state with lowest 30-day death rate
data.state <- data[data$State==state,]
idx <- which.min(as.double(data.state[,colName]))
data.state[idx,"Hospital.Name"]
}
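## Illustrative usage (editor's sketch, not part of the original script; the state and
## pathology values are examples, and results depend on the local CSV path above):
## best("TX", "heart attack")  # hospital with lowest 30-day heart attack mortality in Texas
## best("MD", "pneumonia")     # hospital with lowest 30-day pneumonia mortality in Maryland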
|
library(ape)
testtree <- read.tree("9554_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9554_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/9554_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
library(ape)
testtree <- read.tree("9554_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9554_0_unrooted.txt")
|
\name{estimateDeriv}
\alias{estimateDeriv}
\title{
Derivative Estimation
}
\description{
Estimate derivative of a function at a point d_0 based on a
local quadratic regression procedure of Fan and Gijbels (1996) that
utilizes an automatic bandwidth selection formula.
}
\usage{
estimateDeriv(explanatory, response, d_0, sigmaSq)
}
\arguments{
\item{explanatory}{
Explanatory sample points
}
\item{response}{
Observed responses at the explanatory sample points
}
\item{d_0}{
d_0 is the point of interest where the derivative is estimated
}
\item{sigmaSq}{
estimate of variance at d_0
}
}
\details{
This is an internal function not meant to be called directly.
}
\value{
Returns a single number representing the derivative estimate at d_0.
If a negative derivative has been estimated, then a warning is given, as this
violates the isotonic (non-decreasing) assumption.
}
\references{
Fan J, Gijbels I (1996). Local polynomial modelling and its applications,
volume 66 of Monographs on Statistics and Applied Probability.
Chapman & Hall, London. ISBN 0-412-98321-4.
}
\author{
Shawn Mankad
}
\examples{
explanatory = runif(50)
response = explanatory^2 + rnorm(50, sd=0.1)
estimateDeriv(explanatory, response, d_0=0.5,
sigmaSq=estimateSigmaSq(explanatory, response)$sigmaSq)
## The function is currently defined as
function (explanatory, response, d_0, sigmaSq)
{
deriv_estimateHelper <- function(explanatory, response, d_0,
sigmaSq) {
n = length(response)
p = 5
X = matrix(0, n, p)
for (i in 1:p) {
X[, i] = (explanatory - d_0)^i
}
beta_hat = lm(response ~ 0 + X)$coef
h = 0
for (i in (p - 1):(p + 1)) {
j = i - p + 2
h = h + beta_hat[i - 1] * factorial(j) * d_0^(j -
1)
}
return(2.275 * (sigmaSq/h^2)^(1/7) * n^(-1/7))
}
n = length(response)
p = 2
X = matrix(0, n, p)
X[, 1] = (explanatory - d_0)
X[, 2] = (explanatory - d_0)^2
bw_opt = deriv_estimateHelper(explanatory, response, d_0,
sigmaSq)
W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2,
max, 0)
while (sum(W > 1) <= 1 & bw_opt <= max(explanatory) - min(explanatory)) {
bw_opt = bw_opt * 2
W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2,
max, 0)
}
beta_hat = lm(response ~ 0 + X, weight = W)$coef
while (beta_hat[1] <= 0 & bw_opt <= max(explanatory) - min(explanatory)) {
bw_opt = bw_opt * 2
W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2,
max, 0)
beta_hat = lm(response ~ 0 + X, weight = W)$coef
}
if (beta_hat[1] <= 0) {
warning("deriv_estimate:WARNING: NEGATIVE DERIVATIVE HAS BEEN ESTIMATED",
            call. = FALSE)
return(1/log(n))
}
return(beta_hat[1])
}
}
|
/man/estimateDeriv.Rd
|
no_license
|
cran/twostageTE
|
R
| false | false | 2,896 |
rd
|
\name{estimateDeriv}
\alias{estimateDeriv}
\title{
Derivative Estimation
}
\description{
Estimate derivative of a function at a point d_0 based on a
local quadratic regression procedure of Fan and Gijbels (1996) that
utilizes an automatic bandwidth selection formula.
}
\usage{
estimateDeriv(explanatory, response, d_0, sigmaSq)
}
\arguments{
\item{explanatory}{
Explanatory sample points
}
\item{response}{
Observed responses at the explanatory sample points
}
\item{d_0}{
d_0 is the point of interest where the derivative is estimated
}
\item{sigmaSq}{
estimate of variance at d_0
}
}
\details{
This is an internal function not meant to be called directly.
}
\value{
Returns a single number representing the derivative estimate at d_0.
If a negative derivative has been estimated, then a warning is given, as this
violates the isotonic (non-decreasing) assumption.
}
\references{
Fan J, Gijbels I (1996). Local polynomial modelling and its applications,
volume 66 of Monographs on Statistics and Applied Probability.
Chapman & Hall, London. ISBN 0-412-98321-4.
}
\author{
Shawn Mankad
}
\examples{
explanatory = runif(50)
response = explanatory^2 + rnorm(50, sd=0.1)
estimateDeriv(explanatory, response, d_0=0.5,
sigmaSq=estimateSigmaSq(explanatory, response)$sigmaSq)
## The function is currently defined as
function (explanatory, response, d_0, sigmaSq)
{
deriv_estimateHelper <- function(explanatory, response, d_0,
sigmaSq) {
n = length(response)
p = 5
X = matrix(0, n, p)
for (i in 1:p) {
X[, i] = (explanatory - d_0)^i
}
beta_hat = lm(response ~ 0 + X)$coef
h = 0
for (i in (p - 1):(p + 1)) {
j = i - p + 2
h = h + beta_hat[i - 1] * factorial(j) * d_0^(j -
1)
}
return(2.275 * (sigmaSq/h^2)^(1/7) * n^(-1/7))
}
n = length(response)
p = 2
X = matrix(0, n, p)
X[, 1] = (explanatory - d_0)
X[, 2] = (explanatory - d_0)^2
bw_opt = deriv_estimateHelper(explanatory, response, d_0,
sigmaSq)
W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2,
max, 0)
while (sum(W > 1) <= 1 & bw_opt <= max(explanatory) - min(explanatory)) {
bw_opt = bw_opt * 2
W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2,
max, 0)
}
beta_hat = lm(response ~ 0 + X, weight = W)$coef
while (beta_hat[1] <= 0 & bw_opt <= max(explanatory) - min(explanatory)) {
bw_opt = bw_opt * 2
W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2,
max, 0)
beta_hat = lm(response ~ 0 + X, weight = W)$coef
}
if (beta_hat[1] <= 0) {
warning("deriv_estimate:WARNING: NEGATIVE DERIVATIVE HAS BEEN ESTIMATED",
            call. = FALSE)
return(1/log(n))
}
return(beta_hat[1])
}
}
|
# construct species consensus sequences
# PACKAGE: megaptera
# CALLED BY: stepE
# AUTHOR: Christoph Heibl
# LAST UPDATE: 2014-10-30
speciesConsensus <- function(megProj, spec){
## PARAMETERS
## -----------
gene <- megProj@locus@sql
acc.tab <- paste("acc", gsub("^_", "", gene), sep = "_")
spec.tab <- paste("spec", gsub("^_", "", gene), sep = "_")
align.exe <- megProj@align.exe
max.bp <- megProj@params@max.bp
max.dist <- megProj@params@max.dist
logfile <- paste(gene, "stepE.log", sep = "-")
## open database connection
## ------------------------
conn <- dbconnect(megProj@db)
## read species alignments
## -----------------------
obj <- dbReadDNA(spec, conn = conn, tab.name = acc.tab,
max.bp = max.bp, max.dist = max.dist,
enforce.binomial = TRUE)
## check for aberrant sequences
## SHOULD BE REMOVED TO STEP D?!
## ------------------------------
# d <- lapply(obj, myDist)
# md <- sapply(d, max, na.rm = TRUE)
# md <- names(md[md > .1])
# if ( length(md) > 0 ){
# md <- sql.wrap(md, term = "taxon")
# md <- paste("(", md, ")", sep = "")
# md <- paste("SELECT * FROM", acc.tab, "WHERE npos <=", max.bp, "AND distreference <=", max.dist,
# "AND", md, "ORDER BY taxon")
# md <- dbGetQuery(conn = conn, md)
# s <- split(md["distreference"], f = md$taxon)
# s <- sapply(s, min) * 1.25
# gis <- vector()
# for ( i in seq_along(s) ){
# id <- md[md$taxon == names(s)[i] & md$distreference < s[i], "gi"]
# gis <- c(gis, id)
# id <- paste(id, collapse = "|")
# txn <- names(s)[i]
# obj[[txn]] <- obj[[txn]][grep(id, rownames(obj[[txn]])), ]
# obj[[txn]] <- deleteEmptyCells(obj[[txn]], quiet = TRUE)
# }
# gis <- setdiff(md$gi, gis)
# } else {
# gis <- NULL
# }
## update status field
## -------------------
# SQL <- paste("UPDATE", acc.tab,
# "SET status=status || '-selected'",
# "WHERE status !~ 'selected' AND npos <=", max.bp,
# "AND distreference <=", max.dist)
# dbSendQuery(conn, SQL)
# if ( !is.null(gis) ){
# SQL <- c(SQL,
# paste("UPDATE " , acc.tab, " SET status='too distant (from conspecific)' WHERE gi='", gis, "'", sep = ""))
# }
## update locus table
## ---------------------
nb.acc <- nrow(obj)
SQL <- paste("UPDATE locus",
"SET", sql.wrap(nb.acc, term = paste(gene, "sel", sep = "_")),
"WHERE", sql.wrap(spec))
dbSendQuery(conn, SQL)
# tax <- dbReadTable(conn, "locus")$spec
# # tax <- setdiff(tax, union(spec, already))
# tax <- setdiff(tax, spec)
# SQL <- paste("UPDATE locus",
# "SET", sql.wrap(0, term = paste(gene, "sel", sep = "_")),
# "WHERE", sql.wrap(tax, BOOL = NULL))
# lapply(SQL, dbSendQuery, conn = conn)
## species consensus sequences
## ---------------------------
obj <- specCons(obj, log = logfile)
obj <- list(obj)
names(obj) <- spec
class(obj) <- "DNAbin"
write.dna.spectable(conn, spec.tab, obj)
dbDisconnect(conn)
}
|
/R/speciesConsensus.R
|
no_license
|
cran/megaptera
|
R
| false | false | 3,194 |
r
|
# construct species consensus sequences
# PACKAGE: megaptera
# CALLED BY: stepE
# AUTHOR: Christoph Heibl
# LAST UPDATE: 2014-10-30
speciesConsensus <- function(megProj, spec){
## PARAMETERS
## -----------
gene <- megProj@locus@sql
acc.tab <- paste("acc", gsub("^_", "", gene), sep = "_")
spec.tab <- paste("spec", gsub("^_", "", gene), sep = "_")
align.exe <- megProj@align.exe
max.bp <- megProj@params@max.bp
max.dist <- megProj@params@max.dist
logfile <- paste(gene, "stepE.log", sep = "-")
## open database connection
## ------------------------
conn <- dbconnect(megProj@db)
## read species alignments
## -----------------------
obj <- dbReadDNA(spec, conn = conn, tab.name = acc.tab,
max.bp = max.bp, max.dist = max.dist,
enforce.binomial = TRUE)
## check for aberrant sequences
## SHOULD BE REMOVED TO STEP D?!
## ------------------------------
# d <- lapply(obj, myDist)
# md <- sapply(d, max, na.rm = TRUE)
# md <- names(md[md > .1])
# if ( length(md) > 0 ){
# md <- sql.wrap(md, term = "taxon")
# md <- paste("(", md, ")", sep = "")
# md <- paste("SELECT * FROM", acc.tab, "WHERE npos <=", max.bp, "AND distreference <=", max.dist,
# "AND", md, "ORDER BY taxon")
# md <- dbGetQuery(conn = conn, md)
# s <- split(md["distreference"], f = md$taxon)
# s <- sapply(s, min) * 1.25
# gis <- vector()
# for ( i in seq_along(s) ){
# id <- md[md$taxon == names(s)[i] & md$distreference < s[i], "gi"]
# gis <- c(gis, id)
# id <- paste(id, collapse = "|")
# txn <- names(s)[i]
# obj[[txn]] <- obj[[txn]][grep(id, rownames(obj[[txn]])), ]
# obj[[txn]] <- deleteEmptyCells(obj[[txn]], quiet = TRUE)
# }
# gis <- setdiff(md$gi, gis)
# } else {
# gis <- NULL
# }
## update status field
## -------------------
# SQL <- paste("UPDATE", acc.tab,
# "SET status=status || '-selected'",
# "WHERE status !~ 'selected' AND npos <=", max.bp,
# "AND distreference <=", max.dist)
# dbSendQuery(conn, SQL)
# if ( !is.null(gis) ){
# SQL <- c(SQL,
# paste("UPDATE " , acc.tab, " SET status='too distant (from conspecific)' WHERE gi='", gis, "'", sep = ""))
# }
## update locus table
## ---------------------
nb.acc <- nrow(obj)
SQL <- paste("UPDATE locus",
"SET", sql.wrap(nb.acc, term = paste(gene, "sel", sep = "_")),
"WHERE", sql.wrap(spec))
dbSendQuery(conn, SQL)
# tax <- dbReadTable(conn, "locus")$spec
# # tax <- setdiff(tax, union(spec, already))
# tax <- setdiff(tax, spec)
# SQL <- paste("UPDATE locus",
# "SET", sql.wrap(0, term = paste(gene, "sel", sep = "_")),
# "WHERE", sql.wrap(tax, BOOL = NULL))
# lapply(SQL, dbSendQuery, conn = conn)
## species consensus sequences
## ---------------------------
obj <- specCons(obj, log = logfile)
obj <- list(obj)
names(obj) <- spec
class(obj) <- "DNAbin"
write.dna.spectable(conn, spec.tab, obj)
dbDisconnect(conn)
}
|
demix <- function(x = NULL,
Preprocessing = NULL,
pdf = NULL,
k = NULL,
xmin = NULL,
xmax = NULL, ...)
{
digits <- getOption("digits"); options(digits = 15)
if (is.null(x)) {
stop(sQuote("x"), " must not be NULL!", call. = FALSE)
}
if ((!is.numeric(x)) && (!is.data.frame(x))) {
stop(sQuote("x"), " numeric or data frame is requested!", call. = FALSE)
}
x <- as.matrix(x)
d <- ncol(x)
n <- nrow(x)
if (is.null(Preprocessing)) {
stop(sQuote("Preprocessing"), " must not be NULL!", call. = FALSE)
}
if (!is.character(Preprocessing)) {
stop(sQuote("Preprocessing"), " character is requested!", call. = FALSE)
}
Preprocessing <- match.arg(Preprocessing, .rebmix$Preprocessing, several.ok = FALSE)
if (is.null(pdf)) {
stop(sQuote("pdf"), " must not be NULL!", call. = FALSE)
}
if (!is.character(pdf)) {
stop(sQuote("pdf"), " character vector is requested!", call. = FALSE)
}
pdf <- match.arg(pdf, .rebmix$pdf, several.ok = TRUE)
Variables <- NULL
for (i in 1:length(.rebmix$pdf)) {
Variables[which(pdf == .rebmix$pdf[i])] <- .rebmix$pdf.Variables[i]
}
if (is.null(k)) {
stop(sQuote("k"), " must not be NULL!", call. = FALSE)
}
if (!is.wholenumber(k)) {
stop(sQuote("k"), " integer is requested!", call. = FALSE)
}
if (!(k > 0)) {
stop(sQuote("k"), " must be greater than 0!", call. = FALSE)
}
if (is.null(xmin)) {
xmin <- apply(x, 2, min)
}
else {
xmin <- xmin
}
if (is.null(xmax)) {
xmax <- apply(x, 2, max)
}
else {
xmax <- xmax
}
if (Preprocessing == .rebmix$Preprocessing[1]) {
h <- array(data = 0.0, dim = d, dimnames = NULL)
y0 <- array(data = 0.0, dim = d, dimnames = NULL)
for (i in 1:d) {
if (Variables[i] == .rebmix$Variables[1]) {
h[i] = (xmax[i] - xmin[i]) / k; y0[i] = xmin[i] + 0.5 * h[i]
}
else
if (Variables[i] == .rebmix$Variables[2]) {
h[i] = 1.0; y0[i] = xmin[i]
}
}
output <- .C("RPreprocessingH",
h = as.double(h),
y0 = as.double(y0),
length.pdf = as.integer(d),
pdf = as.character(pdf),
k = as.integer(k),
n = as.integer(n),
d = as.integer(d),
x = as.double(unlist(x)),
y = double(n * (d + 1)),
error = integer(1),
PACKAGE = "rebmix")
if (output$error == 1) {
stop("in preprocessing!", call. = FALSE); return(NA)
}
length(output$y) <- output$k * (output$d + 1); dim(output$y) <- c(output$k, output$d + 1)
output$y[, d + 1] <- output$y[, d + 1] / prod(h) / n
output <- as.data.frame(output$y, stringsAsFactors = FALSE)
colnames(output) <- c(paste("x", if (d > 1) 1:d else "", sep = ""), "f")
}
else
if (Preprocessing == .rebmix$Preprocessing[2]) {
h <- array(data = 0.0, dim = d, dimnames = NULL)
for (i in 1:d) {
if (Variables[i] == .rebmix$Variables[1]) {
h[i] = (xmax[i] - xmin[i]) / k
}
else
if (Variables[i] == .rebmix$Variables[2]) {
h[i] = 1.0
}
}
output <- .C("RPreprocessingPW",
h = as.double(h),
n = as.integer(n),
d = as.integer(d),
x = as.double(unlist(x)),
y = double(n * (d + 2)),
error = integer(1),
PACKAGE = "rebmix")
if (output$error == 1) {
stop("in preprocessing!", call. = FALSE); return(NA)
}
dim(output$y) <- c(n, d + 2)
output$y[, d + 2] <- output$y[, d + 2] / prod(h) / n
output <- as.data.frame(output$y[, -(d + 1)], stringsAsFactors = FALSE)
colnames(output) <- c(paste("x", if (d > 1) 1:d else "", sep = ""), "f")
}
else
if (Preprocessing == .rebmix$Preprocessing[3]) {
h <- array(data = 0.0, dim = d, dimnames = NULL)
for (i in 1:d) {
h[i] = xmax[i] - xmin[i]
}
output <- .C("RPreprocessingKNN",
k = as.integer(k),
h = as.double(h),
n = as.integer(n),
d = as.integer(d),
x = as.double(unlist(x)),
y = double(n * (d + 3)),
error = integer(1),
PACKAGE = "rebmix")
if (output$error == 1) {
stop("in preprocessing!", call. = FALSE); return(NA)
}
dim(output$y) <- c(n, d + 3)
output$y[, d + 2] <- k / output$y[, d + 2] / n
output <- as.data.frame(output$y[, c(-(d + 1), -(d + 3))], stringsAsFactors = FALSE)
colnames(output) <- c(paste("x", if (d > 1) 1:d else "", sep = ""), "f")
}
options(digits = digits)
rm(list = ls()[!(ls() %in% c("output"))])
invisible(output)
} ## demix
|
/rebmix/R/demix.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 4,774 |
r
|
demix <- function(x = NULL,
Preprocessing = NULL,
pdf = NULL,
k = NULL,
xmin = NULL,
xmax = NULL, ...)
{
digits <- getOption("digits"); options(digits = 15)
if (is.null(x)) {
stop(sQuote("x"), " must not be NULL!", call. = FALSE)
}
if ((!is.numeric(x)) && (!is.data.frame(x))) {
stop(sQuote("x"), " numeric or data frame is requested!", call. = FALSE)
}
x <- as.matrix(x)
d <- ncol(x)
n <- nrow(x)
if (is.null(Preprocessing)) {
stop(sQuote("Preprocessing"), " must not be NULL!", call. = FALSE)
}
if (!is.character(Preprocessing)) {
stop(sQuote("Preprocessing"), " character is requested!", call. = FALSE)
}
Preprocessing <- match.arg(Preprocessing, .rebmix$Preprocessing, several.ok = FALSE)
if (is.null(pdf)) {
stop(sQuote("pdf"), " must not be NULL!", call. = FALSE)
}
if (!is.character(pdf)) {
stop(sQuote("pdf"), " character vector is requested!", call. = FALSE)
}
pdf <- match.arg(pdf, .rebmix$pdf, several.ok = TRUE)
Variables <- NULL
for (i in 1:length(.rebmix$pdf)) {
Variables[which(pdf == .rebmix$pdf[i])] <- .rebmix$pdf.Variables[i]
}
if (is.null(k)) {
stop(sQuote("k"), " must not be NULL!", call. = FALSE)
}
if (!is.wholenumber(k)) {
stop(sQuote("k"), " integer is requested!", call. = FALSE)
}
if (!(k > 0)) {
stop(sQuote("k"), " must be greater than 0!", call. = FALSE)
}
if (is.null(xmin)) {
xmin <- apply(x, 2, min)
}
else {
xmin <- xmin
}
if (is.null(xmax)) {
xmax <- apply(x, 2, max)
}
else {
xmax <- xmax
}
if (Preprocessing == .rebmix$Preprocessing[1]) {
h <- array(data = 0.0, dim = d, dimnames = NULL)
y0 <- array(data = 0.0, dim = d, dimnames = NULL)
for (i in 1:d) {
if (Variables[i] == .rebmix$Variables[1]) {
h[i] = (xmax[i] - xmin[i]) / k; y0[i] = xmin[i] + 0.5 * h[i]
}
else
if (Variables[i] == .rebmix$Variables[2]) {
h[i] = 1.0; y0[i] = xmin[i]
}
}
output <- .C("RPreprocessingH",
h = as.double(h),
y0 = as.double(y0),
length.pdf = as.integer(d),
pdf = as.character(pdf),
k = as.integer(k),
n = as.integer(n),
d = as.integer(d),
x = as.double(unlist(x)),
y = double(n * (d + 1)),
error = integer(1),
PACKAGE = "rebmix")
if (output$error == 1) {
stop("in preprocessing!", call. = FALSE); return(NA)
}
length(output$y) <- output$k * (output$d + 1); dim(output$y) <- c(output$k, output$d + 1)
output$y[, d + 1] <- output$y[, d + 1] / prod(h) / n
output <- as.data.frame(output$y, stringsAsFactors = FALSE)
colnames(output) <- c(paste("x", if (d > 1) 1:d else "", sep = ""), "f")
}
else
if (Preprocessing == .rebmix$Preprocessing[2]) {
h <- array(data = 0.0, dim = d, dimnames = NULL)
for (i in 1:d) {
if (Variables[i] == .rebmix$Variables[1]) {
h[i] = (xmax[i] - xmin[i]) / k
}
else
if (Variables[i] == .rebmix$Variables[2]) {
h[i] = 1.0
}
}
output <- .C("RPreprocessingPW",
h = as.double(h),
n = as.integer(n),
d = as.integer(d),
x = as.double(unlist(x)),
y = double(n * (d + 2)),
error = integer(1),
PACKAGE = "rebmix")
if (output$error == 1) {
stop("in preprocessing!", call. = FALSE); return(NA)
}
dim(output$y) <- c(n, d + 2)
output$y[, d + 2] <- output$y[, d + 2] / prod(h) / n
output <- as.data.frame(output$y[, -(d + 1)], stringsAsFactors = FALSE)
colnames(output) <- c(paste("x", if (d > 1) 1:d else "", sep = ""), "f")
}
else
if (Preprocessing == .rebmix$Preprocessing[3]) {
h <- array(data = 0.0, dim = d, dimnames = NULL)
for (i in 1:d) {
h[i] = xmax[i] - xmin[i]
}
output <- .C("RPreprocessingKNN",
k = as.integer(k),
h = as.double(h),
n = as.integer(n),
d = as.integer(d),
x = as.double(unlist(x)),
y = double(n * (d + 3)),
error = integer(1),
PACKAGE = "rebmix")
if (output$error == 1) {
stop("in preprocessing!", call. = FALSE); return(NA)
}
dim(output$y) <- c(n, d + 3)
output$y[, d + 2] <- k / output$y[, d + 2] / n
output <- as.data.frame(output$y[, c(-(d + 1), -(d + 3))], stringsAsFactors = FALSE)
colnames(output) <- c(paste("x", if (d > 1) 1:d else "", sep = ""), "f")
}
options(digits = digits)
rm(list = ls()[!(ls() %in% c("output"))])
invisible(output)
} ## demix
|
\name{addqtl}
\alias{addqtl}
\title{Scan for an additional QTL in a multiple-QTL model}
\description{
Scan for an additional QTL in the context of a
multiple QTL model.
}
\usage{
addqtl(cross, chr, pheno.col=1, qtl, covar=NULL, formula,
method=c("imp","hk"), incl.markers=TRUE, verbose=FALSE)
}
\arguments{
\item{cross}{An object of class \code{cross}. See
\code{\link[qtl]{read.cross}} for details.}
\item{chr}{Optional vector indicating the chromosomes to be scanned. If
missing, all chromosomes are scanned. Refer to chromosomes by
name. Refer to chromosomes with a preceding \code{-} to have all
chromosomes but those considered. A logical (TRUE/FALSE) vector may
also be used.}
\item{pheno.col}{Column number in the phenotype matrix to be
used as the phenotype. One may also give a character string matching
a phenotype name. Finally, one may give a numeric vector of
phenotypes, in which case it must have the length equal to the number
of individuals in the cross, and there must be either non-integers or
values < 1 or > no. phenotypes; this last case may be useful for studying
transformations.}
\item{qtl}{An object of class \code{qtl}, as output from
\code{\link[qtl]{makeqtl}}.}
\item{covar}{A matrix or data.frame of covariates. These must be
strictly numeric.}
\item{formula}{An object of class \code{\link[stats]{formula}}
indicating the model to be fitted. (It can also be the character
string representation of a formula.) QTLs are referred to as
\code{Q1}, \code{Q2}, etc. Covariates are referred to by their names
in the data frame \code{covar}. If the new QTL is not included in
the formula, its main effect is added.}
\item{method}{Indicates whether to use multiple imputation or
Haley-Knott regression.}
\item{incl.markers}{If FALSE, do calculations only at points on an
evenly spaced grid. If \code{\link[qtl]{calc.genoprob}} or
\code{\link[qtl]{sim.geno}} were run with
\code{stepwidth="variable"}, we force \code{incl.markers=TRUE}.}
\item{verbose}{If TRUE, display information about the progress of
calculations. If \code{verbose} is an integer > 1, further messages
from \code{\link[qtl]{scanqtl}} are also displayed.}
}
\details{
The formula is used to specify the model to be fit. In the
formula, use \code{Q1}, \code{Q2}, etc., or \code{q1},
\code{q2}, etc., to represent the QTLs, and the column names in the
covariate data frame to represent the covariates.
We enforce a hierarchical structure on the model formula: if a QTL or
covariate is involved in an interaction, its main effect must also
be included.
If one wishes to scan for QTL that interact with another QTL, include
it in the formula (with an index of one more than the number of QTL in
the input \code{qtl} object).
}
\section{Value}{
An object of class \code{scanone}, as produced by the
\code{\link[qtl]{scanone}} function. LOD scores are relative to the
base model (with any terms that include the new QTL omitted).
}
\references{
Haley, C. S. and Knott, S. A. (1992) A simple regression method for mapping
quantitative trait loci in line crosses using flanking markers.
\emph{Heredity} \bold{69}, 315--324.
Sen, \'S. and Churchill, G. A. (2001) A statistical framework for quantitative
trait mapping. \emph{Genetics} \bold{159}, 371--387.
}
\author{Karl W Broman, \email{kbroman@biostat.wisc.edu}}
\examples{
data(fake.f2)
# take out several QTLs and make QTL object
qc <- c(1, 8, 13)
qp <- c(26, 56, 28)
fake.f2 <- subset(fake.f2, chr=c(1,2,3,8,13))
\dontshow{fake.f2 <- subset(fake.f2, ind=1:50)}
fake.f2 <- calc.genoprob(fake.f2, step=2, err=0.001)
qtl <- makeqtl(fake.f2, qc, qp, what="prob")
# scan for an additional QTL
out1 <- addqtl(fake.f2, qtl=qtl, formula=y~Q1+Q2+Q3, method="hk")
max(out1)
# scan for an additional QTL that interacts with the locus on chr 1
out2 <- addqtl(fake.f2, qtl=qtl, formula=y~Q1*Q4+Q2+Q3, method="hk")
max(out2)
# plot interaction LOD scores
plot(out2-out1)
}
\seealso{ \code{\link[qtl]{scanone}}, \code{\link[qtl]{fitqtl}},
\code{\link[qtl]{scanqtl}}, \code{\link[qtl]{refineqtl}},
\code{\link[qtl]{makeqtl}}, \code{\link[qtl]{addtoqtl}},
\code{\link[qtl]{addpair}}, \code{\link[qtl]{addint}} }
\keyword{models}
|
/man/addqtl.Rd
|
no_license
|
pjotrp/rqtl
|
R
| false | false | 4,328 |
rd
|
\name{addqtl}
\alias{addqtl}
\title{Scan for an additional QTL in a multiple-QTL model}
\description{
Scan for an additional QTL in the context of a
multiple QTL model.
}
\usage{
addqtl(cross, chr, pheno.col=1, qtl, covar=NULL, formula,
method=c("imp","hk"), incl.markers=TRUE, verbose=FALSE)
}
\arguments{
\item{cross}{An object of class \code{cross}. See
\code{\link[qtl]{read.cross}} for details.}
\item{chr}{Optional vector indicating the chromosomes to be scanned. If
missing, all chromosomes are scanned. Refer to chromosomes by
name. Refer to chromosomes with a preceding \code{-} to have all
chromosomes but those considered. A logical (TRUE/FALSE) vector may
also be used.}
\item{pheno.col}{Column number in the phenotype matrix to be
used as the phenotype. One may also give a character string matching
a phenotype name. Finally, one may give a numeric vector of
phenotypes, in which case it must have the length equal to the number
of individuals in the cross, and there must be either non-integers or
values < 1 or > no. phenotypes; this last case may be useful for studying
transformations.}
\item{qtl}{An object of class \code{qtl}, as output from
\code{\link[qtl]{makeqtl}}.}
\item{covar}{A matrix or data.frame of covariates. These must be
strictly numeric.}
\item{formula}{An object of class \code{\link[stats]{formula}}
indicating the model to be fitted. (It can also be the character
string representation of a formula.) QTLs are referred to as
\code{Q1}, \code{Q2}, etc. Covariates are referred to by their names
in the data frame \code{covar}. If the new QTL is not included in
the formula, its main effect is added.}
\item{method}{Indicates whether to use multiple imputation or
Haley-Knott regression.}
\item{incl.markers}{If FALSE, do calculations only at points on an
evenly spaced grid. If \code{\link[qtl]{calc.genoprob}} or
\code{\link[qtl]{sim.geno}} were run with
\code{stepwidth="variable"}, we force \code{incl.markers=TRUE}.}
\item{verbose}{If TRUE, display information about the progress of
calculations. If \code{verbose} is an integer > 1, further messages
from \code{\link[qtl]{scanqtl}} are also displayed.}
}
\details{
The formula is used to specify the model to be fit. In the
formula, use \code{Q1}, \code{Q2}, etc., or \code{q1},
\code{q2}, etc., to represent the QTLs, and the column names in the
covariate data frame to represent the covariates.
We enforce a hierarchical structure on the model formula: if a QTL or
covariate is involved in an interaction, its main effect must also
be included.
If one wishes to scan for QTL that interact with another QTL, include
it in the formula (with an index of one more than the number of QTL in
the input \code{qtl} object).
}
\section{Value}{
An object of class \code{scanone}, as produced by the
\code{\link[qtl]{scanone}} function. LOD scores are relative to the
base model (with any terms that include the new QTL omitted).
}
\references{
Haley, C. S. and Knott, S. A. (1992) A simple regression method for mapping
quantitative trait loci in line crosses using flanking markers.
\emph{Heredity} \bold{69}, 315--324.
Sen, \'S. and Churchill, G. A. (2001) A statistical framework for quantitative
trait mapping. \emph{Genetics} \bold{159}, 371--387.
}
\author{Karl W Broman, \email{kbroman@biostat.wisc.edu}}
\examples{
data(fake.f2)
# take out several QTLs and make QTL object
qc <- c(1, 8, 13)
qp <- c(26, 56, 28)
fake.f2 <- subset(fake.f2, chr=c(1,2,3,8,13))
\dontshow{fake.f2 <- subset(fake.f2, ind=1:50)}
fake.f2 <- calc.genoprob(fake.f2, step=2, err=0.001)
qtl <- makeqtl(fake.f2, qc, qp, what="prob")
# scan for an additional QTL
out1 <- addqtl(fake.f2, qtl=qtl, formula=y~Q1+Q2+Q3, method="hk")
max(out1)
# scan for an additional QTL that interacts with the locus on chr 1
out2 <- addqtl(fake.f2, qtl=qtl, formula=y~Q1*Q4+Q2+Q3, method="hk")
max(out2)
# plot interaction LOD scores
plot(out2-out1)
}
\seealso{ \code{\link[qtl]{scanone}}, \code{\link[qtl]{fitqtl}},
\code{\link[qtl]{scanqtl}}, \code{\link[qtl]{refineqtl}},
\code{\link[qtl]{makeqtl}}, \code{\link[qtl]{addtoqtl}},
\code{\link[qtl]{addpair}}, \code{\link[qtl]{addint}} }
\keyword{models}
|
setwd("E:/coursera course/Course4- Exploratory Data Analysis/Week1")
electric <- read.table("household_power_consumption.txt", sep = ";", header=TRUE, na.strings="?")
electric$datetime <- strptime(paste(electric$Date, electric$Time, sep = " "), format="%d/%m/%Y %H:%M:%S")
electric$Date <- as.Date(electric$Date, format = "%d/%m/%Y")
# Time is kept as a character column; the combined datetime column above already
# carries the full timestamp, so no separate time-class conversion is needed.
mydata <- electric[electric$Date >= as.Date("2007-02-01") & electric$Date <= as.Date("2007-02-02"),] # subset right.
#### Plot1 ####
hist(mydata$Global_active_power, col = "red", main = paste("Global Active Power"), xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
vivaldiibm/ExData_Plotting1
|
R
| false | false | 660 |
r
|
setwd("E:/coursera course/Course4- Exploratory Data Analysis/Week1")
electric <- read.table("household_power_consumption.txt", sep = ";", header=TRUE, na.strings="?")
electric$datetime <- strptime(paste(electric$Date, electric$Time, sep = " "), format="%d/%m/%Y %H:%M:%S")
electric$Date <- as.Date(electric$Date, format = "%d/%m/%Y")
# Time is kept as a character column; the combined datetime column above already
# carries the full timestamp, so no separate time-class conversion is needed.
mydata <- electric[electric$Date >= as.Date("2007-02-01") & electric$Date <= as.Date("2007-02-02"),] # subset right.
#### Plot1 ####
hist(mydata$Global_active_power, col = "red", main = paste("Global Active Power"), xlab = "Global Active Power (kilowatts)")
dev.off()
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_energy_LA123.electricity
#'
#' This script creates electricity generation and inputs by fuel, region and historical year. Estimates are adjusted by efficiency factors.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L123.out_EJ_R_elec_F_Yh}, \code{L123.in_EJ_R_elec_F_Yh}, \code{L123.eff_R_elec_F_Yh}, \code{L123.out_EJ_R_indchp_F_Yh}, \code{L123.in_EJ_R_indchp_F_Yh}. The corresponding file in the
#' original data system was \code{LA123.electricity.R} (energy level1).
#' @details This script creates electricity generation and inputs by fuel, region and historical year. Estimates are adjusted by efficiency factors.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter full_join funs if_else group_by left_join mutate select semi_join summarise summarise_all
#' @author FF April 2017
module_energy_LA123.electricity <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "energy/enduse_fuel_aggregation",
FILE = "energy/A23.chp_elecratio",
"L1011.en_bal_EJ_R_Si_Fi_Yh"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L123.out_EJ_R_elec_F_Yh",
"L123.in_EJ_R_elec_F_Yh",
"L123.eff_R_elec_F_Yh",
"L123.out_EJ_R_indchp_F_Yh",
"L123.in_EJ_R_indchp_F_Yh"))
} else if(command == driver.MAKE) {
year <- value <- GCAM_region_ID <- fuel <- sector <- electricity <-
outputs <- input <- outputs_new <- industry <- elec_ratio <-
outputs_ratio <- NULL # silence package check.
all_data <- list(...)[[1]]
# Load required inputs
enduse_fuel_aggregation <- get_data(all_data, "energy/enduse_fuel_aggregation")
A23.chp_elecratio <- get_data(all_data, "energy/A23.chp_elecratio")
L1011.en_bal_EJ_R_Si_Fi_Yh <- get_data(all_data, "L1011.en_bal_EJ_R_Si_Fi_Yh")
# ===================================================
# Creates end use fuel for electricity to be used to create L123.in_EJ_R_elec_F_Yh
enduse_fuel_aggregation %>%
select(fuel,electricity) %>%
filter(!is.na(electricity)) ->
enduse_fuel_aggregation_electricity
# Creates L123.in_EJ_R_elec_F_Yh based on L1011.en_bal_EJ_R_Si_Fi_Yh and enduse_fuel_aggregation_electricity
# Calculates the inputs by fuel (based on electricity input fuels), region ID and sector (electricity)
L1011.en_bal_EJ_R_Si_Fi_Yh %>%
filter(sector == "in_electricity generation") %>%
mutate(sector = "electricity generation") %>%
left_join(enduse_fuel_aggregation_electricity, by = "fuel") %>%
mutate(fuel = electricity, input = value) %>%
select(-electricity, -value) %>%
filter(fuel %in% energy.ELECTRICITY_INPUT_FUELS) %>%
group_by(GCAM_region_ID, sector, fuel, year) %>%
summarise_all(funs(sum)) %>%
ungroup() ->
L123.in_EJ_R_elec_F_Yh
# Creates L123.out_EJ_R_elec_F_Yh based on L1011.en_bal_EJ_R_Si_Fi_Yh and enduse_fuel_aggregation_electricity
# Calculates the electricity outputs by fuel, region ID and sector (electricity generation)
L1011.en_bal_EJ_R_Si_Fi_Yh %>%
filter(sector == "out_electricity generation") %>%
mutate(sector = "electricity generation") %>%
left_join(enduse_fuel_aggregation_electricity, by = "fuel") %>%
mutate(fuel = electricity, outputs = value) %>%
select(-electricity, -value) %>%
group_by(GCAM_region_ID, sector, fuel, year) %>%
summarise_all(funs(sum)) %>%
ungroup() %>%
filter(!is.na(fuel)) ->
L123.out_EJ_R_elec_F_Yh
# Long form of L123.in_EJ_R_elec_F_Yh (previously created) to be used to calculate efficiencies
# based on electricity inputs and outputs by fuel and year.
# Calculates electricity generation efficiencies (L123.eff_R_elec_F_Yh) by region, fuel and year
L123.out_EJ_R_elec_F_Yh %>%
semi_join(L123.in_EJ_R_elec_F_Yh, by = c("GCAM_region_ID", "fuel", "year")) %>%
left_join(L123.in_EJ_R_elec_F_Yh, by = c("GCAM_region_ID", "sector", "fuel", "year")) %>%
mutate(value = outputs / input) %>%
select(-outputs, -input) ->
L123.eff_R_elec_F_Yh
# Taking care of NA, 0, and Inf values generated in the previous step (efficiency calculations) and updating
# the efficiency output L123.eff_R_elec_F_Yh
L123.eff_R_elec_F_Yh %>%
mutate(value = if_else(!is.na(value), value, energy.DEFAULT_ELECTRIC_EFFICIENCY),
value = if_else(value == 0, energy.DEFAULT_ELECTRIC_EFFICIENCY, value),
value = if_else(is.infinite(value), energy.DEFAULT_ELECTRIC_EFFICIENCY, value)) ->
L123.eff_R_elec_F_Yh
# Tibble (Output_efficiency_based) created to adjust electricity outputs (L123.out_EJ_R_elec_F_Yh)
# based on above modified efficiency calculations
L123.in_EJ_R_elec_F_Yh %>%
left_join(L123.eff_R_elec_F_Yh, by = c("GCAM_region_ID", "sector", "fuel", "year")) %>%
mutate(outputs_new = input * value) %>%
select(-input, -value) ->
Output_efficiency_based
# Adjusting electricity outputs based on new efficiencies
L123.out_EJ_R_elec_F_Yh %>%
left_join(Output_efficiency_based, by = c("GCAM_region_ID", "sector", "fuel", "year")) %>%
mutate(outputs = if_else(!is.na(outputs_new), outputs_new, outputs)) %>%
select(-outputs_new) ->
L123.out_EJ_R_elec_F_Yh
#2: CHP calculations
# filter end use fuel in the industry sector and remove any NA generated.
enduse_fuel_aggregation %>%
select(fuel, industry) %>%
filter(!is.na(industry)) ->
enduse_fuel_aggregation_industry
# Create Industry CHP estimates by fuel, region and industry sector.
L1011.en_bal_EJ_R_Si_Fi_Yh %>%
filter(sector == "out_chp_elec") %>%
mutate(sector = "chp_elec") %>%
left_join(enduse_fuel_aggregation_industry, by = "fuel") %>%
mutate(fuel = industry) %>%
select(-industry) %>%
mutate(fuel = replace(fuel, fuel == "heat", NA),
fuel = replace(fuel, fuel == "electricity", NA)) %>%
group_by(GCAM_region_ID, sector, fuel, year) %>%
summarise_all(funs(sum)) %>%
ungroup() %>%
filter(!is.na(fuel)) ->
L123.out_EJ_R_indchp_F_Yh
# Estimates inputs for CHP in the industry sector based on outputs and the electricity ratio by fuel (output/elec_ratio)
L123.out_EJ_R_indchp_F_Yh %>%
full_join(filter(A23.chp_elecratio,fuel != "hydrogen"), by = "fuel") %>%
mutate(outputs_ratio = value / elec_ratio) %>%
select(-value, -elec_ratio) ->
L123.in_EJ_R_indchp_F_Yh
# Rename necessary outputs to value for long test flag
L123.out_EJ_R_elec_F_Yh %>%
mutate(value = outputs) %>%
select(-outputs) ->
L123.out_EJ_R_elec_F_Yh
L123.in_EJ_R_elec_F_Yh %>%
mutate(value = input) %>%
select(-input) ->
L123.in_EJ_R_elec_F_Yh
L123.in_EJ_R_indchp_F_Yh %>%
mutate(value = outputs_ratio) %>%
select(-outputs_ratio) ->
L123.in_EJ_R_indchp_F_Yh
# Save results
L123.out_EJ_R_elec_F_Yh %>%
add_title("Outputs of electricity sector by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.out_EJ_R_elec_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.out_EJ_R_elec_F_Yh
L123.in_EJ_R_elec_F_Yh %>%
add_title("Inputs to electricity sector by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.in_EJ_R_elec_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.in_EJ_R_elec_F_Yh
L123.eff_R_elec_F_Yh %>%
add_title("Electric sector efficiencies by GCAM region / fuel / historical year") %>%
add_units("Unitless") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.eff_R_elec_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.eff_R_elec_F_Yh
L123.out_EJ_R_indchp_F_Yh %>%
add_title("Industrial CHP electricity generation by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.out_EJ_R_indchp_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.out_EJ_R_indchp_F_Yh
L123.in_EJ_R_indchp_F_Yh %>%
add_title("Inputs to industrial CHP by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.in_EJ_R_indchp_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.in_EJ_R_indchp_F_Yh
return_data(L123.out_EJ_R_elec_F_Yh, L123.in_EJ_R_elec_F_Yh, L123.eff_R_elec_F_Yh, L123.out_EJ_R_indchp_F_Yh, L123.in_EJ_R_indchp_F_Yh)
} else {
stop("Unknown command")
}
}
|
/input/gcamdata/R/zchunk_LA123.electricity.R
|
permissive
|
djvdven/gcam52
|
R
| false | false | 9,653 |
r
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_energy_LA123.electricity
#'
#' This script creates electricity generation and inputs by fuel, region and historical year. Estimates are adjusted by efficiency factors.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L123.out_EJ_R_elec_F_Yh}, \code{L123.in_EJ_R_elec_F_Yh}, \code{L123.eff_R_elec_F_Yh}, \code{L123.out_EJ_R_indchp_F_Yh}, \code{L123.in_EJ_R_indchp_F_Yh}. The corresponding file in the
#' original data system was \code{LA123.electricity.R} (energy level1).
#' @details This script creates electricity generation and inputs by fuel, region and historical year. Estimates are adjusted by efficiency factors.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter full_join funs if_else group_by left_join mutate select semi_join summarise summarise_all
#' @author FF April 2017
module_energy_LA123.electricity <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "energy/enduse_fuel_aggregation",
FILE = "energy/A23.chp_elecratio",
"L1011.en_bal_EJ_R_Si_Fi_Yh"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L123.out_EJ_R_elec_F_Yh",
"L123.in_EJ_R_elec_F_Yh",
"L123.eff_R_elec_F_Yh",
"L123.out_EJ_R_indchp_F_Yh",
"L123.in_EJ_R_indchp_F_Yh"))
} else if(command == driver.MAKE) {
year <- value <- GCAM_region_ID <- fuel <- sector <- electricity <-
outputs <- input <- outputs_new <- industry <- elec_ratio <-
outputs_ratio <- NULL # silence package check.
all_data <- list(...)[[1]]
# Load required inputs
enduse_fuel_aggregation <- get_data(all_data, "energy/enduse_fuel_aggregation")
A23.chp_elecratio <- get_data(all_data, "energy/A23.chp_elecratio")
L1011.en_bal_EJ_R_Si_Fi_Yh <- get_data(all_data, "L1011.en_bal_EJ_R_Si_Fi_Yh")
# ===================================================
# Creates end use fuel for electricity to be used to create L123.in_EJ_R_elec_F_Yh
enduse_fuel_aggregation %>%
select(fuel,electricity) %>%
filter(!is.na(electricity)) ->
enduse_fuel_aggregation_electricity
# Creates L123.in_EJ_R_elec_F_Yh based on L1011.en_bal_EJ_R_Si_Fi_Yh and enduse_fuel_aggregation_electricity
# Calculates the inputs by fuel (based on electricity input fuels), region ID and sector (electricity)
L1011.en_bal_EJ_R_Si_Fi_Yh %>%
filter(sector == "in_electricity generation") %>%
mutate(sector = "electricity generation") %>%
left_join(enduse_fuel_aggregation_electricity, by = "fuel") %>%
mutate(fuel = electricity, input = value) %>%
select(-electricity, -value) %>%
filter(fuel %in% energy.ELECTRICITY_INPUT_FUELS) %>%
group_by(GCAM_region_ID, sector, fuel, year) %>%
summarise_all(funs(sum)) %>%
ungroup() ->
L123.in_EJ_R_elec_F_Yh
# Creates L123.out_EJ_R_elec_F_Yh based on L1011.en_bal_EJ_R_Si_Fi_Yh and enduse_fuel_aggregation_electricity
# Calculates the electricity outputs by fuel, region ID and sector (electricity generation)
L1011.en_bal_EJ_R_Si_Fi_Yh %>%
filter(sector == "out_electricity generation") %>%
mutate(sector = "electricity generation") %>%
left_join(enduse_fuel_aggregation_electricity, by = "fuel") %>%
mutate(fuel = electricity, outputs = value) %>%
select(-electricity, -value) %>%
group_by(GCAM_region_ID, sector, fuel, year) %>%
summarise_all(funs(sum)) %>%
ungroup() %>%
filter(!is.na(fuel)) ->
L123.out_EJ_R_elec_F_Yh
# Long form of L123.in_EJ_R_elec_F_Yh (previously created) to be used to calculate efficiencies
# based on electricity inputs and outputs by fuel and year.
# Calculates electricity generation efficiencies (L123.eff_R_elec_F_Yh) by region, fuel and year
L123.out_EJ_R_elec_F_Yh %>%
semi_join(L123.in_EJ_R_elec_F_Yh, by = c("GCAM_region_ID", "fuel", "year")) %>%
left_join(L123.in_EJ_R_elec_F_Yh, by = c("GCAM_region_ID", "sector", "fuel", "year")) %>%
mutate(value = outputs / input) %>%
select(-outputs, -input) ->
L123.eff_R_elec_F_Yh
# Taking care of NA, 0, and Inf values generated in the previous step (efficiency calculations) and updating
# the efficiency output L123.eff_R_elec_F_Yh
L123.eff_R_elec_F_Yh %>%
mutate(value = if_else(!is.na(value), value, energy.DEFAULT_ELECTRIC_EFFICIENCY),
value = if_else(value == 0, energy.DEFAULT_ELECTRIC_EFFICIENCY, value),
value = if_else(is.infinite(value), energy.DEFAULT_ELECTRIC_EFFICIENCY, value)) ->
L123.eff_R_elec_F_Yh
# Tibble (Output_efficiency_based) created to adjust electricity outputs (L123.out_EJ_R_elec_F_Yh)
# based on above modified efficiency calculations
L123.in_EJ_R_elec_F_Yh %>%
left_join(L123.eff_R_elec_F_Yh, by = c("GCAM_region_ID", "sector", "fuel", "year")) %>%
mutate(outputs_new = input * value) %>%
select(-input, -value) ->
Output_efficiency_based
# Adjusting electricity outputs based on new efficiencies
L123.out_EJ_R_elec_F_Yh %>%
left_join(Output_efficiency_based, by = c("GCAM_region_ID", "sector", "fuel", "year")) %>%
mutate(outputs = if_else(!is.na(outputs_new), outputs_new, outputs)) %>%
select(-outputs_new) ->
L123.out_EJ_R_elec_F_Yh
#2: CHP calculations
# filter end use fuel in the industry sector and remove any NA generated.
enduse_fuel_aggregation %>%
select(fuel, industry) %>%
filter(!is.na(industry)) ->
enduse_fuel_aggregation_industry
# Create Industry CHP estimates by fuel, region and industry sector.
L1011.en_bal_EJ_R_Si_Fi_Yh %>%
filter(sector == "out_chp_elec") %>%
mutate(sector = "chp_elec") %>%
left_join(enduse_fuel_aggregation_industry, by = "fuel") %>%
mutate(fuel = industry) %>%
select(-industry) %>%
mutate(fuel = replace(fuel, fuel == "heat", NA),
fuel = replace(fuel, fuel == "electricity", NA)) %>%
group_by(GCAM_region_ID, sector, fuel, year) %>%
summarise_all(funs(sum)) %>%
ungroup() %>%
filter(!is.na(fuel)) ->
L123.out_EJ_R_indchp_F_Yh
# Estimates inputs for CHP in the industry sector based on outputs and the electricity ratio by fuel (output/elec_ratio)
L123.out_EJ_R_indchp_F_Yh %>%
full_join(filter(A23.chp_elecratio,fuel != "hydrogen"), by = "fuel") %>%
mutate(outputs_ratio = value / elec_ratio) %>%
select(-value, -elec_ratio) ->
L123.in_EJ_R_indchp_F_Yh
# Rename necessary outputs to value for long test flag
L123.out_EJ_R_elec_F_Yh %>%
mutate(value = outputs) %>%
select(-outputs) ->
L123.out_EJ_R_elec_F_Yh
L123.in_EJ_R_elec_F_Yh %>%
mutate(value = input) %>%
select(-input) ->
L123.in_EJ_R_elec_F_Yh
L123.in_EJ_R_indchp_F_Yh %>%
mutate(value = outputs_ratio) %>%
select(-outputs_ratio) ->
L123.in_EJ_R_indchp_F_Yh
# Save results
L123.out_EJ_R_elec_F_Yh %>%
add_title("Outputs of electricity sector by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.out_EJ_R_elec_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.out_EJ_R_elec_F_Yh
L123.in_EJ_R_elec_F_Yh %>%
add_title("Inputs to electricity sector by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.in_EJ_R_elec_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.in_EJ_R_elec_F_Yh
L123.eff_R_elec_F_Yh %>%
add_title("Electric sector efficiencies by GCAM region / fuel / historical year") %>%
add_units("Unitless") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.eff_R_elec_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.eff_R_elec_F_Yh
L123.out_EJ_R_indchp_F_Yh %>%
add_title("Industrial CHP electricity generation by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.out_EJ_R_indchp_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.out_EJ_R_indchp_F_Yh
L123.in_EJ_R_indchp_F_Yh %>%
add_title("Inputs to industrial CHP by GCAM region / fuel / historical year") %>%
add_units("EJ") %>%
add_comments("Written by LA123.electricity.R") %>%
add_legacy_name("L123.in_EJ_R_indchp_F_Yh") %>%
add_precursors("L1011.en_bal_EJ_R_Si_Fi_Yh",
"energy/enduse_fuel_aggregation", "energy/A23.chp_elecratio") ->
L123.in_EJ_R_indchp_F_Yh
return_data(L123.out_EJ_R_elec_F_Yh, L123.in_EJ_R_elec_F_Yh, L123.eff_R_elec_F_Yh, L123.out_EJ_R_indchp_F_Yh, L123.in_EJ_R_indchp_F_Yh)
} else {
stop("Unknown command")
}
}
|
with(a6712399b292f44f9ba9780943185bec6, {ROOT <- 'C:/tools/111719/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';FRAME878836[,(c('report_run_end_date','modified_date','created_date','report_run_date')) := lapply(.SD, function(x) as.Date(x, format='%Y-%m-%d')), .SDcols = c('report_run_end_date','modified_date','created_date','report_run_date')]});
|
/80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/azgKqgoj1hagT.R
|
no_license
|
ayanmanna8/test
|
R
| false | false | 426 |
r
|
with(a6712399b292f44f9ba9780943185bec6, {ROOT <- 'C:/tools/111719/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';FRAME878836[,(c('report_run_end_date','modified_date','created_date','report_run_date')) := lapply(.SD, function(x) as.Date(x, format='%Y-%m-%d')), .SDcols = c('report_run_end_date','modified_date','created_date','report_run_date')]});
|
DrawCatchAll <- function(catch.per.trip, compliant, num.keep.legal,
num.keep.legal.group, length.min.legal.all, length.max.legal.all,
rec.sel.at.length, lengths, len.pop.female, len.pop.male,
modeled.stock.rec.catch.source.subarea, wave) {
#' DrawCatchAll
#'
#' Draw catch information
#'
#' @param catch.per.trip Number of fish caught
#' @param compliant Is the trip compliant with regulations (T/F)
#' @param num.keep.legal Bag limit for each stock
#' @param num.keep.legal.group Bag limit for each species group
#' @param length.min.legal.all Minimum legal length for all stocks
#' @param length.max.legal.all Maximum legal length for all stocks
#' @param rec.sel.at.length Recreational selectivity at length for all
#' stocks
#' @param lengths Length bins for all stocks
#' @param len.pop.female Number of females by length bin for all stocks
#' @param len.pop.male Number of males by length bin for all stocks
#' @param modeled.stock.rec.catch.source.subarea Subarea for modeled stock
#' @param wave Current wave
#'
#' @return catch.info Catch information
#' @export
#' @examples
#'
# Create a list to hold the kept and release catch for each stock
catch.info <- sapply(names(catch.per.trip),
function(x) x=list(kept.lengths=NULL, kept.sexes=NULL,
released.lengths=NULL, released.sexes=NULL),
simplify=FALSE, USE.NAMES=TRUE)
# Calculate the total number of fish caught on this trip
total.catch <- Reduce("+", catch.per.trip)
# If nothing was caught, no need to simulate the catch order
if(total.catch==0) return(catch.info)
# Draw the order of the fish caught by stock
catch.order <- sample(rep(names(catch.per.trip), unlist(catch.per.trip)),
total.catch, replace=FALSE)
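# Illustration with hypothetical values (not from the model): if catch.per.trip
# were list(lingcod = 2, cabezon = 1), rep() would expand it to
# c("lingcod", "lingcod", "cabezon") and sample() would permute that vector, so
# the three caught fish get processed one at a time in a random order.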
for(i in 1:total.catch) {
# In order, assess whether each caught fish is kept or discarded
# identify the ith stock caught
stock <- catch.order[i]
# Identify the corresponding modeled population for this stock
# If there is not a corresponding modeled population, this will be NA
model.pop.name <- modeled.stock.rec.catch.source.subarea[[stock]]
##### Draw the information for this caught fish
# If this is a static stock, the probability distribution of the lengths of
# caught fish is just based on catch from recent years
if(is.na(model.pop.name)) {
# Draw fish catch from the probability distribution
catch.length <- sample(x=lengths[[stock]], size=1,
prob=rec.sel.at.length[[stock]])
catch.sex <- 3
} else {
# If it's a dynamic stock, draw length and sex based on rec selectivity and
# current population
# Estimate the probability mass function of the length and sex of caught
# fish based on the current stock structure and recreational selectivity
rec.total.fishable.pop <- sum(rec.sel.at.length[[stock]]$Male *
len.pop.male[[stock]]) + sum(rec.sel.at.length[[stock]]$Female *
len.pop.female[[stock]])
length.caught.prob.male <- (rec.sel.at.length[[stock]]$Male *
len.pop.male[[stock]]) / rec.total.fishable.pop
length.caught.prob.female <- (rec.sel.at.length[[stock]]$Female *
len.pop.female[[stock]]) / rec.total.fishable.pop
# Draw fish catch from the probability distribution
catch.length.sex <- sample(x=(1:(length(lengths[[stock]])*2)), size=1,
prob=c(length.caught.prob.female, length.caught.prob.male))
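# The probability vector above is c(female bins, male bins), so sampled indices
# 1..L correspond to female lengths and L+1..2L to male lengths, where
# L = length(lengths[[stock]]); the branch below decodes the index accordingly.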
if(catch.length.sex > length(lengths[[stock]])){
catch.sex <- 2 # Male
catch.length <- lengths[[stock]][catch.length.sex-
length(lengths[[stock]])]
} else {
catch.sex <- 1 # Female
catch.length <- lengths[[stock]][catch.length.sex]
}
} # END if else dynamic or static stock
# Determine if any groups the stock is part of are at their catch limit
at.group.limit <- FALSE
groups.part <- sapply(num.keep.legal.group, function(x) stock %in% x[[1]],
simplify = TRUE, USE.NAMES = TRUE)
for(group in names(which(groups.part==TRUE))){
group.stocks <- num.keep.legal.group[[group]][[1]]
group.total <- sum(sapply(group.stocks,
function(x) length(catch.info[[x]]$kept.lengths), simplify = TRUE,
USE.NAMES = TRUE))
if(group.total >= num.keep.legal.group[[group]][[2]])
at.group.limit=TRUE
} # END species group loop
# Determine if this fish should be kept or released
# Release if it's below the minimum size limit, above the maximum size
# limit, at the catch limit for the stock, or at the catch limit for a group
# the stock is part of
if(catch.length < length.min.legal.all[[stock]] ||
catch.length > length.max.legal.all[[stock]] ||
length(catch.info[[stock]]$kept.lengths) >=
num.keep.legal[[stock]][[wave]] || at.group.limit) {
catch.info[[stock]]$released.lengths <- c(
catch.info[[stock]]$released.lengths, catch.length)
catch.info[[stock]]$released.sexes <- c(
catch.info[[stock]]$released.sexes, catch.sex)
} else { # otherwise keep
catch.info[[stock]]$kept.lengths <- c(catch.info[[stock]]$kept.lengths,
catch.length)
catch.info[[stock]]$kept.sexes <- c(catch.info[[stock]]$kept.sexes,
catch.sex)
} # END keep or discard loop
} # END each fish in catch loop
return(catch.info)
}
|
/R/DrawCatchAll.r
|
no_license
|
allen-chen-noaa-gov/nwblastarca
|
R
| false | false | 5,691 |
r
|
DrawCatchAll <- function(catch.per.trip, compliant, num.keep.legal,
num.keep.legal.group, length.min.legal.all, length.max.legal.all,
rec.sel.at.length, lengths, len.pop.female, len.pop.male,
modeled.stock.rec.catch.source.subarea, wave) {
#' DrawCatchAll
#'
#' Draw catch information
#'
#' @param catch.per.trip Number of fish caught
#' @param compliant Is the trip compliant with regulations (T/F)
#' @param num.keep.legal Bag limit for each stock
#' @param num.keep.legal.group Bag limit for each species group
#' @param length.min.legal.all Minimum legal length for all stocks
#' @param length.max.legal.all Maximum legal length for all stocks
#' @param rec.sel.at.length Recreational selectivity at length for all
#' stocks
#' @param lengths Length bins for all stocks
#' @param len.pop.female Number of females by length bin for all stocks
#' @param len.pop.male Number of males by length bin for all stocks
#' @param modeled.stock.rec.catch.source.subarea Subarea for modeled stock
#' @param wave Current wave
#'
#' @return catch.info Catch information
#' @export
#' @examples
#'
# Create a list to hold the kept and release catch for each stock
catch.info <- sapply(names(catch.per.trip),
function(x) x=list(kept.lengths=NULL, kept.sexes=NULL,
released.lengths=NULL, released.sexes=NULL),
simplify=FALSE, USE.NAMES=TRUE)
# Calculate the total number of fish caught on this trip
total.catch <- Reduce("+", catch.per.trip)
# If nothing was caught, no need to simulate the catch order
if(total.catch==0) return(catch.info)
# Draw the order of the fish caught by stock
catch.order <- sample(rep(names(catch.per.trip), unlist(catch.per.trip)),
total.catch, replace=FALSE)
for(i in 1:total.catch) {
# In order, assess whether each caught fish is kept or discarded
# identify the ith stock caught
stock <- catch.order[i]
# Identify the corresponding modeled population for this stock
# If there is not a corresponding modeled population, this will be NA
model.pop.name <- modeled.stock.rec.catch.source.subarea[[stock]]
##### Draw the information for this caught fish
# If this is a static stock, the probability distribution of the lengths of
# caught fish is just based on catch from recent years
if(is.na(model.pop.name)) {
# Draw fish catch from the probability distribution
catch.length <- sample(x=lengths[[stock]], size=1,
prob=rec.sel.at.length[[stock]])
catch.sex <- 3
} else {
# If it's a dynamic stock, draw length and sex based on rec selectivity and
# current population
# Estimate the probability mass function of the length and sex of caught
# fish based on the current stock structure and recreational selectivity
rec.total.fishable.pop <- sum(rec.sel.at.length[[stock]]$Male *
len.pop.male[[stock]]) + sum(rec.sel.at.length[[stock]]$Female *
len.pop.female[[stock]])
length.caught.prob.male <- (rec.sel.at.length[[stock]]$Male *
len.pop.male[[stock]]) / rec.total.fishable.pop
length.caught.prob.female <- (rec.sel.at.length[[stock]]$Female *
len.pop.female[[stock]]) / rec.total.fishable.pop
# Draw fish catch from the probability distribution
catch.length.sex <- sample(x=(1:(length(lengths[[stock]])*2)), size=1,
prob=c(length.caught.prob.female, length.caught.prob.male))
if(catch.length.sex > length(lengths[[stock]])){
catch.sex <- 2 # Male
catch.length <- lengths[[stock]][catch.length.sex-
length(lengths[[stock]])]
} else {
catch.sex <- 1 # Female
catch.length <- lengths[[stock]][catch.length.sex]
}
} # END if else dynamic or static stock
# Determine if any groups the stock is part of are at their catch limit
at.group.limit <- FALSE
groups.part <- sapply(num.keep.legal.group, function(x) stock %in% x[[1]],
simplify = TRUE, USE.NAMES = TRUE)
for(group in names(which(groups.part==TRUE))){
group.stocks <- num.keep.legal.group[[group]][[1]]
group.total <- sum(sapply(group.stocks,
function(x) length(catch.info[[x]]$kept.lengths), simplify = TRUE,
USE.NAMES = TRUE))
if(group.total >= num.keep.legal.group[[group]][[2]])
at.group.limit=TRUE
} # END species group loop
# Determine if this fish should be kept or released
# Release if it's below the minimum size limit, above the maximum size
# limit, at the catch limit for the stock, or at the catch limit for a group
# the stock is part of
if(catch.length < length.min.legal.all[[stock]] ||
catch.length > length.max.legal.all[[stock]] ||
length(catch.info[[stock]]$kept.lengths) >=
num.keep.legal[[stock]][[wave]] || at.group.limit) {
catch.info[[stock]]$released.lengths <- c(
catch.info[[stock]]$released.lengths, catch.length)
catch.info[[stock]]$released.sexes <- c(
catch.info[[stock]]$released.sexes, catch.sex)
} else { # otherwise keep
catch.info[[stock]]$kept.lengths <- c(catch.info[[stock]]$kept.lengths,
catch.length)
catch.info[[stock]]$kept.sexes <- c(catch.info[[stock]]$kept.sexes,
catch.sex)
} # END keep or discard loop
} # END each fish in catch loop
return(catch.info)
}
|
#################################################################
## COMPASS-CABG:Cohort Selection ##
#################################################################
# libraries:
library(pacman)
p_load("tidyverse","Hmisc","rms","survival","haven","readxl","naniar","lubridate")
# first get all the patients that had CABG during the period 2010-01-01 to 2019-12-31.
df = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_compass.sas7bdat')
df$SURGDATE = as_date(df$SURGDATE)
df = df[df$SURGDATE < '2019-01-01', ]
summary(df$SURGDATE)
# now to start first with the exclusion criteria.
# calculate REACH bleeding risk score.
glimpse(df)
# use ICDcodes for the bleeding risk score calculation; need - CHF/PAD/HLP.
# antiplatelet agents and oral anticoagulants can be obtained from the outpat drug files.
pad9 = read_excel('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\pad_codes.xlsx', sheet = 1)
pad10 = read_excel('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\pad_codes.xlsx', sheet = 2)
# now to get a single list of codes.
pad_codes = c(pad9$icd9,pad10$icd10codes)
# now to identify using codes.
df$peri_art_d = with(df, ifelse((ICD902 %in% pad_codes|ICD903 %in% pad_codes|ICD904 %in% pad_codes|ICD905 %in% pad_codes),
1, 0))
df %>% count(peri_art_d) # this contains information regarding PAD.
# now to look at diagnosis of CHF.
chf = c('428.0','I50.2')
df$chf = with(df, ifelse((ICD902 %in% chf|ICD903 %in% chf|ICD904 %in% chf|ICD905 %in% chf), 1, 0))
df %>% count(chf)
hlp = c('272.4','E78.5')
df$hlp = with(df, ifelse((ICD902 %in% hlp|ICD903 %in% hlp|ICD904 %in% hlp|ICD905 %in% hlp), 1, 0))
df %>% count(hlp)
df %>% count(HTN) ; # change NA to 1 --- give missing % here 11/30,000
df$HTN[is.na(df$HTN)]<- 1
# NYHA class
df %>% count(FCC)
# we need to get the LVEF for all the patients using the TIU data.
# let us first identify those patients that fulfil the inclusion criteria:
# we need to remove those patients that may need anticoagulation due to mechanical valves/ we do not know that #
# so we will remove those patients that had concomitant valve replacement as CPT02/CPT03.
v = c('33364','33427','33426','33405','33420','33411','33425','33426','33430','33460','33463','33464','33465')
df$valve = with(df, ifelse((CPT02 %in% v| CPT03 %in% v), 1, 0))
df %>% count(valve)
# 334 patients had concomitant valve procedures as CPT02/CPT03 and hence removed.
df2 = df %>% filter(valve == 0)
# remove stemi and nstemi first.
# remove stemi/nstemi patients.
# am going to remove those that are EF < 30% and also those that have NYHA class III/IV symptoms.
stemi = c('410.00',"410.10","410.20","410.30","410.40","410.40","410.50","410.60","410.70","410.80","410.90",
"I21.3")
nstemi = c("I21.4","410.70")
ami = c(stemi, nstemi)
df2$AMI = with(df2, ifelse((ICD901 %in% ami|ICD902 %in% ami), 1, 0))
df2 %>% count(AMI)
# 1725 have STEMI/NSTEMI -- removed these.
df3 = df2 %>% filter(AMI == 0)
# now to remove those with EF < 30%
df3 %>% count(LVCGRADE) # missing , so get better values from the TIU data.
summary(df3$SURGDATE)
lv = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\lvef_tiu.sas7bdat')
# also get the crosswalk
cw = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\compass_crosswalk.sas7bdat')
# now to look at the lvef and am going to limit the lvef value to within 90 days of the surgery date.
glimpse(lv)
lv2 = lv %>% dplyr::select(PatientSID, ValueDateTime, High_Value, Low_Value)
# now going to get the data for only my patients using the crosswalk.
lv2$mine = with(lv2, ifelse(PatientSID %in% cw$PatientSID, 1, 0))
lv3 = lv2[lv2$mine == 1, ]
lv4 = left_join(lv3, cw, by = "PatientSID")
d_s = df3 %>% dplyr::select(scrssn, SURGDATE)
names(lv4) = tolower(names(lv4))
lv5 = left_join(lv4, d_s, by = "scrssn")
# need to limit to nonmissing surgdate
lv6 = lv5[!is.na(lv5$SURGDATE), ]
lv6$valuedatetime = as_date(lv6$valuedatetime)
summary(lv6$valuedatetime)
lv6 = lv6[!is.na(lv6$valuedatetime), ]
lv6$days = (lv6$SURGDATE %--% lv6$valuedatetime)/ddays(1)
describe(lv6$days)
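# Note on the idiom above: SURGDATE %--% valuedatetime builds a lubridate
# interval and dividing it by ddays(1) gives its signed length in days, e.g. an
# echo dated 10 days before surgery yields days = -10.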
# limit to within 3 months of the surgdate.
lv6$keep = with(lv6, ifelse(days < 0 & days > -90, 1, 0))
lv6 %>% count(keep)
length(unique(lv6$scrssn))
lv7 = lv6 %>% filter(keep == 1)
lv8 = lv7 %>% arrange(scrssn, -days)
lv9 = lv8[!duplicated(lv8$scrssn), ]
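# days is negative (echo before surgery), so arranging by -days ascending puts the
# measurement closest to the surgery date first within each scrssn, and
# !duplicated() then keeps that single most recent pre-operative LVEF per patient.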
summary(lv9)
lv9$low = with(lv9, ifelse(low_value < 30, 1, 0))
lv9 %>% count(low)
# 2036 have LVEF < 30%
# now to get the data into main dataset df3
lv10 = lv9 %>% dplyr::select(scrssn, low_value, low)
df4 = left_join(df3, lv10 ,by = "scrssn" )
df4 %>% count(low)
df5 = df4[!is.na(df4$low), ]
# remove those with missing LVEF and then keep only those with LVEF >= 30%
# then will need to keep for NYHA class I/II
df6 = df5 %>% filter(low == 0)
# calculate the eGFR and then limit to above 15.
describe(df6$CR)
quantile(df6$CR, 0.1, na.rm = T)
quantile(df6$CR, 0.99, na.rm = T)
df6$CR[df6$CR < 0.79]<- 0.79
df6$CR[df6$CR > 6.08]<- 6.08
names(df6) = tolower(names(df6))
# race
df6$race.mod = with(df6, ifelse(race %in% c("9"), "black",
ifelse(race %in% c("B"), "white", "others")))
df6 %>% count(race.mod)
# BMI calculation
summary(df6$htin)
summary(df6$wtlbs)
# limit to 99th and 1st percentile to clean the data.
# height
quantiles = function(x){
a = quantile(x,0.99,na.rm = T)
b = quantile(x, 0.01, na.rm = T)
c = c(a,b)
c
}
quantiles(df6$htin)
df6$htin2 = with(df6, ifelse(htin < 62, 62,
ifelse(htin > 76, 76, htin)))
summary(df6$htin2)
describe(df6$wtlbs)
quantiles(df6$wtlbs)
df6$wtlbs2 = with(df6, ifelse(wtlbs < 126.86, 126.86,
ifelse(wtlbs > 317, 317, wtlbs)))
df6$htin2[is.na(df6$htin2)]<- 69.39
df6$wtlbs2[is.na(df6$wtlbs2)]<- 215.6
df6$bmi = (df6$wtlbs2/(df6$htin2)^2)*703
describe(df6$bmi)
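# Sanity check of the imperial BMI formula above (illustrative numbers only):
# for the imputation values used above, 215.6 lbs and 69.39 in,
# 703 * 215.6 / 69.39^2 = 151566.8 / 4815.0, i.e. roughly 31.5 kg/m^2.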
#- going to convert the BMI into groups
#- going to get eGFR and convert eGFR also into groups.
#- sex female = 1
#- race white = 1
df6$race_n <- with(df6, ifelse(race.mod == 'white', 1, 0
))
df6 %>% count(race_n)
gfr <- function(age, scr,sex, race){
male <- age^(-0.203)*scr^(-1.154)*175
female <- age^(-0.203)*scr^(-1.154)*175*0.742
a <- ifelse(sex == 1, female , male)
b <- ifelse(race == 1, a, a*1.212)
return(b)
}
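# Quick check of the MDRD-style formula above (illustrative values only): a
# 65-year-old white (race = 1) male (sex = 0) with creatinine 1.0 mg/dL gives
# 175 * 1.0^(-1.154) * 65^(-0.203), i.e. roughly 75 mL/min/1.73m^2; the 0.742
# and 1.212 factors apply to female sex and non-white race respectively.
# e.g. gfr(age = 65, scr = 1.0, sex = 0, race = 1) returns ~ 75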
df6$egfr <- with(df6, gfr(age = age, scr = cr, sex = sex, race = race_n))
describe(df6$egfr)
quantiles(df6$egfr)
df7 = df6[df6$egfr > 15, ] # limited to only eGFR > 15.
# write the dataset as further ...
write_csv(df7,
'P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further.csv' )
# 2020-11-06
# going to get the dataset again and go further for cohort selection.
df = read_csv('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further.csv')
# remove those with severe liver disease.
# am going to use ICD codes for liver disease.
liv = c("456.0","456.1","456.2","572.2","572.3","572.4",
"572.5","572.6","572.7","572.8", "I85.0","I85.9","I86.4",'I98.2',
"K70.4","K71.1","K72.1","K72.9","K76.5","K76.6","K76.7")
df$liver_d = with(df, ifelse((icd901 %in% liv|icd902 %in% liv|icd903 %in% liv|
icd904 %in% liv|icd905 %in% liv), 1, 0))
df %>% count(liver_d)
df2 = df[df$liver_d == 0, ]
# also get patients that are on DAPT prior to surgery.
# now need to obtain the information regarding patients that were on DAPT
# and OAC prior to surgery.
# another criterion is stroke within 1 month prior to surgery
# we can assume that given the need for CPB, the surgery would not be
# performed within 1 month of hemorrhagic stroke
# we need to identify those patients that are at higher risk of bleeding.
# we can do that using codes for bleeding risk in the past 1 year prior to
# surgery.
# we need to remove patients that have chronic AF.
af = c("427.31","427.32","I48.20")
df2$chronicaf = with(df2, ifelse((icd903 %in% af|icd904 %in% af|icd905 %in% af|
icd906 %in% af|icd907 %in% af), 1, 0))
df2 %>% count(chronicaf)
# remove those patients that have chronic AF
df3 = df2[df2$chronicaf == 0, ]
# DAPT prior to surgery.
# identify those patients that had DAPT fill 6 months prior to surgery date.
# get clopidogrel fill
c = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\clopidogrelfill_c.sas7bdat')
# get the list of patientsid of my patients.
cw = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\compass_crosswalk.sas7bdat')
cw$mine = with(cw, ifelse(ScrSSN %in% df3$scrssn, 1, 0))
cw %>% count(mine)
cw2 = cw %>% filter(mine == 1)
mypat = cw2$PatientSID
c$keep = with(c, ifelse(PatientSID %in% mypat, 1, 0))
c %>% count(keep)
c2 = c %>% filter(keep == 1)
names(c2) = tolower(names(c2))
# now that we have outpat refills for clopidogrel we need to keep only those that are
# within 1 month (30 days) before surgdate, matching the filter below.
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
c3 = left_join(c2, cd2, by = "patientsid")
c4 = c3 %>% dplyr::select(scrssn, patientsid, filldatetime, rxstatus, surgdate)
c4$surgdate = as_date(c4$surgdate)
c4$filldatetime = as_date(c4$filldatetime)
# now to keep prescription within 1 month prior to surgery.
c4$days = (c4$surgdate %--% c4$filldatetime)/ddays(1)
# 0 = active / ACTIVE = ACTIVE
summary(c4$days)
c5 = c4 %>% filter(days < 0 & days > -30)
glimpse(c5)
c6 = c5 %>% filter(rxstatus == "0"| rxstatus == "ACTIVE")
glimpse(c6)
clopidogrel = c6$scrssn
# now to flag those that have active clopidogrel therapy within 1 month of surgery.
df3$clopidogrel = with(df3, ifelse(scrssn %in% clopidogrel, 1, 0))
df3 %>% count(clopidogrel)
# now to do the same for prasugrel and ticagrelor and then warfarin.
############### PRASUGREL
p = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\prasugrelfill_c.sas7bdat')
# limit to active medication refill now
p2 = p %>% filter(RxStatus == "0"|RxStatus == "ACTIVE")
cw = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\compass_crosswalk.sas7bdat')
cw$mine = with(cw, ifelse(ScrSSN %in% df3$scrssn, 1, 0))
cw %>% count(mine)
cw2 = cw %>% filter(mine == 1)
mypat = cw2$PatientSID
p2$keep = with(p2, ifelse(PatientSID %in% mypat, 1, 0))
p2 %>% count(keep)
# no one on prasugrel before surgery.
############### TICAGRELOR
t = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\ticagrelorfill_c.sas7bdat')
t2 = t %>% filter(RxStatus == "0"|RxStatus == "ACTIVE")
t2$keep = with(t2, ifelse(PatientSID %in% mypat, 1, 0))
t2 %>% count(keep)
t3 = t2 %>% filter(keep == 1)
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(t3) = tolower(names(t3))
t4 = left_join(t3, cd2, by = "patientsid")
t5 = t4 %>% dplyr::select(scrssn, patientsid, filldatetime, rxstatus, surgdate)
t5$surgdate = as_date(t5$surgdate)
t5$filldatetime = as_date(t5$filldatetime)
# now to keep prescription within 1 month prior to surgery.
t5$days = (t5$surgdate %--% t5$filldatetime)/ddays(1)
t6 = t5 %>% filter(days < 0 & days > -30)
glimpse(t6)
tica = t6$scrssn
df3$ticagrelor = with(df3, ifelse(scrssn %in% tica, 1, 0))
df3 %>% count(ticagrelor)
# now dapt == clopidogrel or ticagrelor
df3$dapt = with(df3, ifelse((clopidogrel == 1|ticagrelor == 1), 1, 0))
df3 %>% count(dapt)
df4 = df3[df3$dapt == 0, ]
# to limit to those on Coumadin.
w = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\warfarinfill_c.sas7bdat')
w2 = w %>% filter(RxStatus == "0"|RxStatus == "ACTIVE")
w2$keep = with(w2, ifelse(PatientSID %in% mypat, 1, 0))
w2 %>% count(keep)
w3 = w2 %>% filter(keep == 1)
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(w3) = tolower(names(w3))
w4 = left_join(w3, cd2, by = "patientsid")
w5 = w4 %>% dplyr::select(scrssn, patientsid, filldatetime, rxstatus, surgdate)
w5$surgdate = as_date(w5$surgdate)
w5$filldatetime = as_date(w5$filldatetime)
w5$days = (w5$surgdate %--% w5$filldatetime)/ddays(1)
w6 = w5 %>% filter(days < 0 & days > -30)
glimpse(w6)
coum = w6$scrssn
df4$coumadin = with(df4, ifelse(scrssn %in% coum, 1, 0))
df4 %>% count(coumadin)
# remove coumadin now.
df5 = df4[df4$coumadin == 0, ]
write_csv(df5,
'P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further2.csv' )
# high risk of bleeding defined as those with history for admission with ICD codes
# of GI bleeding or ICH within 1 year prior to surgery.
# start with GIB.
gib9 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\gib_icd9.sas7bdat')
# limit to my patients and then limit to within 1 year prior to CABG.
gib10 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\gib_icd10.sas7bdat')
names(gib9); names(gib10)
gib9 = gib9 %>% rename(code = ICD9SID); gib10 = gib10 %>% rename(code = ICD10SID)
gib = rbind(gib9, gib10)
# now to limit to my patients only
gib$keep = with(gib, ifelse(PatientSID %in% mypat, 1, 0))
gib2 = gib %>% filter(keep == 1)
# now to get the surgery date and limit to 1 year prior to surgery.
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(gib2) = tolower(names(gib2))
gib3 = left_join(gib2, cd2, by = "patientsid")
gib3$admitdatetime = as_date(gib3$admitdatetime) ; gib3$surgdate = as_date(gib3$surgdate)
gib3$days = (gib3$surgdate %--% gib3$admitdatetime)/ddays(1)
gib4 = gib3 %>% filter(days < 0)
gib = gib4$scrssn
df5$gib = with(df5, ifelse(scrssn %in% gib, 1, 0))
df5 %>% count(gib) # count patients with a prior GIB admission.
###### ICH
ich9 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\ich_icd9.sas7bdat')
# limit to my patients and then limit to within 1 year prior to CABG.
ich10 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\ich_icd10.sas7bdat')
names(ich9); names(ich10)
ich9 = ich9 %>% rename(code = ICD9SID); ich10 = ich10 %>% rename(code = ICD10SID)
ich = rbind(ich9, ich10)
# now to limit to my patients only
ich$keep = with(ich, ifelse(PatientSID %in% mypat, 1, 0))
ich2 = ich %>% filter(keep == 1)
# now to get the surgery date and limit to 1 year prior to surgery.
cd = df4 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(ich2) = tolower(names(ich2))
ich3 = left_join(ich2, cd2, by = "patientsid")
ich3$admitdatetime = as_date(ich3$admitdatetime) ; ich3$surgdate = as_date(ich3$surgdate)
ich3$days = (ich3$surgdate %--% ich3$admitdatetime)/ddays(1)
ich4 = ich3 %>% filter(days < 0)
ich = ich4$scrssn
df5$ich = with(df5, ifelse(scrssn %in% ich, 1, 0))
df5 %>% count(ich)
# remove these patients with significant bleeding risk.
df6 = df5[df5$ich == 0, ]
write_csv(df6,
'P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further3.csv' )
# now with all the exclusion criteria listed, cabg_further3 is the dataset.
# now to work on the inclusion criteria from this and this would be the COMPASS eligible cohort.
# rest would be the COMPASS ineligible cohort.
|
/cohort_selection.R
|
permissive
|
svd09/CABG_COMPASS
|
R
| false | false | 15,948 |
r
|
#################################################################
## COMPASS-CABG:Cohort Selection ##
#################################################################
# libraries:
library(pacman)
p_load("tidyverse","Hmisc","rms","survival","haven","readxl","naniar","lubridate")
# first get all the patients that had CABG during the period 2010-01-01 to 2019-12-31.
df = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_compass.sas7bdat')
df$SURGDATE = as_date(df$SURGDATE)
df = df[df$SURGDATE < '2019-01-01', ]
summary(df$SURGDATE)
# now to start first with the exclusion criteria.
# calculate REACH bleeding risk score.
glimpse(df)
# use ICDcodes for the bleeding risk score calculation; need - CHF/PAD/HLP.
# antiplatelet agents and oral anticoagulants can be obtained from the outpat drug files.
pad9 = read_excel('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\pad_codes.xlsx', sheet = 1)
pad10 = read_excel('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\pad_codes.xlsx', sheet = 2)
# now to get a single list of codes.
pad_codes = c(pad9$icd9,pad10$icd10codes)
# now to identify using codes.
df$peri_art_d = with(df, ifelse((ICD902 %in% pad_codes|ICD903 %in% pad_codes|ICD904 %in% pad_codes|ICD905 %in% pad_codes),
1, 0))
df %>% count(peri_art_d) # this contains information regarding PAD.
# now to look at diagnosis of CHF.
chf = c('428.0','I50.2')
df$chf = with(df, ifelse((ICD902 %in% chf|ICD903 %in% chf|ICD904 %in% chf|ICD905 %in% chf), 1, 0))
df %>% count(chf)
hlp = c('272.4','E78.5')
df$hlp = with(df, ifelse((ICD902 %in% hlp|ICD903 %in% hlp|ICD904 %in% hlp|ICD905 %in% hlp), 1, 0))
df %>% count(hlp)
df %>% count(HTN) ; # change NA to 1 --- give missing % here 11/30,000
df$HTN[is.na(df$HTN)]<- 1
# NYHA class
df %>% count(FCC)
# we need to get the LVEF for all the patients using the TIU data.
# let us first identify those patients that fulfil the inclusion criteria:
# we need to remove those patients that may need anticoagulation due to mechanical valves/ we do not know that #
# so we will remove those patients that had concomitant valve replacement as CPT02/CPT03.
v = c('33364','33427','33426','33405','33420','33411','33425','33426','33430','33460','33463','33464','33465')
df$valve = with(df, ifelse((CPT02 %in% v| CPT03 %in% v), 1, 0))
df %>% count(valve)
# 334 patients had concomitant valve procedures as CPT02/CPT03 and hence removed.
df2 = df %>% filter(valve == 0)
# remove stemi and nstemi first.
# remove stemi/nstemi patients.
# am going to remove those that are EF < 30% and also those that have NYHA class III/IV symptoms.
stemi = c('410.00',"410.10","410.20","410.30","410.40","410.40","410.50","410.60","410.70","410.80","410.90",
"I21.3")
nstemi = c("I21.4","410.70")
ami = c(stemi, nstemi)
df2$AMI = with(df2, ifelse((ICD901 %in% ami|ICD902 %in% ami), 1, 0))
df2 %>% count(AMI)
# 1725 have STEMI/NSTEMI -- removed these.
df3 = df2 %>% filter(AMI == 0)
# now to remove those with EF < 30%
df3 %>% count(LVCGRADE) # missing , so get better values from the TIU data.
summary(df3$SURGDATE)
lv = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\lvef_tiu.sas7bdat')
# also get the crosswalk
cw = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\compass_crosswalk.sas7bdat')
# now to look at the lvef and am going to limit the lvef value to within 90 days of the surgery date.
glimpse(lv)
lv2 = lv %>% dplyr::select(PatientSID, ValueDateTime, High_Value, Low_Value)
# now going to get the data for only my patients using the crosswalk.
lv2$mine = with(lv2, ifelse(PatientSID %in% cw$PatientSID, 1, 0))
lv3 = lv2[lv2$mine == 1, ]
lv4 = left_join(lv3, cw, by = "PatientSID")
d_s = df3 %>% dplyr::select(scrssn, SURGDATE)
names(lv4) = tolower(names(lv4))
lv5 = left_join(lv4, d_s, by = "scrssn")
# need to limit to nonmissing surgdate
lv6 = lv5[!is.na(lv5$SURGDATE), ]
lv6$valuedatetime = as_date(lv6$valuedatetime)
summary(lv6$valuedatetime)
lv6 = lv6[!is.na(lv6$valuedatetime), ]
lv6$days = (lv6$SURGDATE %--% lv6$valuedatetime)/ddays(1)
describe(lv6$days)
# limit to within 3 months of the surgdate.
lv6$keep = with(lv6, ifelse(days < 0 & days > -90, 1, 0))
lv6 %>% count(keep)
length(unique(lv6$scrssn))
lv7 = lv6 %>% filter(keep == 1)
lv8 = lv7 %>% arrange(scrssn, -days)
lv9 = lv8[!duplicated(lv8$scrssn), ]
summary(lv9)
lv9$low = with(lv9, ifelse(low_value < 30, 1, 0))
lv9 %>% count(low)
# 2036 have LVEF < 30%
# now to get the data into main dataset df3
lv10 = lv9 %>% dplyr::select(scrssn, low_value, low)
df4 = left_join(df3, lv10 ,by = "scrssn" )
df4 %>% count(low)
df5 = df4[!is.na(df4$low), ]
# remove those with missing LVEF and then keep only those with LVEF >= 30%
# then will need to keep for NYHA class I/II
df6 = df5 %>% filter(low == 0)
# calculate the eGFR and then limit to above 15.
describe(df6$CR)
quantile(df6$CR, 0.1, na.rm = T)
quantile(df6$CR, 0.99, na.rm = T)
df6$CR[df6$CR < 0.79]<- 0.79
df6$CR[df6$CR > 6.08]<- 6.08
names(df6) = tolower(names(df6))
# race
df6$race.mod = with(df6, ifelse(race %in% c("9"), "black",
ifelse(race %in% c("B"), "white", "others")))
df6 %>% count(race.mod)
# BMI calculation
summary(df6$htin)
summary(df6$wtlbs)
# limit to 99th and 1st percentile to clean the data.
# height
quantiles = function(x){
a = quantile(x,0.99,na.rm = T)
b = quantile(x, 0.01, na.rm = T)
c = c(a,b)
c
}
quantiles(df6$htin)
df6$htin2 = with(df6, ifelse(htin < 62, 62,
ifelse(htin > 76, 76, htin)))
summary(df6$htin2)
describe(df6$wtlbs)
quantiles(df6$wtlbs)
df6$wtlbs2 = with(df6, ifelse(wtlbs < 126.86, 126.86,
ifelse(wtlbs > 317, 317, wtlbs)))
df6$htin2[is.na(df6$htin2)]<- 69.39
df6$wtlbs2[is.na(df6$wtlbs2)]<- 215.6
df6$bmi = (df6$wtlbs2/(df6$htin2)^2)*703
describe(df6$bmi)
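# Spot check of the BMI formula using the imputation means above:
# BMI = 703 * weight(lb) / height(in)^2
703 * 215.6 / (69.39^2)   # ~31.5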
#- going to convert the BMI into groups
#- going to get eGFR and convert eGFR also into groups.
#- sex female = 1
#- race white = 1
df6$race_n <- with(df6, ifelse(race.mod == 'white', 1, 0))
df6 %>% count(race_n)
gfr <- function(age, scr,sex, race){
male <- age^(-0.203)*scr^(-1.154)*175
female <- age^(-0.203)*scr^(-1.154)*175*0.742
a <- ifelse(sex == 1, female , male)
b <- ifelse(race == 1, a, a*1.212)
return(b)
}
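# Spot check of the eGFR function above with hypothetical inputs
# (age 65, creatinine 1.2, male coded as sex = 0, white coded as race = 1):
gfr(age = 65, scr = 1.2, sex = 0, race = 1)   # roughly 61 mL/min/1.73m2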
df6$egfr <- with(df6, gfr(age = age, scr = cr, sex = sex, race = race_n))
describe(df6$egfr)
quantiles(df6$egfr)
df7 = df6[df6$egfr > 15, ] # limited to only eGFR > 15.
# write the dataset out as cabg_further for the next steps.
write_csv(df7,
'P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further.csv' )
# 2020-11-06
# going to get the dataset again and go further for cohort selection.
df = read_csv('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further.csv')
# remove those with severe liver disease.
# am going to use ICD codes for liver disease.
liv = c("456.0","456.1","456.2","572.2","572.3","572.4",
"572.5","572.6","572.7","572.8", "I85.0","I85.9","I86.4",'I98.2',
"K70.4","K71.1","K72.1","K72.9","K76.5","K76.6","K76.7")
df$liver_d = with(df, ifelse((icd901 %in% liv|icd902 %in% liv|icd903 %in% liv|
icd904 %in% liv|icd905 %in% liv), 1, 0))
df %>% count(liver_d)
df2 = df[df$liver_d == 0, ]
# also get patients that are on DAPT prior to surgery.
# now need to obtain the information regarding patients that were on DAPT
# and OAC prior to surgery.
# another criterion is stroke within 1 month prior to surgery;
# we can assume that, given the need for CPB, the surgery would not be
# performed within 1 month of a hemorrhagic stroke.
# we need to identify those patients that are at higher risk of bleeding.
# we can do that using codes for bleeding risk in the 1 year prior to
# surgery.
# we need to remove patients that have chronic AF.
af = c("427.31","427.32","I48.20")
df2$chronicaf = with(df2, ifelse((icd903 %in% af|icd904 %in% af|icd905 %in% af|
icd906 %in% af|icd907 %in% af), 1, 0))
df2 %>% count(chronicaf)
# remove those patients that have chronic AF
df3 = df2[df2$chronicaf == 0, ]
# DAPT prior to surgery.
# identify those patients that had a DAPT fill within 1 month prior to the surgery date.
# get clopidogrel fill
c = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\clopidogrelfill_c.sas7bdat')
# get the list of patientsid of my patients.
cw = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\compass_crosswalk.sas7bdat')
cw$mine = with(cw, ifelse(ScrSSN %in% df3$scrssn, 1, 0))
cw %>% count(mine)
cw2 = cw %>% filter(mine == 1)
mypat = cw2$PatientSID
c$keep = with(c, ifelse(PatientSID %in% mypat, 1, 0))
c %>% count(keep)
c2 = c %>% filter(keep == 1)
names(c2) = tolower(names(c2))
# now that we have outpat refills for clopidogrel we need to keep only those
# filled within 1 month before surgdate.
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
c3 = left_join(c2, cd2, by = "patientsid")
c4 = c3 %>% dplyr::select(scrssn, patientsid, filldatetime, rxstatus, surgdate)
c4$surgdate = as_date(c4$surgdate)
c4$filldatetime = as_date(c4$filldatetime)
# now to keep prescription within 1 month prior to surgery.
c4$days = (c4$surgdate %--% c4$filldatetime)/ddays(1)
# rxstatus "0" or "ACTIVE" both denote an active prescription.
summary(c4$days)
c5 = c4 %>% filter(days < 0 & days > -30)
glimpse(c5)
c6 = c5 %>% filter(rxstatus == "0"| rxstatus == "ACTIVE")
glimpse(c6)
clopidogrel = c6$scrssn
# now to flag those that have active clopidogrel therapy within 1 month of surgery.
df3$clopidogrel = with(df3, ifelse(scrssn %in% clopidogrel, 1, 0))
df3 %>% count(clopidogrel)
# now to do the same for prasugrel and ticagrelor and then warfarin.
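# The clopidogrel steps above are repeated almost verbatim for the other drugs;
# a small helper along these lines could do the same job. This is only a sketch:
# it assumes `link` is a data frame like cd2 (patientsid, scrssn, surgdate) and
# that the fill table has FillDateTime and RxStatus columns, as above.
active_fill_scrssn <- function(fill_df, link, window_days = 30){
  names(fill_df) <- tolower(names(fill_df))
  x <- left_join(fill_df, link, by = "patientsid")
  x$filldatetime <- as_date(x$filldatetime)
  x$surgdate <- as_date(x$surgdate)
  x$days <- (x$surgdate %--% x$filldatetime) / ddays(1)
  x <- dplyr::filter(x, days < 0, days > -window_days,
                     rxstatus == "0" | rxstatus == "ACTIVE")
  unique(x$scrssn)
}
# e.g. the clopidogrel flag above could equivalently be written as
# df3$clopidogrel = ifelse(df3$scrssn %in% active_fill_scrssn(c2, cd2), 1, 0)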
############### PRASUGREL
p = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\prasugrelfill_c.sas7bdat')
# limit to active medication refill now
p2 = p %>% filter(RxStatus == "0"|RxStatus == "ACTIVE")
cw = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\compass_crosswalk.sas7bdat')
cw$mine = with(cw, ifelse(ScrSSN %in% df3$scrssn, 1, 0))
cw %>% count(mine)
cw2 = cw %>% filter(mine == 1)
mypat = cw2$PatientSID
p2$keep = with(p2, ifelse(PatientSID %in% mypat, 1, 0))
p2 %>% count(keep)
# no one on prasugrel before surgery.
############### TICAGRELOR
t = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\ticagrelorfill_c.sas7bdat')
t2 = t %>% filter(RxStatus == "0"|RxStatus == "ACTIVE")
t2$keep = with(t2, ifelse(PatientSID %in% mypat, 1, 0))
t2 %>% count(keep)
t3 = t2 %>% filter(keep == 1)
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(t3) = tolower(names(t3))
t4 = left_join(t3, cd2, by = "patientsid")
t5 = t4 %>% dplyr::select(scrssn, patientsid, filldatetime, rxstatus, surgdate)
t5$surgdate = as_date(t5$surgdate)
t5$filldatetime = as_date(t5$filldatetime)
# now to keep prescription within 1 month prior to surgery.
t5$days = (t5$surgdate %--% t5$filldatetime)/ddays(1)
t6 = t5 %>% filter(days < 0 & days > -30)
glimpse(t6)
tica = t6$scrssn
df3$ticagrelor = with(df3, ifelse(scrssn %in% tica, 1, 0))
df3 %>% count(ticagrelor)
# now dapt == clopidogrel or ticagrelor
df3$dapt = with(df3, ifelse((clopidogrel == 1|ticagrelor == 1), 1, 0))
df3 %>% count(dapt)
df4 = df3[df3$dapt == 0, ]
# now to identify (and then remove) those on Coumadin.
w = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\warfarinfill_c.sas7bdat')
w2 = w %>% filter(RxStatus == "0"|RxStatus == "ACTIVE")
w2$keep = with(w2, ifelse(PatientSID %in% mypat, 1, 0))
w2 %>% count(keep)
w3 = w2 %>% filter(keep == 1)
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(w3) = tolower(names(w3))
w4 = left_join(w3, cd2, by = "patientsid")
w5 = w4 %>% dplyr::select(scrssn, patientsid, filldatetime, rxstatus, surgdate)
w5$surgdate = as_date(w5$surgdate)
w5$filldatetime = as_date(w5$filldatetime)
w5$days = (w5$surgdate %--% w5$filldatetime)/ddays(1)
w6 = w5 %>% filter(days < 0 & days > -30)
glimpse(w6)
coum = w6$scrssn
df4$coumadin = with(df4, ifelse(scrssn %in% coum, 1, 0))
df4 %>% count(coumadin)
# remove coumadin now.
df5 = df4[df4$coumadin == 0, ]
write_csv(df5,
'P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further2.csv' )
# high risk of bleeding defined as those with history for admission with ICD codes
# of GI bleeding or ICH within 1 year prior to surgery.
# start with GIB.
gib9 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\gib_icd9.sas7bdat')
# limit to my patients and then limit to within 1 year prior to CABG.
gib10 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\gib_icd10.sas7bdat')
names(gib9); names(gib10)
gib9 = gib9 %>% rename(code = ICD9SID); gib10 = gib10 %>% rename(code = ICD10SID)
gib = rbind(gib9, gib10)
# now to limit to my patients only
gib$keep = with(gib, ifelse(PatientSID %in% mypat, 1, 0))
gib2 = gib %>% filter(keep == 1)
# now to get the surgery date and limit to 1 year prior to surgery.
cd = df3 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(gib2) = tolower(names(gib2))
gib3 = left_join(gib2, cd2, by = "patientsid")
gib3$admitdatetime = as_date(gib3$admitdatetime) ; gib3$surgdate = as_date(gib3$surgdate)
gib3$days = (gib3$surgdate %--% gib3$admitdatetime)/ddays(1)
gib4 = gib3 %>% filter(days < 0)
gib = gib4$scrssn # scrssns with a GIB admission prior to surgery
df5$gib = with(df5, ifelse(scrssn %in% gib, 1, 0))
df5 %>% count(gib) # no one had significant admission with GIB in the past.
###### ICH
ich9 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\ich_icd9.sas7bdat')
# limit to my patients and then limit to within 1 year prior to CABG.
ich10 = read_sas('P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\ich_icd10.sas7bdat')
names(ich9); names(ich10)
ich9 = ich9 %>% rename(code = ICD9SID); ich10 = ich10 %>% rename(code = ICD10SID)
ich = rbind(ich9, ich10)
# now to limit to my patients only
ich$keep = with(ich, ifelse(PatientSID %in% mypat, 1, 0))
ich2 = ich %>% filter(keep == 1)
# now to get the surgery date and limit to 1 year prior to surgery.
cd = df4 %>% dplyr::select(scrssn, surgdate)
names(cw2) = tolower(names(cw2))
cd2 = left_join(cd, cw2, by = "scrssn")
names(ich2) = tolower(names(ich2))
ich3 = left_join(ich2, cd2, by = "patientsid")
ich3$admitdatetime = as_date(ich3$admitdatetime) ; ich3$surgdate = as_date(ich3$surgdate)
ich3$days = (ich3$surgdate %--% ich3$admitdatetime)/ddays(1)
ich4 = ich3 %>% filter(days < 0)
ich = ich4$scrssn
df5$ich = with(df5, ifelse(scrssn %in% ich, 1, 0))
df5 %>% count(ich)
# remove these patients with significant bleeding risk.
df6 = df5[df5$ich == 0, ]
write_csv(df6,
'P:\\ORD_Deo_202008039D\\COMPASS_CABG\\sas_data\\cabg_further3.csv' )
# now with all the exclusion criteria listed, cabg_further3 is the dataset.
# now to work on the inclusion criteria from this; the result would be the COMPASS eligible cohort.
# the rest would be the COMPASS ineligible cohort.
|
#' Create a special "matrix" object that can cache its inverse.
#'
#' @param x A matrix
#'
#' @return A list containing four functions to set and get the value of the
#' matrix and to set and get the inverse of the matrix
#'
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
# Define function to set the value of the matrix. It also clears the old
# inverse from the cache
set <- function(y) {
x <<- y # Set the value
m <<- NULL # Clear the cache
}
# Define function to get the value of the matrix
get <- function() x
  # Define function to set the inverse. This is only used by cacheSolve() when
  # there is no cached inverse
setInverse <- function(inverse) m <<- inverse
# Define function to get the inverse
getInverse <- function() m
# Return a list with the above four functions
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
#' Return inverse of matrix x
#'
#' This function computes the inverse of the special "matrix" returned by
#' makeCacheMatrix above. If the inverse has already been calculated
#' (and the matrix has not changed), then cacheSolve retrieves the
#' inverse from the cache.
#'
#' @param x a special matrix created with makeCacheMatrix
#'
#' @return The inverse of the matrix x
#'
cacheSolve <- function(x) {
m <- x$getInverse() # This fetches the cached value for the inverse
if(!is.null(m)) { # If the cache was not empty, we can just return it
message("getting cached data")
return(m)
}
# The cache was empty. We need to calculate it, cache it, and then return it.
data <- x$get() # Get value of matrix
m <- solve(data) # Calculate inverse
x$setInverse(m) # Cache the result
m # Return the inverse
}
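# Example usage (a minimal sketch with a small invertible matrix):
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(cm)    # computes the inverse and caches it
# cacheSolve(cm)    # prints "getting cached data" and returns the cached inverse
# cm$set(matrix(c(1, 2, 3, 4), nrow = 2))   # setting a new matrix clears the cache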
|
/cachematrix.R
|
no_license
|
dhivyab90/ProgrammingAssignment2
|
R
| false | false | 1,848 |
r
|
##Load packages
# install.packages("dplyr")
library(dplyr)
library(e1071)
# install.packages("tidyr")
library(tidyr)
install.packages("recommenderlab")
library(recommenderlab)
#install.packages("Matrix")
library(Matrix)
##############################################Set the working directory
wd = "C:/Users/dkewon/Documents/RecommendationTool/Data Group Assignment"
setwd(wd)
getwd()
##############################################Read in the data
artists <- read.csv("Artists.dat", sep="\t")
tags <- read.csv("tags.dat", sep="\t")
user_artists<- read.csv("user_artists.dat", sep="\t")
user_taggedartists <- read.csv("user_taggedartists.dat", sep="\t")
#unique user_taggedartists artists/items
length(unique(user_taggedartists$artistID)) #12523
##Check unique users and artist IDs
#unique users
length(unique(user_artists$userID)) #1892
#unique artists/items
length(unique(user_artists$artistID)) #17632
##Check the distribution of the weights
summary(user_artists$weight)
hist(user_artists$weight) # histogram is right skewed
skewness(user_artists$weight)
##################################################Transform data to fix the skewness using log transformation
# subset: select only artists that appear in user_taggedartists
New_user_artists <- user_artists[user_artists$artistID %in% unique(user_taggedartists$artistID),]
New_user_artists$weight <- as.numeric(New_user_artists$weight)
New_user_artists$trans_weight<-log10(10*New_user_artists$weight)
## round off the weight values
New_user_artists <- New_user_artists %>% mutate_at(vars(trans_weight), funs(round(., 2)))
hist(New_user_artists$trans_weight)
#str(New_user_artists)
summary(New_user_artists$trans_weight)
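# Quick illustration of the transform: log10(10 * w) = 1 + log10(w), so raw
# listening counts of 100, 1000 and 100000 map to 3, 4 and 6 respectively,
# compressing the long right tail while preserving order.
log10(10 * c(100, 1000, 100000))   # 3 4 6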
#####################################################Convert the dataframe into a wide matrix
##Preprocess data before transforming it into a wide matrix
##Pick only userid,artistid and new transformed weights
New_user_artists <- New_user_artists[,c(1,2,4)]
# for faster execution one could randomly subsample the dataframe before transposing (the commented line below draws 100 rows)
#New_user_artists <- New_user_artists[sample(nrow(New_user_artists), 100), ]
## transform all user id into 4 integer length
New_user_artists$userID<- sprintf("%04d",New_user_artists$userID)
##add 'u' before all userid numbers eg u0002
New_user_artists$userID <-paste0('u',New_user_artists$userID)
## transform all artist id into 5 integer length
New_user_artists$artistID<- sprintf("%05d",New_user_artists$artistID)
##add 'a' before all artistid numbers eg a00002
New_user_artists$artistID <-paste0('a',New_user_artists$artistID)
############## Use spread function to transpose the data
New_user_artists_wide <- spread(New_user_artists, key = artistID, value = trans_weight )
class(New_user_artists_wide)
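# Toy illustration of the long-to-wide reshape above (hypothetical values):
# one row per (user, artist) pair becomes one row per user with one column per
# artist, and NA where a user has no listening data for that artist.
spread(data.frame(userID = c("u0001", "u0001", "u0002"),
                  artistID = c("a00001", "a00002", "a00001"),
                  trans_weight = c(3.1, 2.4, 4.0)),
       key = artistID, value = trans_weight)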
#***subset the data
### This will take about 83 mins to complete ###
# Preview the data and split it to keep the computation tractable. Two runs were done: one with 2000 artists and another with 5000 artists
New_user_artists_wide <-New_user_artists_wide[,1:2001]
New_user_artists_wide[1:10,1:10]
#convert into a matrix
New_user_artists_matrix <- data.matrix(New_user_artists_wide)
row.names(New_user_artists_matrix) <- New_user_artists_matrix[,1]
#drop first column
New_user_artists_matrix<- New_user_artists_matrix[,-1]
#add row names
row.names(New_user_artists_matrix) <- New_user_artists_wide[,1]
New_user_artists_matrix[1:10,1:10]
#######################################Computing pearson correlation function
##split the data into train and test
num_rows <- nrow(New_user_artists_matrix)
New_user_artists_matrix[is.na(New_user_artists_matrix)] <- 0
set.seed(123) # Set a seed to have the same subsets every time
# Define proportion to be in training set
p <- 0.7
# Define observations to be in training set
training_locations <- sort(sample(num_rows,floor(p*num_rows)))
train_data <- New_user_artists_matrix[training_locations,]
train_data[1:10,1:10]
test_data <- New_user_artists_matrix[-training_locations,]
test_data[1:10,1:10]
dim(test_data)
#rownames(train_data)
#rownames(test_data)
##define your number of recommendations N,nearest neighbour NN and OnlyNew (recommend only new stuff)
NN = 3
N = 10
onlyNew=TRUE
library(proxy)
ItemBasedCF <- function(train_data, test_data, N, NN, onlyNew=TRUE){
similarity_matrix = matrix(, ncol=ncol(train_data), nrow=ncol(train_data), dimnames = list(colnames(train_data), colnames(train_data)))
rowmeans = rowMeans(train_data)
ptm <- proc.time()
for (i in colnames(train_data)){
for (j in colnames(train_data)){
r_ui <- train_data[,i]
r_uj <- train_data[,j]
      # adjusted-cosine similarity between item columns i and j
      sim <- sum((r_ui - rowmeans)*(r_uj - rowmeans), na.rm=TRUE)/(sqrt(sum((r_ui - rowmeans)^2)) * sqrt(sum((r_uj - rowmeans)^2)))
similarity_matrix[i, j] <- sim
}
Time <- (proc.time() - ptm)
print(i)
print(Time)
}
print("Similarity calculation done")
# Nearest Neighbor
similarity_matrix_NN <- similarity_matrix
for (k in 1:ncol(similarity_matrix_NN)){
crit_val <- -sort(-similarity_matrix_NN[,k])[NN]
similarity_matrix_NN[,k] <- ifelse(similarity_matrix_NN[,k] >= crit_val, similarity_matrix_NN[,k], NA)
}
similarity_matrix_NN[is.na(similarity_matrix_NN)] <- 0
train_data[is.na(train_data)] <- 0
test_data2 <- test_data
test_data2[is.na(test_data2)] <- 0
print("Nearest neighbor selection done")
### Prediction ###
prediction <- matrix(, nrow=nrow(test_data), ncol=ncol(test_data),
dimnames=list(rownames(test_data), colnames(test_data)))
prediction2 <- matrix(, nrow=nrow(test_data), ncol(test_data),
dimnames=list(rownames(test_data), colnames(test_data)))
TopN <- matrix(, nrow=nrow(test_data), N, dimnames=list(rownames(test_data)))
for (u in rownames(test_data)){
# Numerator
Num <- test_data2[u, ] %*% similarity_matrix_NN
# Denominator
Denom <- colSums(similarity_matrix_NN, na.rm=TRUE)
# Prediction
prediction[u, ] <- Num/Denom
if (onlyNew == TRUE){
unseen <- names(test_data[u, test_data[u,]==0])
prediction2[u, ] <- ifelse(colnames(prediction) %in% unseen, prediction[u, ], NA)
}else{
prediction2[u, ] <- prediction[u, ]
}
TopN[u, ] <- names(-sort(-prediction2[u, ])[1:N])
}
print("Prediction done")
res <- list(prediction, TopN)
names(res) <- c('prediction', 'topN')
return(res)
}
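# Hand check of the centred (adjusted-cosine) similarity computed inside the
# loop above, on two toy item columns with hypothetical user means:
# u_means <- c(2, 3); item_i <- c(4, 1); item_j <- c(5, 2)
# sum((item_i - u_means) * (item_j - u_means)) /
#   (sqrt(sum((item_i - u_means)^2)) * sqrt(sum((item_j - u_means)^2)))   # ~0.89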
######Check for results using the function
ResultsIBCF <- ItemBasedCF(train_data, test_data, N = 3, NN= 10, onlyNew=TRUE) # onlyNew = TRUE
prediction <- as.data.frame(ResultsIBCF$prediction)
# write.csv(prediction,'prediction_IBCF.csv')
prediction_IBCF <-prediction
library(data.table)
# prediction_IBCF <-fread('prediction_IBCF.csv', header = T, sep = ',')
saveRDS(prediction_IBCF, file = "prediction_IBCF.rds")
(-sort(prediction[1,]))[1:10]
TopNIBCF <- as.data.frame(ResultsIBCF$topN)
write.csv(TopNIBCF,'TopNIBCF2.csv')
####################################################################################################
########MAE#########
MAE <- function(prediction, real){
if (nrow(prediction) == nrow(real) & ncol(prediction) == ncol(real)){
MAE = (sum( abs(prediction - real), na.rm = TRUE ) / (nrow(prediction) * ncol(prediction)) )
return(MAE)
}else{
return("MAE is done")
}
}
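# Toy check of the MAE helper: a constant absolute error of 0.5 should give 0.5.
MAE(matrix(1.5, 2, 2), matrix(1, 2, 2))   # 0.5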
UBCF_MAE <- MAE(prediction, test_data)
# UBCF_MAE = 0.06446814 when 2000 items are picked
# UBCF_MAE = 0.03257037 when 5000 items are picked
########## Recall/Precision ##########
Classification <- function(prediction, real, threshold=NA, TopN=NA){
if (nrow(prediction) == nrow(real) & ncol(prediction) == ncol(real)){
# Threshold #
if (!is.na(threshold)){
TP = sum(ifelse(prediction >= threshold & real >= threshold, 1, 0), na.rm=T)
FP = sum(ifelse(prediction >= threshold & real < threshold, 1, 0), na.rm=T)
FN = sum(ifelse(prediction < threshold & real >= threshold, 1, 0), na.rm=T)
Recall = TP/(TP+FN)
Precision = TP/(TP+FP)
Class_Thres = list(Recall, Precision)
names(Class_Thres) = c("Recall", "Precision")
}
if (!is.na(TopN)){
TP = vector(, length = nrow(prediction))
FP = vector(, length = nrow(prediction))
FN = vector(, length = nrow(prediction))
      for (i in 1:nrow(prediction)){
threshold_pred = -sort(-prediction[i, ])[TopN]
threshold_real = -sort(-real[i, ])[TopN]
TP[i] = sum(ifelse(prediction[i, ] >= threshold_pred & real[i, ] >= threshold_real, 1, 0), na.rm=T)
FP[i] = sum(ifelse(prediction[i, ] >= threshold_pred & real[i, ] < threshold_real, 1, 0), na.rm=T)
FN[i] = sum(ifelse(prediction[i, ] < threshold_pred & real[i, ] >= threshold_real, 1, 0), na.rm=T)
}
      TP = sum(TP)
      FP = sum(FP)
      FN = sum(FN)
Recall = TP/(TP+FN)
Precision = TP/(TP+FP)
Class_TopN = list(Recall, Precision)
names(Class_TopN) = c("Recall", "Precision")
}
if (!is.na(threshold) & !is.na(TopN)){
Class = list(Class_Thres, Class_TopN)
names(Class) = c("Threshold", "TopN")
}else if (!is.na(threshold) & is.na(TopN)) {
Class = Class_Thres
}else if (is.na(threshold) & !is.na(TopN)) {
Class = Class_TopN
}else{
Class = "You have to specify the 'Threshold' or 'TopN' parameter!"
}
return(Class)
}else{
return("Dimension of prediction are not equal to dimension of real")
}
}
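# Hand check of the threshold branch on a 2 x 2 toy example (hypothetical values):
# pred <- matrix(c(2.5, 1.0, 3.0, 0.5), nrow = 2)
# real <- matrix(c(3.0, 0.0, 1.0, 2.0), nrow = 2)
# With threshold = 2 there is 1 TP, 1 FP and 1 FN, so
# Classification(pred, real, threshold = 2) returns Recall = 0.5 and Precision = 0.5.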
x <- Classification(prediction, test_data, threshold=2)
print(x)
recall <- x$Recall
#Recall 0.3493662 when 2000 artists are picked
#Recall 0.2747396 when 5000 artists are picked
precision <- x$Precision
#precision 0.7350689 when 2000 artists are picked
#precision 0.7149849 when 5000 artists are picked
########## F1 Score ##########
UBCF_F1Score <- 2*((precision*recall)/(precision+recall))
print(UBCF_F1Score)
# UBCF_F1Score = 0.4736258 when 2000 artists are picked
# UBCF_F1Score = 0.3969482 when 5000 artists are picked
|
/script/2_IBCF_Prediction.R
|
no_license
|
dkewon/Last-FM-Recommendation-System
|
R
| false | false | 10,890 |
r
|