| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0-6.46M | large_string, lengths 3-331 | large_string, 2 values | large_string, lengths 5-125 | large_string, 1 value | bool, 2 classes | bool, 2 classes | int64, 4-6.46M | large_string, 75 values | string, lengths 0-6.46M |
## 1. [Function]
## Show the inverse matrix of any square invertible matrix
## 2. [How to use]
## **ma = any square invertible matrix**
## First use:
## a <- makeCacheMatrix(ma)
## cacheSolve(a)
## Subsequent use:
## a$set(ma)
## cacheSolve(a)
## 3. [Basic Principle]
## a) This design is used to minimize computation by caching results
## b) The first function (makeCacheMatrix) creates a "matrix" object
##    that can cache its inverse.
## c) The second function (cacheSolve) first retrieves the inverse
##    matrix cached by the first function, if there is one. Otherwise
##    it calculates the inverse matrix and caches it.
## 4. [Credit]
## The majority of the programming idea is borrowed from Roger D. Peng,
## PhD, Associate Professor of Biostatistics, Bloomberg School of
## Public Health, Johns Hopkins University.
## (https://github.com/rdpeng/ProgrammingAssignment2)
## Any inquiries? Contact me at https://github.com/nbnb1

## makeCacheMatrix creates a "matrix" object that can cache its
## inverse.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    x <<- y
    i <<- NULL  # invalidate the cached inverse when the matrix changes
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve 1) retrieves the inverse matrix cached by makeCacheMatrix,
## if there is one, or 2) calculates the inverse, caches it, and returns
## it when no cached value is available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
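## A quick usage sketch, assuming ma is a 2x2 invertible matrix: the second
## cacheSolve() call returns the cached inverse instead of recomputing it.
ma <- matrix(c(2, 0, 0, 2), nrow = 2)
a <- makeCacheMatrix(ma)
cacheSolve(a)                           # computes and caches the inverse
cacheSolve(a)                           # prints "getting cached data" and reuses the cache
a$set(matrix(c(1, 2, 3, 4), nrow = 2))  # setting a new matrix clears the cache
cacheSolve(a)                           # recomputes for the new matrix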
| path: /cachematrix.R | license: no_license | repo: runchengxie/ProgrammingAssignment2 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,872 | extension: r |
# Function to create a raster stack using subset MODIS data
# Description: Automatically creates the extent of the selected flux tower and returns a georeferenced raster stack.
# Parameters (type, description):
#   df   (data.frame, the data frame produced by the ModisSubset function)
#   site (character, one of the flux tower sites from around the world)
library(raster)  # provides extent(), stack(), projection(), setExtent()
modisRaster <-
  function(df, site)
  {
    Sites_info <- read.csv("data/MODIS_Subset_Sites_Information.csv")
    pos <- which(Sites_info$Site_Name == site)
    # create the extent (xmin, xmax, ymin, ymax) from the MODIS subset info data frame.
    # Further information can be found at: http://daac.ornl.gov/cgi-bin/MODIS/GR_col5_1/mod_viz.html
    extent <- extent(Sites_info$NW_Longitude_edge[pos], Sites_info$SE_Longitude_edge[pos], Sites_info$SE_Latitude_edge[pos], Sites_info$NW_Latitude_edge[pos])
    ## the site coordinates are projected in lat/long
    latlong <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
    ## sinusoidal projection string (defined here but not applied below)
    modissin <-
      "+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs"
    # convert each row of the df to a raster using the df2raster function
    rasters <- apply(X=df, 1, FUN=df2raster)
    # create the stack of the rasters
    stack <- stack(rasters)
    # project the stack
    projection(stack) <- latlong
    # set the extent on the raster stack
    stack <- setExtent(stack, extent, keepres=FALSE, snap=FALSE)
    return(stack)
  }
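## A hedged usage sketch: modis_df and the "Loobos" site name are placeholders;
## the real input comes from the ModisSubset function, and the site must match a
## Site_Name entry in data/MODIS_Subset_Sites_Information.csv.
# modis_df <- ModisSubset(...)                  # hypothetical upstream call
# ndvi_stack <- modisRaster(modis_df, "Loobos")
# plot(ndvi_stack[[1]])                         # inspect the first layer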
| path: /R/modisRaster.R | license: no_license | repo: tziol001/Project-Geoscripting | language: R | is_vendor: false | is_generated: false | length_bytes: 1,574 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{readme}
\alias{readme}
\title{Readme HTML maker and Viewer}
\usage{
readme(tt)
}
\arguments{
\item{tt}{tt_data object for printing}
}
\value{
Does not return anything. Used to show readme of the downloaded
tidytuesday dataset in the Viewer.
}
\description{
Readme HTML maker and Viewer
}
\examples{
\donttest{
tt_output <- tt_load_gh("2019-01-15")
readme(tt_output)
}
}
| path: /man/readme.Rd | license: permissive | repo: thecodemasterk/tidytuesdayR | language: R | is_vendor: false | is_generated: true | length_bytes: 463 | extension: rd |
load("~/Box Sync/Information_Content_in_Probes/Code/Shape-Seq_data_analysis/shape2.coveragelist.RData")
source('~/Box Sync/Information_Content_in_Probes/Code/Shape-Seq_data_analysis/shape.seq.tools.R')
source('~/Box Sync/Information_Content_in_Probes/Code/shape-seq QC/SNR_syncable//shapeseq.count.info.R')
# build the reactivity object before splitting it into per-replicate ranges
react.1101 <- reactivitylooper.shape2list(shape2.coveragelist, 1, 1, 0, 1)
rna.range <- reactivity.quicksplit(react.1101, 1)
count.summary.stats <- count.info(shape2.coveragelist)
bootstrap.mean.var <- read.csv('~/Box Sync/Information_Content_in_Probes/Code/shape-seq QC/mean variance/diff.MR_28_zeroed.csv')
snr.generator <- function (react, rna.range, snr.type.flag) {
rnas <- 8
output <- numeric()
total.mean <- numeric()
total.sd <- numeric()
for (i in 1:rnas) {
sub.react.df <- replicate.subsetter(react, rna.range, i)
rep1 <- sub.react.df$rep1.react
rep2 <- sub.react.df$rep2.react
rep3 <- sub.react.df$rep3.react
# flag 1: per-residue SNR across the 3 replicates, summarised as one mean per RNA
if (snr.type.flag == 1) {
snr.3rep <- apply(data.frame(rep1, rep2, rep3), 1, mean) /
apply(data.frame(rep1, rep2, rep3), 1, sd)
output <- c(output, (mean(snr.3rep, na.rm = T)))
}
# flag 2: mean pairwise SNR for replicate pairs 12, 13, and 23
if (snr.type.flag == 2) {
snr.12 <- apply(data.frame(rep1, rep2), 1, mean) /
apply(data.frame(rep1, rep2), 1, sd)
snr.13 <- apply(data.frame(rep1, rep3), 1, mean) /
apply(data.frame(rep1, rep3), 1, sd)
snr.23 <- apply(data.frame(rep2, rep3), 1, mean) /
apply(data.frame(rep2, rep3), 1, sd)
output <- c(output, (c(mean(snr.12, na.rm = T), mean(snr.13, na.rm = T),
mean(snr.23, na.rm = T))))
}
# get residue SNR for 3 replicates
if (snr.type.flag == 3) {
rep.mean <- apply(data.frame(rep1, rep2, rep3), 1, mean)
rep.sd <- apply(data.frame(rep1, rep2, rep3), 1, sd)
snr.3rep <- rep.mean / rep.sd
output <- c(output, snr.3rep)
total.mean <- c(total.mean, rep.mean)
total.sd <- c(total.sd, rep.sd)
}
}
if (snr.type.flag == 3) {
plot(total.mean, total.sd, log = 'xy')
lines(c(1e-5, 1e1), c(1e-5, 1e1), col = 'red')
hist(total.mean / total.sd, breaks = 100)
hist(log(total.mean) / log(total.sd), breaks = 100)
return(data.frame(total.mean, total.sd))
}
return(output)
}
pairwise.correlation.test <- function(react, rna.range, this.cor) {
rnas <- 8
output <- numeric()
for (i in 1:rnas) {
sub.react.df <- replicate.subsetter(react, rna.range, i)
rep1 <- sub.react.df$rep1.react
rep2 <- sub.react.df$rep2.react
rep3 <- sub.react.df$rep3.react
cor.12 <- cor(rep1, rep2, method = this.cor)
cor.13 <- cor(rep1, rep3, method = this.cor)
cor.23 <- cor(rep2, rep3, method = this.cor)
output <- c(output, cor.12, cor.13, cor.23)
}
return(output)
}
count.data.parser <- function(count.data, pairwise.flag, snr.flag) {
#pairwise flag either processes all 3 replicates as one, or pairwise 12, 23, 13
#snr.flag allows for snr of the data values to be generated, rather than the absolute value
rnas <- 8
summary.mean <- data.frame(minus.coverage.rate = numeric(), plus.coverage.rate = numeric(),
total.minus.coverage = numeric(),total.plus.coverage = numeric(),
rate.ratio = numeric(), total.ratio = numeric(), hit.rate = numeric())
summary.sd <- data.frame(minus.coverage.rate = numeric(), plus.coverage.rate = numeric(),
total.minus.coverage = numeric(),total.plus.coverage = numeric(),
rate.ratio = numeric(), total.ratio = numeric(), hit.rate = numeric())
for (i in 1:rnas) {
begin <- (i - 1) * 3 + 1
middle <- (i - 1) * 3 + 2
finish <- (i - 1) * 3 + 3
if (pairwise.flag == 0) {
sub.mean <- apply(count.data[begin:finish, ], 2, mean)
sub.sd <- apply(count.data[begin:finish, ], 2, sd)
summary.mean[i, ] <- sub.mean
summary.sd[i, ] <- sub.sd
}
if (pairwise.flag == 1) {
sub.mean.12 <- apply(count.data[begin:middle, ], 2, mean)
sub.mean.13 <- apply(count.data[c(begin, finish), ], 2, mean)
sub.mean.23 <- apply(count.data[middle:finish, ], 2, mean)
sub.sd.12 <- apply(count.data[begin:middle, ], 2, sd)
sub.sd.13 <- apply(count.data[c(begin, finish), ], 2, sd)
sub.sd.23 <- apply(count.data[middle:finish, ], 2, sd)
summary.mean[begin, ] <- sub.mean.12
summary.mean[middle, ] <- sub.mean.13
summary.mean[finish, ] <- sub.mean.23
summary.sd[begin, ] <- sub.sd.12
summary.sd[middle, ] <- sub.sd.13
summary.sd[finish, ] <- sub.sd.23
}
}
if (snr.flag == 0) {return(list(summary.mean, summary.sd))}
if (snr.flag == 1) {return(summary.mean / summary.sd)}
}
replicate.subsetter <- function(react, rna.range, i) {
rep1 <- (i - 1) * 3 + 1
rep2 <- (i - 1) * 3 + 2
rep3 <- (i - 1) * 3 + 3
rep1.range <- rna.range[rep1, ]
rep2.range <- rna.range[rep2, ]
rep3.range <- rna.range[rep3, ]
rep1.react <- react$reactivity[rep1.range[1]:rep1.range[2]]
rep2.react <- react$reactivity[rep2.range[1]:rep2.range[2]]
rep3.react <- react$reactivity[rep3.range[1]:rep3.range[2]]
return(data.frame(rep1.react, rep2.react, rep3.react))
}
snr.bootrep <- function(boot.means, rna.range) {
# find the mean SNR per residue for each RNA
output <- numeric()
for (i in 1:24) {
sub.mean <- boot.means$reactivity[rna.range[i, 1]:rna.range[i, 2]]
sub.sd <- sqrt(boot.means$react.var[rna.range[i, 1]:rna.range[i, 2]])
output <- c(output, mean(sub.mean / sub.sd, na.rm = T))
#output <- c(output, mean(sub.mean, na.rm = T))
}
return(output)
}
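## A hedged usage sketch of the helpers above, assuming the objects built at
## the top of this script (react.1101, rna.range) are in the workspace; the
## flag values follow the comments inside snr.generator().
# mean.snr      <- snr.generator(react.1101, rna.range, 1)  # one mean SNR per RNA
# pairwise.snr  <- snr.generator(react.1101, rna.range, 2)  # replicate pairs 12, 13, 23
# residue.stats <- snr.generator(react.1101, rna.range, 3)  # per-residue mean/sd plus diagnostic plots
# spearman.cors <- pairwise.correlation.test(react.1101, rna.range, "spearman")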
| path: /Analylsis_Tools/SHAPE_Seq/SNR.QC.R | license: no_license | repo: MingleiYang/SPEQC | language: R | is_vendor: false | is_generated: false | length_bytes: 5,924 | extension: r |
library(GLMsData)
### Name: galapagos
### Title: Gal\'apagos Island species data
### Aliases: galapagos
### Keywords: datasets
### ** Examples
data(galapagos)
summary(galapagos)
| path: /data/genthat_extracted_code/GLMsData/examples/galapagos.Rd.R | license: no_license | repo: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 191 | extension: r |
# Load libraries
library(tidyverse)
library(dplyr)
# Read files
NEI <- readRDS('summarySCC_PM25.rds')
SCC <- readRDS('Source_Classification_Code.rds')
# Q2
data <- filter(NEI, fips == "24510")
data <- tapply(data$Emissions, data$year, sum)
plot(names(data), data, xlab = 'Year', ylab = 'PM 2.5', main = 'Emissions by year for Baltimore', type = 'l')
dev.copy(device = png, filename = 'plot2.png', width = 500, height = 400)
dev.off()
| path: /plot2.R | license: no_license | repo: Gerardo203/Project2 | language: R | is_vendor: false | is_generated: false | length_bytes: 435 | extension: r |
# ## WDI indicators (? specific set? look at openDataBlog to see details?)
# indicators <- read_csv("WDI/popWDI.csv")
#
#
# ##
# indicatorChoice <- indicators$indicator
# names(indicatorChoice) <- indicators$description
indy <- reactive({
choice <- input$indicator
# print(choice) choice <- "EN.ATM.CO2E.PC"
dat <- WDI(country = "all",
indicator = choice,
start = 1980,
end = 2015)
#dat$SP.DYN.LE00.IN <- round(dat$SP.DYN.LE00.IN, 1)
dat[3] <- round(dat[3], 1)
#head(dat[3]) #head(dat,1) #str(dat)
names(dat)[3] <- "value"
#dat <-dat[!is.na(dat$value),]
#dat[dat$value<0,]$value <- 0
print(min(dat$value))
minYr <- min(dat[!is.na(dat$value),]$year)
maxYr <- max(dat[!is.na(dat$value),]$year)
description <- indicators[indicators$indicator==choice,]$description
#print(description)
info=list(dat=dat,minYr=minYr,maxYr=maxYr,description=description)
# print(maxYr)
# print(str(dat))
return(info)
})
#
# ## Perform the merge. The sort = FALSE argument is crucial here - but be sure to double-check as merge
# ## can behave badly with spatial objects in R.
#
#
#print("so good so far")
mb_tiles <- "http://a.tiles.mapbox.com/v3/kwalkertcu.l1fc0hab/{z}/{x}/{y}.png"
mb_attribution <- 'Mapbox <a href="http://mapbox.com/about/maps" target="_blank">Terms & Feedback</a>'
#df <- data.frame(Age_Range=levels(cut(countries2$SP.DYN.LE00.IN,6)))
#print("so good so far")
# class(countries2) #SpatialPolygonsDataFrame
# str(countries2@data)
# 'data.frame': 241 obs. of 66 variables:
# inc $ SP.DYN.LE00.IN: num 75.2
output$a <- renderUI({
# maxYr <- as.integer(indy()$maxYr)
# print(maxYr)
inputPanel(
sliderInput("year","Choose Year",min=indy()$minYr, max=indy()$maxYr,value=indy()$maxYr,step=1,sep=""),
sliderInput("buckets","No. Groups (1=continuous)",min=1, max=11,value=6,step=1,sep="")
)
})
uiOutput('a')
## Create the map
dt <- reactive({
yearDat <- indy()$dat %>%
filter(year==input$year)
# yearDat <- dat %>%
# filter(year==1999)
countries2 <- sp::merge(countries,
yearDat,
by.x = "iso_a2",
by.y = "iso2c",
sort = FALSE)
if (input$buckets>1){
df <- data.frame(valueRange=levels(cut(countries2$value,input$buckets)))
valueRange <-(levels(df$valueRange))
} else {
df <- NULL
valueRange <- NULL
}
# print(str(df))
country_popup <- paste0("<strong>Country: </strong>",
countries2$country,
"<br><strong>",indy()$description," ",input$year,": </strong>",
countries2$value)
info=list(countries2=countries2,df=df,country_popup=country_popup,valueRange=valueRange)
return(info)
})
output$ages <- renderText({
if (is.null(indy()$maxYr)) return()
if (is.null(dt()$valueRange)) return()
paste(indy()$description," ",paste(dt()$valueRange,collapse=", "))
})
textOutput("ages")
output$WDImap <- renderLeaflet({
if (is.null(input$buckets)) return()
if (input$buckets!=1) {
pal <- colorQuantile("RdBu", NULL, n = input$buckets)
} else {
# pal <- colorBin(colorRamp(c("#FFFFFF", "#006633"), interpolate="spline"),domain=NULL)
pal <- colorBin(colorRamp(c("#b2182b", "#2166ac"), interpolate="spline"),domain=NULL)
}
# print("print class")
# print(class(dt()$countries2))
leaflet(data = dt()$countries2) %>%
addTiles(urlTemplate = mb_tiles,
attribution = mb_attribution) %>%
addPolygons(fillColor = ~pal(value),
fillOpacity = 0.8,
color = "#BDBDC3",
weight = 1,
popup = dt()$country_popup) %>%
mapOptions(zoomToLimits="first")
})
| path: /WDI/code/wdi.R | license: no_license | repo: pssguy/socEcon | language: R | is_vendor: false | is_generated: false | length_bytes: 3,897 | extension: r |
#' @rdname ranges-overlaps
#' @importFrom rlang syms
#' @export
group_by_overlaps <- function(x, y, maxgap, minoverlap) { UseMethod("group_by_overlaps") }
#' @rdname ranges-overlaps
#' @export
group_by_overlaps.IntegerRanges <- function(x, y, maxgap = -1L, minoverlap = 0L) {
hits <- make_hits(x, y, findOverlaps, maxgap = maxgap, minoverlap = minoverlap)
left <- expand_by_hits(x, y, c(".query", ".subject"), hits)
mcols(left)$query <- queryHits(hits)
new_grouped_ir(left, UQS(rlang::syms("query")))
}
#' @rdname ranges-overlaps
#' @export
group_by_overlaps.GenomicRanges <- function(x, y, maxgap = -1L, minoverlap = 0L) {
hits <- make_hits(x, y, findOverlaps,
maxgap = maxgap,
minoverlap = minoverlap,
ignore.strand = TRUE)
left <- expand_by_hits(x, y, c(".query", ".subject"), hits)
mcols(left)$query <- queryHits(hits)
new_grouped_gr(left, UQS(rlang::syms("query")))
}
# TODO add in more variants here?
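## A hedged usage sketch, assuming the rest of the plyranges package (which
## supplies make_hits(), expand_by_hits(), and new_grouped_gr()) is loaded;
## the ranges themselves are made up.
# library(GenomicRanges)
# query   <- GRanges("chr1", IRanges(start = c(1, 20), width = 10))
# subject <- GRanges("chr1", IRanges(start = c(5, 50), width = 10))
# group_by_overlaps(query, subject)  # one group per query range that overlaps a subject range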
| path: /R/ranges-overlap-groups.R | license: no_license | repo: Shians/plyranges | language: R | is_vendor: false | is_generated: false | length_bytes: 988 | extension: r |
library('quantmod')
library('corrplot')
getSymbols(c("AMTD"), from="2009-04-20", to="2020-01-17", src="yahoo", periodicity = 'weekly')
data = read.csv('AMTD 2004 .csv', header = T)
AMTD.trends = data[,c("Month", "TDAmeritrade")]
##Extract Earnings Dates
##First Earnings = 4/21/2009
AMTD[547]
length(AMTD$AMTD.Close)
x = seq(1,561, by = 13)
length(x)
AMTD.Earnings = AMTD[x]
AMTD.Earnings[44]
AMTD.Earnings[43]
AMTD[3*13+1+1-13]
# shift the earnings week forward by one weekly index for these quarters
for (i in c(3, 7, 11, 17, 18, 20:22, 24:26, 28:30, 32:34)) {
  AMTD.Earnings[i,] = AMTD[i*13+1+1-13]
}
# and by two weekly indices for these quarters
for (i in c(19, 23, 27, 31, 35:43)) {
  AMTD.Earnings[i,] = AMTD[i*13+1+1+1-13]
}
##1,2,4,5,6,8,9,10,12,13,14,15,16,
##3,7,11, 17,18,20,21,22,24,25,26,28,29,30,32,33,34
##19,23,27,31,35,36,37,38,39,40,41,42,43
length(AMTD.Earnings)
x = seq(1, 43, by = 1)
AMTD.EarningsClose = AMTD.Earnings$AMTD.Close[x] ##Quarterly Dates
AMTD.EarningsOpen = AMTD.Earnings$AMTD.Open[x]
AMTD.EarningsChange = (AMTD.EarningsClose - AMTD.EarningsOpen)/(AMTD.EarningsOpen)
##TrendQuarterlySum
##7/21/2009 = 5,6,7
##10/27/2009 = 8,7,9
y = seq(61,193, by = 3)
length(y)
AMTD.trends$Month[190]
AMTD.trendSum = c()
y[1]
y[2]
# three-month sums of the Google Trends series per quarter: quarter k covers
# rows (62 + 3*(k-1)) to (64 + 3*(k-1)) of AMTD.trends
# (k = 1 -> months 2-4, k = 2 -> months 5-7, k = 3 -> months 8-10, k = 4 -> months 11-1, ...)
for (k in 1:44) {
  first.row <- 62 + 3 * (k - 1)
  AMTD.trendSum[k] = sum(AMTD.trends$TDAmeritrade[first.row:(first.row + 2)])
}
##Quarter 1, April (4)
numQ1 = seq(1, 44, by = 4)
AMTD.trendSumQ1 = AMTD.trendSum[numQ1]
AMTD.trendSumQ1.log = diff(as.vector(log(AMTD.trendSumQ1)))
length(AMTD.trendSumQ1.log)
num2Q1 = seq(5, 43, by = 4)
AMTD.EarningsChange.Q1 = AMTD.EarningsChange[num2Q1]
AMTD.trendSumQ1.C = diff(as.vector(AMTD.trendSumQ1))
fit <- lm(AMTD.EarningsChange.Q1 ~ AMTD.trendSumQ1.log)
fitC <- lm(AMTD.EarningsChange.Q1 ~ AMTD.trendSumQ1.C)
summary(fit)
ratesAQ1 <- data.frame(AMTD.EarningsChange.Q1, AMTD.trendSumQ1.log)
corrplot.mixed(cor(ratesAQ1), upper = "ellipse")
summary(fitC)
ratesAQ1C <- data.frame(AMTD.EarningsChange.Q1, AMTD.trendSumQ1.C)
corrplot.mixed(cor(ratesAQ1C), upper = "ellipse")
##Quarter 2, July (7)
numQ2 = seq(2, 44, by = 4)
AMTD.trendSumQ2 = AMTD.trendSum[numQ2]
AMTD.trendSumQ2.log = diff(as.vector(log(AMTD.trendSumQ2)))
AMTD.trendSumQ2.C = diff(as.vector(AMTD.trendSumQ2))
length(AMTD.trendSumQ2.log)
num2Q2 = seq(6, 43, by = 4)
AMTD.EarningsChange.Q2 = AMTD.EarningsChange[num2Q2]
fit2 <- lm(AMTD.EarningsChange.Q2 ~ AMTD.trendSumQ2.log)
fit2
summary(fit2)
ratesAQ2 <- data.frame(AMTD.EarningsChange.Q2, AMTD.trendSumQ2.log)
corrplot.mixed(cor(ratesAQ2), upper = "ellipse")
fitC2 <- lm(AMTD.EarningsChange.Q2 ~ AMTD.trendSumQ2.C)
summary(fitC2)
ratesAQ2C <- data.frame(AMTD.EarningsChange.Q2, AMTD.trendSumQ2.C)
corrplot.mixed(cor(ratesAQ2C), upper = "ellipse")
##Quarter 3 October (10)
numQ3 = seq(3, 44, by = 4)
AMTD.trendSumQ3 = AMTD.trendSum[numQ3]
AMTD.trendSumQ3.log = diff(as.vector(log(AMTD.trendSumQ3)))
AMTD.trendSumQ3.C = diff(as.vector(AMTD.trendSumQ3))
length(AMTD.trendSumQ3.log)
num2Q3 = seq(7, 43, by = 4)
AMTD.EarningsChange.Q3 = AMTD.EarningsChange[num2Q3]
fit3 <- lm(AMTD.EarningsChange.Q3 ~ AMTD.trendSumQ3.log)
fit3
summary(fit3)
ratesAQ3 <- data.frame(AMTD.EarningsChange.Q3, AMTD.trendSumQ3.log)
corrplot.mixed(cor(ratesAQ3), upper = "ellipse")
fitC3 <- lm(AMTD.EarningsChange.Q3 ~ AMTD.trendSumQ3.C)
summary(fitC3)
ratesAQ3C <- data.frame(AMTD.EarningsChange.Q3, AMTD.trendSumQ3.C)
corrplot.mixed(cor(ratesAQ3C), upper = "ellipse")
##Quarter 4, January (1)
numQ4 = seq(4, 43, by = 4)
AMTD.trendSumQ4 = AMTD.trendSum[numQ4]
AMTD.trendSumQ4.log = diff(as.vector(log(AMTD.trendSumQ4)))
AMTD.trendSumQ4.C = diff(as.vector(AMTD.trendSumQ4))
num2Q4 = seq(8, 43, by = 4)
AMTD.EarningsChange.Q4 = AMTD.EarningsChange[num2Q4]
fit4 <- lm(AMTD.EarningsChange.Q4 ~ AMTD.trendSumQ4.log)
fit4
summary(fit4)
ratesAQ4 <- data.frame(AMTD.EarningsChange.Q4, AMTD.trendSumQ4.log)
corrplot.mixed(cor(ratesAQ4), upper = "ellipse")
fitC4 <- lm(AMTD.EarningsChange.Q4 ~ AMTD.trendSumQ4.C)
summary(fitC4)
ratesAQ4C <- data.frame(AMTD.EarningsChange.Q4, AMTD.trendSumQ4.C)
corrplot.mixed(cor(ratesAQ4C), upper = "ellipse")
plot(AMTD.EarningsChange.Q4$AMTD.Close)
## Not Dividing By Quarter
AMTD.trendSumT = AMTD.trendSum[1:43] ##1 = 3 months before 4th 2009, 43 = data collected past most recent earning
AMTD.trendSumT.log = diff(as.vector(log(AMTD.trendSumT)))
AMTD.trendSumT.C = diff(as.vector(AMTD.trendSumT))
length(AMTD.trendSumT.log)
length(AMTD.EarningsChange)
AMTD.EarningsChangeT = AMTD.EarningsChange[2:43] ##43 = previous earnings date
fitT <- lm(AMTD.EarningsChangeT ~ AMTD.trendSumT.log)
fitT
summary(fitT)
ratesAT.log <- data.frame(AMTD.EarningsChangeT, AMTD.trendSumT.log)
corrplot.mixed(cor(ratesAT.log), upper = "ellipse")
fitCT<- lm(AMTD.EarningsChangeT ~ AMTD.trendSumT.C)
summary(fitCT)
ratesAT <- data.frame(AMTD.EarningsChangeT, AMTD.trendSumT.C)
corrplot.mixed(cor(ratesAT), upper = "ellipse")
##Testing
AMTD.trendSumQ4.C
AMTD.EarningsChange.Q4
AMTD.trendSumT.C
AMTD.EarningsChangeT
##Polynomial Q4
AMTD.trendSum[44] - AMTD.trendSum[40]
PolyFunc = function(x){
y = -2E+11*x^6 + 5E+08*x^5 + 6E+08*x^4 + 503853*x^3 - 363782*x^2 - 105.03*x + 31.203
y
}
PolyFunc(45)
| path: /NLPSENTfiles/R_files/AMTD Month Analysis 2004 - 2020.R | license: no_license | repo: ATCUWgithub/GoogleTrends | language: R | is_vendor: false | is_generated: false | length_bytes: 8,619 | extension: r |
# install necessary packages
if(!require(RColorBrewer)){install.packages('RColorBrewer'); library(RColorBrewer)}
if(!require(shape)){install.packages('shape'); library(shape)}
# specify path to output files
path_output_leaky <- '../../output/analysis/eir_vs_heterogeneity/output_files/leaky/efficacy/'
path_output_all_or_none <- '../../output/analysis/eir_vs_heterogeneity/output_files/all_or_none/efficacy/'
# list all of the output files
files_output_leaky <- list.files(path = path_output_leaky, full.names = T, pattern = 'efficacy')
files_output_all_or_none <- list.files(path = path_output_all_or_none, full.names = T, pattern = 'efficacy')
# load the files
output_leaky <- lapply(files_output_leaky, function(ff){read.csv(ff)})
output_efficacy_leaky <- as.data.frame((do.call('rbind', output_leaky)))
output_all_or_none <- lapply(files_output_all_or_none, function(ff){read.csv(ff)})
output_efficacy_all_or_none <- as.data.frame((do.call('rbind', output_all_or_none)))
# get the unique eir and sig_het values
eir_equil <- sort(unique(output_efficacy_all_or_none$eir_equil))
sig_het <- sort(unique(output_efficacy_all_or_none$sig_het))
# get the efficacies
eff_leaky <- lapply(eir_equil, function(eir){sapply(sig_het, function(sig){quantile(output_efficacy_leaky$eff_cph_recurrent_LM[output_efficacy_leaky$sig_het == sig & output_efficacy_leaky$eir_equil == eir],
probs = c(0.25, 0.50, 0.75), na.rm = T)})})
eff_all_or_none <- lapply(eir_equil, function(eir){sapply(sig_het, function(sig){quantile(output_efficacy_all_or_none$eff_cph_recurrent_LM[output_efficacy_all_or_none$sig_het == sig & output_efficacy_all_or_none$eir_equil == eir],
probs = c(0.25, 0.50, 0.75), na.rm = T)})})
# generate plot
palette <- brewer.pal(n = 9, name = 'YlOrRd')
palette <- palette[c(3,5,7,9)]
offset <- seq(from = -0.4, to = 0.4, length.out = 2 * length(sig_het))
jpeg(filename = '../../output/figs/fig_S6.jpg', width = 8, height = 5, units = 'in', res = 500)
par(mar = c(3.6, 3.6, 0.8, 0.8))
plot(NA, NA, type = 'n', axes = F, xlim = c(0.5, length(eir_equil) + 0.5), ylim = c(0, 1),
xlab = '', ylab = '', xaxs = 'i', yaxs = 'i')
abline(h = 0.75, lwd = 1, lty = 2)
abline(v = seq(from = 0.5, to = length(eir_equil) + 0.5, by = 1), col = '#222222')
for(ee in 1:length(eir_equil))
{
for(ss in 1:length(sig_het))
{
segments(x0 = ee + offset[1 + 2 * (ss-1)], y0 = eff_all_or_none[[ee]][1,ss], y1 = eff_all_or_none[[ee]][3,ss], col = palette[ss], lwd = 1.5)
points(ee + offset[1 + 2 * (ss - 1)], eff_all_or_none[[ee]][2,ss], pch = 16, cex = 1.5, col = palette[ss])
segments(x0 = ee + offset[2 + 2 * (ss-1)], y0 = eff_leaky[[ee]][1,ss], y1 = eff_leaky[[ee]][3,ss], col = palette[ss], lwd = 1.5)
points(ee + offset[2 + 2 * (ss - 1)], eff_leaky[[ee]][2,ss], pch = 17, cex = 1.5, col = palette[ss])
}
}
box()
axis(side = 1, at = 1:length(eir_equil), labels = eir_equil)
axis(side = 2, las = 1)
mtext(side = 1, line = 2.3, 'EIR')
mtext(side = 2, line = 2.3, 'Efficacy')
legend(x = 0.5, y = 0.375,
lwd = c(NA, NA, 1,rep(NA, length(palette))), col = c('#222222','#222222','#222222', palette),
lty = c(NA, NA, 2,rep(NA, length(palette))),
pch = c(1,2,NA, rep(15, length(palette))),
legend = c('All-or-None','Leaky','Clearance Prob', '', '', '', ''),
bty = 'n', pt.cex = 1.5, cex = 0.8)
Arrows(x0 = 0.675, x1 = 0.675, y0 = 0.23, y1 = 0.0975, arr.type="triangle",
arr.length = 0.15, arr.width = 0.1)
text(x = 0.70, y = (0.23 + 0.0975)/2, 'Greater Heterogeneity',
pos = 4, offset = 0, cex = 0.8)
dev.off()
| path: /code/R/fig_FigS6.R | license: no_license | repo: johnhhuber/Radical_Cure_Uncertainty | language: R | is_vendor: false | is_generated: false | length_bytes: 3,747 | extension: r |
library(synapseClient)
synapseLogin("in.sock.jang@sagebase.org","tjsDUD@")
id_exprLayer <- "syn1532991"
layer_expr <- loadEntity(id_exprLayer)
res2 <- layer_expr$objects$res2
id_headerLayer <- "syn1532987"
layer_header <- loadEntity(id_headerLayer)
header <- layer_header$objects$header
# ### split data into control vs. AD
BinaryPhenotype<-header$TRIX
# without MCI
cont <- which(BinaryPhenotype!=2)
res2<-res2[,cont]
### Do some classification works for 5 fold cross validation
library(predictiveModeling)
library(synapseClient)
library(ggplot2)
library(ROCR)
library(modeest)
synapseLogin("in.sock.jang@sagebase.org","tjsDUD@")
source("~/COMPBIO/trunk/users/jang/R5/crossValidatePredictiveModel_categorical2.R")
source("~/COMPBIO/trunk/users/jang/R5/myCatEnetModel.R")
data_expr <- res2
data_drug<-as.numeric(as.matrix(header$TRIX[cont]))
data_drug[which(data_drug==3)]<-1
names(data_drug)<-header$SAMPLE.ID[cont]
# NA filter for training set
featureData_filtered <- filterNasFromMatrix(data_expr, filterBy = "rows")
dataSets <- createFeatureAndResponseDataList(t(featureData_filtered),data_drug)
filteredData<-filterPredictiveModelData(dataSets$featureData,dataSets$responseData[,drop=FALSE])
filteredFeatureData <- scale(filteredData$featureData)
filteredResponseData <- (filteredData$responseData)
alphas =unique(createENetTuneGrid()[,1])
lambdas = createENetTuneGrid(alphas = 1)[,2]
resultsLasso<-crossValidatePredictiveModel_categorical2(filteredFeatureData, filteredResponseData, model = myCatEnetModel$new(), alpha=1, lambda = lambdas)
resultsENet<-crossValidatePredictiveModel_categorical2(filteredFeatureData, filteredResponseData, model = myCatEnetModel$new(), alpha=alphas, lambda = lambdas)
save(resultsLasso,file = "~/COMPBIO/trunk/users/jang/AD/Control_AD/cv5Lasso.Rdata")
save(resultsENet,file = "~/COMPBIO/trunk/users/jang/AD/Control_AD/cv5ENet.Rdata")
| path: /AD/Control_AD/categoricalModel_CV.R | license: no_license | repo: insockjang/DrugResponse | language: R | is_vendor: false | is_generated: false | length_bytes: 1,887 | extension: r |
calculateOverlaps <- function(listOfIdentifiers){
mtrx1 <- mapply(
Y=names(listOfIdentifiers),
FUN=function(Y){
mapply( x=names(listOfIdentifiers),
FUN=function(x){
length(
intersect(listOfIdentifiers[[x]],listOfIdentifiers[[Y]])
)
}
)
}
)
colnames(mtrx1) <- apply(X=as.array(colnames(mtrx1)),FUN=function(X){paste("vs.",X,sep="")},1)
mtrx2 <- cbind(full=as.numeric(lapply(listOfIdentifiers,length)),mtrx1)
return(mtrx2)
}
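# Illustrative usage (a sketch with hypothetical identifier sets, not taken from
# the package): the result has one row per set, a "full" column with each set's
# size, and "vs." columns with the pairwise intersection counts.
ids <- list(setA = c("g1", "g2", "g3"), setB = c("g2", "g3", "g4"))
calculateOverlaps(ids)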
|
/R/calculateOverlaps.R
|
no_license
|
scfurl/virtualArray
|
R
| false | false | 466 |
r
|
calculateOverlaps <- function(listOfIdentifiers){
mtrx1 <- mapply(
Y=names(listOfIdentifiers),
FUN=function(Y){
mapply( x=names(listOfIdentifiers),
FUN=function(x){
length(
intersect(listOfIdentifiers[[x]],listOfIdentifiers[[Y]])
)
}
)
}
)
colnames(mtrx1) <- apply(X=as.array(colnames(mtrx1)),FUN=function(X){paste("vs.",X,sep="")},1)
mtrx2 <- cbind(full=as.numeric(lapply(listOfIdentifiers,length)),mtrx1)
return(mtrx2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count_model_utilities.R
\name{dzip}
\alias{dzip}
\title{ZIP model pdf}
\usage{
dzip(x, lambda, pi)
}
\arguments{
\item{x}{Value the pdf is to be evaluated at}
\item{lambda}{The expected value for the Poisson model}
\item{pi}{The probability of being a structural zero}
}
\value{
The probability
}
\description{
Evaluates the ZIP model pdf at a particular value x.
}
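\examples{
# A hedged sketch: assuming dzip() implements the standard ZIP pmf,
#   P(X = x) = pi * 1(x = 0) + (1 - pi) * dpois(x, lambda),
# its values can be checked against a direct computation in base R.
x <- 0:3
(x == 0) * 0.3 + (1 - 0.3) * dpois(x, lambda = 2)
# dzip(x, lambda = 2, pi = 0.3)  # should agree with the line above
}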
|
/man/dzip.Rd
|
no_license
|
daniel-conn17/scRNAzirf
|
R
| false | true | 436 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count_model_utilities.R
\name{dzip}
\alias{dzip}
\title{ZIP model pdf}
\usage{
dzip(x, lambda, pi)
}
\arguments{
\item{x}{Value the pdf is to be evaluated at}
\item{lambda}{The expected value for the Poisson model}
\item{pi}{The probability of being a structural zero}
}
\value{
The probability
}
\description{
Evaluates the ZIP model pdf at a particular value x.
}
|
GoogleRoadDist<- function(orilat,orilon,deslat,deslon,mode='driving',apikey=NULL,departure_time=NULL,arrival_time=NULL,transit_mode=NULL,transit_routing_preference=NULL){
requireNamespace("XML",quietly = TRUE)
requireNamespace("RCurl",quietly = TRUE)
requireNamespace("stringr",quietly = TRUE)
origin<-paste0(orilat,',',orilon)
destination<-paste0(deslat,',',deslon)
mode<-paste0(mode)
apikey<-paste0("&key=",as.character(apikey))
core<-"https://maps.googleapis.com/maps/api/distancematrix/xml?origins="
xml.url <- paste0(core,origin,'&destinations=',destination,'&mode=',mode)
xmlfile <- XML::xmlTreeParse(RCurl::getURL(xml.url))
xmltop <- XML::xmlRoot(xmlfile)
distance <- xmltop[['row']][[1]][['distance']][['value']][[1]]
distance <- as.numeric(unclass(distance)[['value']])
miles <- distance*0.000621371
duration <- xmltop[['row']][[1]][['duration']][['text']][[1]]
duration <- unclass(duration)[['value']]
duration <- as.numeric(stringr::str_extract(duration,"\\d"))
minutes<-duration
D<-list(miles=miles,minutes=minutes)
return(D)
}
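## Illustrative call with hypothetical coordinates (kept commented out as a sketch:
## it needs network access, and note that the apikey argument is built but never
## appended to the request URL as written):
# GoogleRoadDist(orilat = 40.7128, orilon = -74.0060,
#                deslat = 42.3601, deslon = -71.0589, mode = "driving")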
|
/R/GoogleRoadDist.R
|
no_license
|
estebanlp/GoogleDataSearch
|
R
| false | false | 1,090 |
r
|
GoogleRoadDist<- function(orilat,orilon,deslat,deslon,mode='driving',apikey=NULL,departure_time=NULL,arrival_time=NULL,transit_mode=NULL,transit_routing_preference=NULL){
requireNamespace("XML",quietly = TRUE)
requireNamespace("RCurl",quietly = TRUE)
requireNamespace("stringr",quietly = TRUE)
origin<-paste0(orilat,',',orilon)
destination<-paste0(deslat,',',deslon)
mode<-paste0(mode)
apikey<-paste0("&key=",as.character(apikey))
core<-"https://maps.googleapis.com/maps/api/distancematrix/xml?origins="
xml.url <- paste0(core,origin,'&destinations=',destination,'&mode=',mode)
xmlfile <- XML::xmlTreeParse(RCurl::getURL(xml.url))
xmltop <- XML::xmlRoot(xmlfile)
distance <- xmltop[['row']][[1]][['distance']][['value']][[1]]
distance <- as.numeric(unclass(distance)[['value']])
miles <- distance*0.000621371
duration <- xmltop[['row']][[1]][['duration']][['text']][[1]]
duration <- unclass(duration)[['value']]
duration <- as.numeric(stringr::str_extract(duration,"\\d"))
minutes<-duration
D<-list(miles=miles,minutes=minutes)
return(D)
}
|
#' @title Generating rsem-star genome index
#' @description This function executes the docker container rsem-star1 where RSEM and STAR are installed. The index is created from an ENSEMBL genome FASTA file. The user needs to provide the URL of the ENSEMBL genome located on the ENSEMBL FTP site
#'
#' @param group, a character string. Two options: \code{"sudo"} or \code{"docker"}, depending to which group the user belongs
#' @param genome.folder, a character string indicating the folder where the indexed reference genome for rsem-star will be located
#' @param ensembl.urlgenome, a character string indicating the URL from ENSEMBL ftp for the unmasked genome sequence of interest
#' @param ensembl.urlgtf, a character string indicating the URL from ENSEMBL ftp for the GTF for genome of interest
#' @param threads, a number indicating the number of cores to be used from the application
#'
#' @return The indexed rsem-star genome reference sequence
#' @examples
#'\dontrun{
#' #running rsemstar index for human
#' rsemstarIndex(group="sudo",genome.folder="/data/scratch/hg38star",
#' ensembl.urlgenome=
#' "ftp://ftp.ensembl.org/pub/release-87/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.toplevel.fa.gz",
#' ensembl.urlgtf=
#' "ftp://ftp.ensembl.org/pub/release-87/gtf/homo_sapiens/Homo_sapiens.GRCh38.87.gtf.gz",
#' threads=24)
#'
#' #running rsemstar index for mouse
#' rsemstarIndex(group="docker",genome.folder="/data/scratch/mm10star",
#' ensembl.urlgenome="ftp://ftp.ensembl.org/pub/release-87/fasta/mus_musculus/dna/Mus_musculus.GRCm38.dna.toplevel.fa.gz",
#' ensembl.urlgtf="ftp://ftp.ensembl.org/pub/release-87/gtf/mus_musculus/Mus_musculus.GRCm38.87.gtf.gz",
#' threads=24)
#'
#' }
#' @export
rsemstarIndex <- function(group=c("sudo","docker"), genome.folder=getwd(), ensembl.urlgenome=NULL, ensembl.urlgtf=NULL, threads=1){
#running time 1
ptm <- proc.time()
#running time 1
test <- dockerTest()
if(!test){
cat("\nERROR: Docker seems not to be installed in your system\n")
return()
}
#########check scratch folder exist###########
if (!file.exists(genome.folder)){
cat(paste("\nIt seems that the ",genome.folder, "folder does not exist, I create it\n"))
dir.create(genome.folder)
}
#############################################
cat("\nsetting as working dir the genome folder and running bwa docker container\n")
if(group=="sudo"){
params <- paste("--cidfile ",genome.folder,"/dockerID -v ",genome.folder,":/data/scratch"," -d docker.io/rcaloger/rsemstar.2017.01 sh /bin/rsemstar.index.sh "," ",genome.folder," ",ensembl.urlgenome," ",ensembl.urlgtf," ",threads, sep="")
runDocker(group="sudo",container="docker.io/rcaloger/rsemstar.2017.01", params=params)
}else{
params <- paste("--cidfile ",genome.folder,"/dockerID -v ",genome.folder,":/data/scratch"," -d docker.io/rcaloger/rsemstar.2017.01 sh /bin/rsemstar.index.sh "," ",genome.folder," ",ensembl.urlgenome," ",ensembl.urlgtf," ",threads, sep="")
runDocker(group="docker",container="docker.io/rcaloger/rsemstar.2017.01", params=params)
}
out <- "xxxx"
#waiting for the end of the container work
while(out != "out.info"){
Sys.sleep(10)
cat(".")
out.tmp <- dir(genome.folder)
out.tmp <- out.tmp[grep("out.info",out.tmp)]
if(length(out.tmp)>0){
out <- "out.info"
}
}
#running time 2
ptm <- proc.time() - ptm
con <- file(paste(genome.folder,"run.info", sep="/"), "r")
tmp.run <- readLines(con)
close(con)
tmp.run <- NULL
tmp.run[length(tmp.run)+1] <- paste("user run time mins ",ptm[1]/60, sep="")
tmp.run[length(tmp.run)+1] <- paste("system run time mins ",ptm[2]/60, sep="")
tmp.run[length(tmp.run)+1] <- paste("elapsed run time mins ",ptm[3]/60, sep="")
writeLines(tmp.run, paste(genome.folder,"run.info", sep="/"))
#saving log and removing docker container
container.id <- readLines(paste(genome.folder,"/dockerID", sep=""), warn = FALSE)
system(paste("docker logs ", container.id, " >& ", substr(container.id,1,12),".log", sep=""))
system(paste("docker rm ", container.id, sep=""))
#running time 2
system(paste("rm ",genome.folder,"/dockerID", sep=""))
system(paste("cp ",paste(path.package(package="docker4seq"),"containers/containers.txt",sep="/")," ",genome.folder, sep=""))
}
|
/R/rsemstarIndex.R
|
no_license
|
inambioinfo/docker4seq
|
R
| false | false | 4,329 |
r
|
#' @title Generating rsem-star genome index
#' @description This function executes the docker container rsem-star1 where RSEM and STAR are installed. The index is created from an ENSEMBL genome FASTA file. The user needs to provide the URL of the ENSEMBL genome located on the ENSEMBL FTP site
#'
#' @param group, a character string. Two options: \code{"sudo"} or \code{"docker"}, depending to which group the user belongs
#' @param genome.folder, a character string indicating the folder where the indexed reference genome for rsem-star will be located
#' @param ensembl.urlgenome, a character string indicating the URL from ENSEMBL ftp for the unmasked genome sequence of interest
#' @param ensembl.urlgtf, a character string indicating the URL from ENSEMBL ftp for the GTF for genome of interest
#' @param threads, a number indicating the number of cores to be used from the application
#'
#' @return The indexed rsem-star genome reference sequence
#' @examples
#'\dontrun{
#' #running rsemstar index for human
#' rsemstarIndex(group="sudo",genome.folder="/data/scratch/hg38star",
#' ensembl.urlgenome=
#' "ftp://ftp.ensembl.org/pub/release-87/fasta/homo_sapiens/dna/Homo_sapiens.GRCh38.dna.toplevel.fa.gz",
#' ensembl.urlgtf=
#' "ftp://ftp.ensembl.org/pub/release-87/gtf/homo_sapiens/Homo_sapiens.GRCh38.87.gtf.gz",
#' threads=24)
#'
#' #running rsemstar index for mouse
#' rsemstarIndex(group="docker",genome.folder="/data/scratch/mm10star",
#' ensembl.urlgenome="ftp://ftp.ensembl.org/pub/release-87/fasta/mus_musculus/dna/Mus_musculus.GRCm38.dna.toplevel.fa.gz",
#' ensembl.urlgtf="ftp://ftp.ensembl.org/pub/release-87/gtf/mus_musculus/Mus_musculus.GRCm38.87.gtf.gz",
#' threads=24)
#'
#' }
#' @export
rsemstarIndex <- function(group=c("sudo","docker"), genome.folder=getwd(), ensembl.urlgenome=NULL, ensembl.urlgtf=NULL, threads=1){
#running time 1
ptm <- proc.time()
#running time 1
test <- dockerTest()
if(!test){
cat("\nERROR: Docker seems not to be installed in your system\n")
return()
}
#########check scratch folder exist###########
if (!file.exists(genome.folder)){
cat(paste("\nIt seems that the ",genome.folder, "folder does not exist, I create it\n"))
dir.create(genome.folder)
}
#############################################
cat("\nsetting as working dir the genome folder and running bwa docker container\n")
if(group=="sudo"){
params <- paste("--cidfile ",genome.folder,"/dockerID -v ",genome.folder,":/data/scratch"," -d docker.io/rcaloger/rsemstar.2017.01 sh /bin/rsemstar.index.sh "," ",genome.folder," ",ensembl.urlgenome," ",ensembl.urlgtf," ",threads, sep="")
runDocker(group="sudo",container="docker.io/rcaloger/rsemstar.2017.01", params=params)
}else{
params <- paste("--cidfile ",genome.folder,"/dockerID -v ",genome.folder,":/data/scratch"," -d docker.io/rcaloger/rsemstar.2017.01 sh /bin/rsemstar.index.sh "," ",genome.folder," ",ensembl.urlgenome," ",ensembl.urlgtf," ",threads, sep="")
runDocker(group="docker",container="docker.io/rcaloger/rsemstar.2017.01", params=params)
}
out <- "xxxx"
#waiting for the end of the container work
while(out != "out.info"){
Sys.sleep(10)
cat(".")
out.tmp <- dir(genome.folder)
out.tmp <- out.tmp[grep("out.info",out.tmp)]
if(length(out.tmp)>0){
out <- "out.info"
}
}
#running time 2
ptm <- proc.time() - ptm
con <- file(paste(genome.folder,"run.info", sep="/"), "r")
tmp.run <- readLines(con)
close(con)
tmp.run <- NULL
tmp.run[length(tmp.run)+1] <- paste("user run time mins ",ptm[1]/60, sep="")
tmp.run[length(tmp.run)+1] <- paste("system run time mins ",ptm[2]/60, sep="")
tmp.run[length(tmp.run)+1] <- paste("elapsed run time mins ",ptm[3]/60, sep="")
writeLines(tmp.run, paste(genome.folder,"run.info", sep="/"))
#saving log and removing docker container
container.id <- readLines(paste(genome.folder,"/dockerID", sep=""), warn = FALSE)
system(paste("docker logs ", container.id, " >& ", substr(container.id,1,12),".log", sep=""))
system(paste("docker rm ", container.id, sep=""))
#running time 2
system(paste("rm ",genome.folder,"/dockerID", sep=""))
system(paste("cp ",paste(path.package(package="docker4seq"),"containers/containers.txt",sep="/")," ",genome.folder, sep=""))
}
|
## Put comments here that give an overall description of what your
## functions do
##makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
##If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## Write a short comment describing this function
#The below function consists of 4 functions: set(): to set matrix, get(): to get matrix, setInverse(): to set the inverse of matrix calculated by using solve in a different environment
# getInverse(): to get inverse of matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Write a short comment describing this function
# The function below checks whether the inverse of the matrix has already been cached; if not, it
# calculates the inverse and caches it by calling the setInverse() function. If a cached inverse is
# found, a message is displayed and the cached inverse matrix is returned.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
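## Illustrative usage (a small sketch, not part of the original assignment):
## the first cacheSolve() call computes and caches the inverse, the second
## returns the cached copy and prints "getting cached data".
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)
cacheSolve(cm)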
|
/cachematrix.R
|
no_license
|
DSRM/ProgrammingAssignment2
|
R
| false | false | 1,575 |
r
|
## Put comments here that give an overall description of what your
## functions do
##makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
##cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
##If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## Write a short comment describing this function
#The below function consists of 4 functions: set(): to set matrix, get(): to get matrix, setInverse(): to set the inverse of matrix calculated by using solve in a different environment
# getInverse(): to get inverse of matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Write a short comment describing this function
# The function below checks whether the inverse of the matrix has already been cached; if not, it
# calculates the inverse and caches it by calling the setInverse() function. If a cached inverse is
# found, a message is displayed and the cached inverse matrix is returned.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
testlist <- list(lambda = numeric(0), logq = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), nu = numeric(0), tol = 0, ymax = 0)
result <- do.call(COMPoissonReg:::qcmp_cpp,testlist)
str(result)
|
/COMPoissonReg/inst/testfiles/qcmp_cpp/libFuzzer_qcmp_cpp/qcmp_cpp_valgrind_files/1612728489-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 450 |
r
|
testlist <- list(lambda = numeric(0), logq = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), nu = numeric(0), tol = 0, ymax = 0)
result <- do.call(COMPoissonReg:::qcmp_cpp,testlist)
str(result)
|
## we need a data object with doses >= 1:
data<-Data(x=c(25,50,50,75,150,200,225,300),
y=c(0,0,0,0,1,1,1,1),
doseGrid=seq(from=25,to=300,by=25))
##The 'nextBest' method using NextBestTD' rules class object
## That is, a dose-escalation procedure using the 'LogisticIndepBeta' DLE model involving DLE samples
## model must be of 'LogisticIndepBeta' class
model<-LogisticIndepBeta(binDLE=c(1.05,1.8),DLEweights=c(3,3),DLEdose=c(25,300),data=data)
##target probabilities of the occurrence of a DLE during trial and at the end of trial
## are defined as 0.35 and 0.3, respectively
tdNextBest<-NextBestTD(targetDuringTrial=0.35,targetEndOfTrial=0.3)
##doselimit is the maximum allowable dose level to be given to subjects
RecommendDose<- nextBest(tdNextBest,
doselimit=max(data@doseGrid),
model=model,
data=data)
|
/examples/Rules-method-nextbest_TD.R
|
no_license
|
insightsengineering/crmPack
|
R
| false | false | 869 |
r
|
## we need a data object with doses >= 1:
data<-Data(x=c(25,50,50,75,150,200,225,300),
y=c(0,0,0,0,1,1,1,1),
doseGrid=seq(from=25,to=300,by=25))
##The 'nextBest' method using NextBestTD' rules class object
## That is, a dose-escalation procedure using the 'LogisticIndepBeta' DLE model involving DLE samples
## model must be of 'LogisticIndepBeta' class
model<-LogisticIndepBeta(binDLE=c(1.05,1.8),DLEweights=c(3,3),DLEdose=c(25,300),data=data)
##target probabilities of the occurrence of a DLE during trial and at the end of trial
## are defined as 0.35 and 0.3, respectively
tdNextBest<-NextBestTD(targetDuringTrial=0.35,targetEndOfTrial=0.3)
##doselimit is the maximum allowable dose level to be given to subjects
RecommendDose<- nextBest(tdNextBest,
doselimit=max(data@doseGrid),
model=model,
data=data)
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for the Titanic survival prediction application
shinyUI(fluidPage(
# Application title
titlePanel("Predicting the Survival on the Titanic"),
  # Sidebar with radio-button inputs for the plot and prediction options
sidebarLayout(
sidebarPanel(
h1('Plot Options'),
helpText("Change the class for which survival probabilities are shown in the histogram"),
radioButtons("Class", 'Class', choices=c('1st', '2nd', '3rd', 'Crew')),
h2('Prediction Options'),
helpText("Make a prediction for a passenger based on their class, sex and age"),
radioButtons("class", 'Class', choices=c('1st', '2nd', '3rd', 'Crew')),
radioButtons("Sex", 'Gender', choices=c('Male', 'Female')),
radioButtons("Age", 'Age', choices=c('Child', 'Adult'))
),
# Show a plot of the generated distribution
mainPanel(
p(paste0("To visualize the survival probabilities for passengers of different classes, change the class in the ",
"plot options in the sidebar. Make a prediction for whether a passenger would have survived or not, ",
"specify the passenger's class, sex and age in the prediction options.")),
plotOutput("barplot"),
h3(textOutput('prediction'))
)
)
))
|
/ui.R
|
no_license
|
Yiwen7735/DevelopingDataProductsWeek4
|
R
| false | false | 1,537 |
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for the Titanic survival prediction application
shinyUI(fluidPage(
# Application title
titlePanel("Predicting the Survival on the Titanic"),
  # Sidebar with radio-button inputs for the plot and prediction options
sidebarLayout(
sidebarPanel(
h1('Plot Options'),
helpText("Change the class for which survival probabilities are shown in the histogram"),
radioButtons("Class", 'Class', choices=c('1st', '2nd', '3rd', 'Crew')),
h2('Prediction Options'),
helpText("Make a prediction for a passenger based on their class, sex and age"),
radioButtons("class", 'Class', choices=c('1st', '2nd', '3rd', 'Crew')),
radioButtons("Sex", 'Gender', choices=c('Male', 'Female')),
radioButtons("Age", 'Age', choices=c('Child', 'Adult'))
),
# Show a plot of the generated distribution
mainPanel(
p(paste0("To visualize the survival probabilities for passengers of different classes, change the class in the ",
"plot options in the sidebar. Make a prediction for whether a passenger would have survived or not, ",
"specify the passenger's class, sex and age in the prediction options.")),
plotOutput("barplot"),
h3(textOutput('prediction'))
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_revi.R
\name{theme_revi}
\alias{theme_revi}
\title{Revi theme, based on Theme Ipsum RC by Hrbrthemes}
\usage{
theme_revi(border = TRUE, ...)
}
\arguments{
\item{border}{Indicates whether the panel border should be included on the plot}
\item{...}{params for theme_ipsum_rc}
}
\description{
Revi theme, based on Theme Ipsum RC by Hrbrthemes
}
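\examples{
\dontrun{
# A hedged sketch: assumes ggplot2 is attached and that the Roboto Condensed
# fonts used by hrbrthemes::theme_ipsum_rc() are installed on the system.
library(ggplot2)
ggplot(mtcars, aes(wt, mpg)) +
  geom_point() +
  theme_revi(border = FALSE)
}
}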
|
/man/theme_revi.Rd
|
no_license
|
rorevillaca/ReviMisc
|
R
| false | true | 427 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_revi.R
\name{theme_revi}
\alias{theme_revi}
\title{Revi theme, based on Theme Ipsum RC by Hrbrthemes}
\usage{
theme_revi(border = TRUE, ...)
}
\arguments{
\item{border}{Indicates whether the panel border should be included on the plot}
\item{...}{params for theme_ipsum_rc}
}
\description{
Revi theme, based on Theme Ipsum RC by Hrbrthemes
}
|
library('plot3D')
STEM2=read.table('STEM3.mouse')
pbmc_stem2=pbmc@data[which(rownames(pbmc@data) %in% STEM2[,3]),]
stem2_var=apply(pbmc_stem2,1,var)
stem2_var_gene=which(rank(-stem2_var)<=200)
#pbmc_step2_sum=apply(pbmc_stem2[stem2_var_gene,],2,mean)
pbmc_step2_sum=apply(pbmc_stem2[stem2_var_gene,],2,median)
#pbmc_step2_sum=apply(pbmc_stem2,2,quantile,0.75)
pbmc_stem2_added=AddMetaData(object=pbmc,metadata=pbmc_step2_sum,col.name='STEM2')
#VlnPlot(object=pbmc_stem2_added,features.plot=c('STEM2'))
phi=0
theta=60
pdf('3D_PLOT_MEDIAN.pdf')
PCX=1
PCY=2
cor(pbmc_stem2_added@pca.rot[,PCX])
VlnPlot(object=pbmc_stem2_added,features.plot=c('STEM2'))
x=pbmc_stem2_added@pca.rot[,PCX]
y=pbmc_stem2_added@pca.rot[,PCY]
z=pbmc_step2_sum
scatter3D(x,y,z,pch=16,phi = 0, theta = -45)
scatter3D(x,y,z,pch=16,phi = 30, theta = -45)
scatter3D(x,y,z,pch=16,phi = 45, theta = -45)
scatter3D(x,y,z,pch=16,phi = 60, theta = -45)
scatter3D(x,y,z,pch=16,phi = 45, theta = 60)
scatter3D(x,y,z,pch=16,phi = 45, theta = 45)
scatter3D(x,y,z,pch=16,phi = 45, theta = 30)
scatter3D(x,y,z,pch=16,phi = 45, theta = 0)
scatter3D(x,y,z,pch=16,phi = 45, theta = -30)
scatter3D(x,y,z,pch=16,phi = 45, theta = -45)
scatter3D(x,y,z,pch=16,phi = 45, theta = -60)
scatter3D(x,y,z,pch=16,phi = 0, theta = 60)
scatter3D(x,y,z,pch=16,phi = 0, theta = 45)
scatter3D(x,y,z,pch=16,phi = 0, theta = 30)
scatter3D(x,y,z,pch=16,phi = 0, theta = 0)
scatter3D(x,y,z,pch=16,phi = 0, theta = -30)
scatter3D(x,y,z,pch=16,phi = 0, theta = -45)
scatter3D(x,y,z,pch=16,phi = 0, theta = -60)
i=0
while(i<=9){
COL=rep('grey90',length(pbmc_stem2_added@pca.rot[,1]))
cc=which(pbmc@ident==i)
COL[cc]='red'
scatter3D(x=pbmc_stem2_added@pca.rot[,PCX],y=pbmc_stem2_added@pca.rot[,PCY],z=pbmc_step2_sum,pch=16,phi = phi, theta = theta,col=COL,main=as.character(i))
i=i+1}
dev.off()
|
/Plot3D/go.R
|
no_license
|
jumphone/Bioinformatics
|
R
| false | false | 1,830 |
r
|
library('plot3D')
STEM2=read.table('STEM3.mouse')
pbmc_stem2=pbmc@data[which(rownames(pbmc@data) %in% STEM2[,3]),]
stem2_var=apply(pbmc_stem2,1,var)
stem2_var_gene=which(rank(-stem2_var)<=200)
#pbmc_step2_sum=apply(pbmc_stem2[stem2_var_gene,],2,mean)
pbmc_step2_sum=apply(pbmc_stem2[stem2_var_gene,],2,median)
#pbmc_step2_sum=apply(pbmc_stem2,2,quantile,0.75)
pbmc_stem2_added=AddMetaData(object=pbmc,metadata=pbmc_step2_sum,col.name='STEM2')
#VlnPlot(object=pbmc_stem2_added,features.plot=c('STEM2'))
phi=0
theta=60
pdf('3D_PLOT_MEDIAN.pdf')
PCX=1
PCY=2
cor(pbmc_stem2_added@pca.rot[,PCX])
VlnPlot(object=pbmc_stem2_added,features.plot=c('STEM2'))
x=pbmc_stem2_added@pca.rot[,PCX]
y=pbmc_stem2_added@pca.rot[,PCY]
z=pbmc_step2_sum
scatter3D(x,y,z,pch=16,phi = 0, theta = -45)
scatter3D(x,y,z,pch=16,phi = 30, theta = -45)
scatter3D(x,y,z,pch=16,phi = 45, theta = -45)
scatter3D(x,y,z,pch=16,phi = 60, theta = -45)
scatter3D(x,y,z,pch=16,phi = 45, theta = 60)
scatter3D(x,y,z,pch=16,phi = 45, theta = 45)
scatter3D(x,y,z,pch=16,phi = 45, theta = 30)
scatter3D(x,y,z,pch=16,phi = 45, theta = 0)
scatter3D(x,y,z,pch=16,phi = 45, theta = -30)
scatter3D(x,y,z,pch=16,phi = 45, theta = -45)
scatter3D(x,y,z,pch=16,phi = 45, theta = -60)
scatter3D(x,y,z,pch=16,phi = 0, theta = 60)
scatter3D(x,y,z,pch=16,phi = 0, theta = 45)
scatter3D(x,y,z,pch=16,phi = 0, theta = 30)
scatter3D(x,y,z,pch=16,phi = 0, theta = 0)
scatter3D(x,y,z,pch=16,phi = 0, theta = -30)
scatter3D(x,y,z,pch=16,phi = 0, theta = -45)
scatter3D(x,y,z,pch=16,phi = 0, theta = -60)
i=0
while(i<=9){
COL=rep('grey90',length(pbmc_stem2_added@pca.rot[,1]))
cc=which(pbmc@ident==i)
COL[cc]='red'
scatter3D(x=pbmc_stem2_added@pca.rot[,PCX],y=pbmc_stem2_added@pca.rot[,PCY],z=pbmc_step2_sum,pch=16,phi = phi, theta = theta,col=COL,main=as.character(i))
i=i+1}
dev.off()
|
# Script for simple function that checks the difference in height from the sex-
# specific mean for each of the students in the given dataframe
# Date: 24.10.2017
# Author: Jann Goschenhofer
library(dplyr)
age = c(19, 22, 21, 23, 22, 20, 28, 25)
weight = c(50, 75, 80, 56, 75, 58, 65, 82)
height = c(1.66, 1.78, 1.90, 1.72, 1.83, 1.68, 1.70, 1.85)
sex = c("F", "M", "M", "F", "M", "F", "F", "M")
students = data.frame(cbind(age, weight, height, sex))
students = transform(students, age = as.numeric(as.character(age)))
students = transform(students, height = as.numeric(as.character(height)))
students = transform(students, weight = as.numeric(as.character(weight)))
students$name = c("Maria", "Franz", "Peter", "Lisa", "Hans", "Eva", "Mia", "Karl")
checkHeight3 = function(students.input = students){
#' calculate height difference of persons in a data.frame to
#' the average height of their gender
#' @param students.input ('data.frame') \cr
#' data.frame of students, needs columns height, sex and name
#' @return ('data.frame') \cr
#' returns data.frame with names and difference to average gender height
# create return dataframe
result.frame = data.frame(matrix(NA, nrow = nrow(students.input), ncol = 2))
colnames(result.frame) = c("name", "difference")
# calculate average height by gender
male.mean = students.input %>%
filter(sex == "M") %>%
summarise(mean = mean(height))
female.mean = students.input %>%
filter(sex == "F") %>%
summarise(mean = mean(height))
# calculate diff for each person
height.diff = apply(students.input, 1, function(person, avg_male, avg_female) {
getDiff(avg_male, avg_female, data.frame(t(person)))
}, "avg_male" = male.mean$mean, "avg_female" = female.mean$mean)
# fill return data.frame
result.frame$name = students.input$name
result.frame$difference = height.diff
return(result.frame)
}
getDiff = function(avg_male, avg_female, person) {
#' function to calculate difference of height of one person
#' to average height based on gender
#' @param avg_male ('float(1)')\cr
#' average male height
#' @param avg_female ('float(1)')\cr
#' average female height
#' @return ('float(1)')\cr
#' difference of persons height to average
mean_height = ifelse(person$sex == "F", avg_female, avg_male)
diff = (as.numeric(as.character(person$height)) - mean_height)*100
return(diff)
}
checkHeight3(students.input = students)
|
/checkHeight_script.R
|
no_license
|
dandls/heightchecker
|
R
| false | false | 2,466 |
r
|
# Script for simple function that checks the difference in height from the sex-
# specific mean for each of the students in the given dataframe
# Date: 24.10.2017
# Author: Jann Goschenhofer
library(dplyr)
age = c(19, 22, 21, 23, 22, 20, 28, 25)
weight = c(50, 75, 80, 56, 75, 58, 65, 82)
height = c(1.66, 1.78, 1.90, 1.72, 1.83, 1.68, 1.70, 1.85)
sex = c("F", "M", "M", "F", "M", "F", "F", "M")
students = data.frame(cbind(age, weight, height, sex))
students = transform(students, age = as.numeric(as.character(age)))
students = transform(students, height = as.numeric(as.character(height)))
students = transform(students, weight = as.numeric(as.character(weight)))
students$name = c("Maria", "Franz", "Peter", "Lisa", "Hans", "Eva", "Mia", "Karl")
checkHeight3 = function(students.input = students){
#' calculate height difference of persons in a data.frame to
#' the average height of their gender
#' @param students.input ('data.frame') \cr
#' data.frame of students, needs columns height, sex and name
#' @return ('data.frame') \cr
#' returns data.frame with names and difference to average gender height
# create return dataframe
result.frame = data.frame(matrix(NA, nrow = nrow(students.input), ncol = 2))
colnames(result.frame) = c("name", "difference")
# calculate average height by gender
male.mean = students.input %>%
filter(sex == "M") %>%
summarise(mean = mean(height))
female.mean = students.input %>%
filter(sex == "F") %>%
summarise(mean = mean(height))
# calculate diff for each person
height.diff = apply(students.input, 1, function(person, avg_male, avg_female) {
getDiff(avg_male, avg_female, data.frame(t(person)))
}, "avg_male" = male.mean$mean, "avg_female" = female.mean$mean)
# fill return data.frame
result.frame$name = students.input$name
result.frame$difference = height.diff
return(result.frame)
}
getDiff = function(avg_male, avg_female, person) {
#' function to calculate difference of height of one person
#' to average height based on gender
#' @param avg_male ('float(1)')\cr
#' average male height
#' @param avg_female ('float(1)')\cr
#' average female height
#' @return ('float(1)')\cr
#' difference of persons height to average
mean_height = ifelse(person$sex == "F", avg_female, avg_male)
diff = (as.numeric(as.character(person$height)) - mean_height)*100
return(diff)
}
checkHeight3(students.input = students)
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test BatchParsedFullNameIn")
model.instance <- BatchParsedFullNameIn$new()
test_that("personal_names", {
# tests for the property `personal_names` (ParsedFullNameIn)
# uncomment below to test the property
#expect_equal(model.instance$`personal_names`, "EXPECTED_RESULT")
})
|
/tests/testthat/test_batch_parsed_full_name_in.R
|
no_license
|
namsor/namsor-r-sdk2
|
R
| false | false | 414 |
r
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test BatchParsedFullNameIn")
model.instance <- BatchParsedFullNameIn$new()
test_that("personal_names", {
# tests for the property `personal_names` (ParsedFullNameIn)
# uncomment below to test the property
#expect_equal(model.instance$`personal_names`, "EXPECTED_RESULT")
})
|
# R script does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Style guide used:
# Hadley Wickham's R Style Guide
# http://adv-r.had.co.nz/Style.html
#
library(data.table)
library(stringr)
library(dplyr)
library(tidyr)
# Import the activity labels from the current directory
#
activity_labels <- fread("./activity_labels.txt", stringsAsFactors = FALSE)
setnames(activity_labels, c("V1", "V2"), c("value", "label"))
# Import and clean the feature names by:
# removing non alpha-numeric characters
# replacing '-' with '_'
# converting names to lowercase
#
# Piping '%>%' used to simplify code
#
feature_names <- fread("./features.txt", stringsAsFactors = FALSE)
feature_names <- (
feature_names %>%
select(feature_name = V2) %>%
mutate(feature_name = str_replace_all(feature_name, "\\,", "")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\-", "_")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\(\\)", "")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\(", "")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\)", "")) %>%
mutate(feature_name = tolower(feature_name))
)
# Function to import the training or test datasets based on the subfolder
# of the current directory and file naming scheme, and combine
# the subject and activity variables with the rest of the measurements,
# Apply the cleaned variable names to the datasets.
#
# The function enables code reuse.
#
importdata <- function(dataset) {
x <- data.table(read.table(paste("./", dataset, "/X_", dataset, ".txt", sep = "")))
setnames(x, colnames(x), feature_names[, feature_name])
y <- data.table(read.table(paste("./", dataset, "/y_", dataset, ".txt", sep = "")))
setnames(y, "V1", "activity")
subject <- data.table(read.table(paste("./", dataset, "/subject_", dataset, ".txt", sep = "")))
setnames(subject, "V1", "subject")
y_x <- cbind(y, x)
cbind(subject, y_x)
}
# Import the training and test datasets
#
subject_y_x_train <- importdata("train")
subject_y_x_test <- importdata("test")
# Combine the full training and test datasets and
# change the activity variable from integer to a factor
# with the appropriate activity label.
#
all_data <- (
  rbind(subject_y_x_train, subject_y_x_test) %>%
    mutate(activity = cut(activity, breaks = nrow(activity_labels), labels = activity_labels[, label]))
)
# Select only columns which contain mean or standard deviation
#
all_data <- cbind(select(all_data, c(subject, activity)), select(all_data, contains("mean")), select(all_data, contains("std")))
# Calculate the means of the retained variables
#
summary_data <- as.data.table(all_data)[, lapply(.SD, mean, na.rm = TRUE), by = list(activity, subject)]
# Output the summary dataset for uploading
#
write.table(summary_data, file = "summary_data.txt", append = FALSE, sep = "\t", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
basilrormose/GettingAndCleaningDataProject
|
R
| false | false | 3,348 |
r
|
# R script does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Style guide used:
# Hadley Wickham's R Style Guide
# http://adv-r.had.co.nz/Style.html
#
library(data.table)
library(stringr)
library(dplyr)
library(tidyr)
# Import the activity labels from the current directory
#
activity_labels <- fread("./activity_labels.txt", stringsAsFactors = FALSE)
setnames(activity_labels, c("V1", "V2"), c("value", "label"))
# Import and clean the feature names by:
# removing non alpha-numeric characters
# replacing '-' with '_'
# converting names to lowercase
#
# Piping '%>%' used to simplify code
#
feature_names <- fread("./features.txt", stringsAsFactors = FALSE)
feature_names <- (
feature_names %>%
select(feature_name = V2) %>%
mutate(feature_name = str_replace_all(feature_name, "\\,", "")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\-", "_")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\(\\)", "")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\(", "")) %>%
mutate(feature_name = str_replace_all(feature_name, "\\)", "")) %>%
mutate(feature_name = tolower(feature_name))
)
# Function to import the training or test datasets based on the subfolder
# of the current directory and file naming scheme, and combine
# the subject and activity variables with the rest of the measurements,
# Apply the cleaned variable names to the datasets.
#
# The function enables code reuse.
#
importdata <- function(dataset) {
x <- data.table(read.table(paste("./", dataset, "/X_", dataset, ".txt", sep = "")))
setnames(x, colnames(x), feature_names[, feature_name])
y <- data.table(read.table(paste("./", dataset, "/y_", dataset, ".txt", sep = "")))
setnames(y, "V1", "activity")
subject <- data.table(read.table(paste("./", dataset, "/subject_", dataset, ".txt", sep = "")))
setnames(subject, "V1", "subject")
y_x <- cbind(y, x)
cbind(subject, y_x)
}
# Import the training and test datasets
#
subject_y_x_train <- importdata("train")
subject_y_x_test <- importdata("test")
# Combine the full training and test datasets and
# change the activity variable from integer to a factor
# with the appropriate activity label.
#
all_data <- (
  rbind(subject_y_x_train, subject_y_x_test) %>%
    mutate(activity = cut(activity, breaks = nrow(activity_labels), labels = activity_labels[, label]))
)
# Select only columns which contain mean or standard deviation
#
all_data <- cbind(select(all_data, c(subject, activity)), select(all_data, contains("mean")), select(all_data, contains("std")))
# Calculate the means of the retained variables
#
summary_data <- as.data.table(all_data)[, lapply(.SD, mean, na.rm = TRUE), by = list(activity, subject)]
# Output the summary dataset for uploading
#
write.table(summary_data, file = "summary_data.txt", append = FALSE, sep = "\t", row.names = FALSE)
|
## ----eval=FALSE----------------------------------------------------------
#
# drawPoleFunc <- function(fixedEnd.x,fixedEnd.y,poleLength, theta,
# fillColour=NA, borderColour="black"){
# floatingEnd.x <- fixedEnd.x-poleLength * sin(theta)
# floatingEnd.y <- fixedEnd.y+poleLength * cos(theta)
#
# polygon(c(fixedEnd.x,floatingEnd.x,floatingEnd.x,fixedEnd.x),
# c(fixedEnd.y,floatingEnd.y,floatingEnd.y,fixedEnd.y),
# col = fillColour, border=borderColour)
# }
#
# drawPendulum <- function(fixedEnd.x,fixedEnd.y,poleLength, theta,
# radius,fillColour=NA, borderColour="black"){
# floatingEnd.x <- fixedEnd.x-poleLength * sin(theta)
# floatingEnd.y <- fixedEnd.y+poleLength * cos(theta)
# createCircleFunc(floatingEnd.x,floatingEnd.y,radius,fillColour,borderColour)
# }
#
# #Parameters to control the simulation
# simulation.timestep = 0.005
# simulation.gravity = 9.8 #meters per second^2
# simulation.numoftimesteps = 2000
#
# pole.length = 1 #meters, total pole length
# pole.width = 0.2
# pole.theta = pi
# pole.thetaDot = 0
# pole.thetaDotDot = 0
# pole.colour = "purple"
#
#
# pendulum.centerX = NA
# pendulum.centerY = NA
# pendulum.radius = 0.1
# pendulum.mass = 0.1
# pendulum.colour = "purple"
#
# cart.width=0.5
# cart.centerX = 0
# cart.centerY = 0
# cart.height=0.2
# cart.colour="red"
# cart.centerXDot = 0
# cart.centerXDotDot = 0
# cart.mass = 0.4
# cart.force = 0
# cart.mu=2
#
#
# track.limit= 10 #meters from center
# track.x = -track.limit
# track.height=0.01
# track.y = 0.5*track.height
# track.colour = "blue"
#
# leftBuffer.width=0.1
# leftBuffer.height=0.2
# leftBuffer.x=-track.limit-0.5*cart.width-leftBuffer.width
# leftBuffer.y=0.5*leftBuffer.height
# leftBuffer.colour = "blue"
#
# rightBuffer.width=0.1
# rightBuffer.height=0.2
# rightBuffer.x=track.limit+0.5*cart.width
# rightBuffer.y=0.5*rightBuffer.height
# rightBuffer.colour = "blue"
#
# #Define the size of the scene (used to visualise what is happening in the simulation)
# scene.width = 2*max(rightBuffer.x+rightBuffer.width,track.limit+pole.length+pendulum.radius)
# scene.bottomLeftX = -0.5*scene.width
# scene.height=max(pole.length+pendulum.radius,scene.width)
# scene.bottomLeftY = -0.5*scene.height
#
# poleBalance.InitialState <- function(){
# state <- list()
# state[1] <- cart.centerX
# state[2] <- cart.centerXDot
# state[3] <- cart.centerXDotDot
# state[4] <- cart.force
# state[5] <- pole.theta
# state[6] <- pole.thetaDot
# state[7] <- pole.thetaDotDot
# return(state)
# }
#
# poleBalance.ConvertStateToNeuralNetInputs <- function(currentState){
# return (currentState)
# }
#
# poleBalance.UpdatePoleState <- function(currentState,neuralNetOutputs){
# #print("Updating pole state")
# #print(neuralNetOutputs)
# cart.centerX <- currentState[[1]]
# cart.centerXDot <- currentState[[2]]
# cart.centerXDotDot <- currentState[[3]]
# cart.force <- currentState[[4]]+neuralNetOutputs[[1]]
# pole.theta <- currentState[[5]]
# pole.thetaDot <- currentState[[6]]
# pole.thetaDotDot <- currentState[[7]]
#
# costheta = cos(pole.theta)
# sintheta = sin(pole.theta)
# totalmass = cart.mass+pendulum.mass
# masslength = pendulum.mass*pole.length
#
# pole.thetaDotDot = (simulation.gravity*totalmass*sintheta+costheta*
# (cart.force-masslength*pole.thetaDot^2*sintheta-cart.mu*cart.centerXDot))/
# (pole.length*(totalmass-pendulum.mass*costheta^2))
#
# cart.centerXDotDot =(cart.force+masslength*(pole.thetaDotDot*costheta-pole.thetaDot^2*sintheta)-
# cart.mu*cart.centerXDot)/totalmass
#
# cart.centerX = cart.centerX+simulation.timestep*cart.centerXDot
# cart.centerXDot = cart.centerXDot+simulation.timestep*cart.centerXDotDot
# pole.theta = (pole.theta +simulation.timestep*pole.thetaDot )
# pole.thetaDot = pole.thetaDot+simulation.timestep*pole.thetaDotDot
#
# currentState[1] <- cart.centerX
# currentState[2] <- cart.centerXDot
# currentState[3] <- cart.centerXDotDot
# currentState[4] <- cart.force
# currentState[5] <- pole.theta
# currentState[6] <- pole.thetaDot
# currentState[7] <- pole.thetaDotDot
# return (currentState)
# }
#
#
#
# poleBalance.UpdateFitness <- function(oldState,updatedState,oldFitness){
# #return (oldFitness+1) #fitness is just how long we've ran for
# #return (oldFitness+((track.limit-abs(updatedState[[1]]))/track.limit)^2)
# #More reward for staying near middle of track
#
# height <- cos(updatedState[[5]]) #is -ve if below track
# heightFitness <- max(height,0)
# centerFitness <- (track.limit-abs(updatedState[[1]]))/track.limit
# return (oldFitness+(heightFitness + heightFitness*centerFitness))
# }
#
# poleBalance.CheckForTermination <- function(frameNum,oldState,updatedState,oldFitness,newFitness){
# cart.centerX <- updatedState[[1]]
# cart.centerXDot <- updatedState[[2]]
# cart.centerXDotDot <- updatedState[[3]]
# cart.force <- updatedState[[4]]
# pole.theta <- updatedState[[5]]
# pole.thetaDot <- updatedState[[6]]
# pole.thetaDotDot <- updatedState[[7]]
#
# oldpole.theta <- oldState[[5]]
# if(frameNum > 20000){
# print("Max Frame Num Exceeded , stopping simulation")
# return (TRUE)
# }
#
# height <- cos(pole.theta)
# oldHeight <- cos(oldpole.theta)
# if(height==-1 & cart.force==0){
# return(TRUE)
# }
#
# if(oldHeight >= 0 & height < 0){
# #print("Pole fell over")
# return (TRUE)
# }
# if(cart.centerX < track.x | cart.centerX > (track.x+2*track.limit)){
# #print("Exceeded track length")
# return (TRUE)
# } else {
# return (FALSE)
# }
# }
#
# poleBalance.PlotState <-function(updatedState){
# cart.centerX <- updatedState[[1]]
# cart.centerXDot <- updatedState[[2]]
# cart.centerXDotDot <- updatedState[[3]]
# cart.force <- updatedState[[4]]
# pole.theta <- updatedState[[5]]
# pole.thetaDot <- updatedState[[6]]
# pole.thetaDotDot <- updatedState[[7]]
#
# createSceneFunc(scene.bottomLeftX,scene.bottomLeftY,scene.width,scene.height,
# main="Simulation of Inverted Pendulum - www.gekkoquant.com",xlab="",
# ylab="",xlim=c(-0.5*scene.width,0.5*scene.width),
# ylim=c(-0.5*scene.height,0.5*scene.height))
#
# createBoxFunc(track.x,track.y,track.limit*2,track.height,track.colour)
# createBoxFunc(leftBuffer.x,leftBuffer.y,leftBuffer.width,leftBuffer.height,leftBuffer.colour)
# createBoxFunc(rightBuffer.x,rightBuffer.y,rightBuffer.width,
# rightBuffer.height,rightBuffer.colour)
# createBoxFunc(cart.centerX-0.5*cart.width,cart.centerY+0.5*cart.height,cart.width,cart.height,
# cart.colour)
# drawPoleFunc(cart.centerX,cart.centerY,2*pole.length,pole.theta,pole.colour)
# drawPendulum(cart.centerX,cart.centerY,2*pole.length,pole.theta,pendulum.radius,pendulum.colour)
#
# }
#
# config <- newConfigNEAT(7,1,500,50)
# poleSimulation <- newNEATSimulation(config, poleBalance.InitialState,
# poleBalance.UpdatePoleState,
# poleBalance.ConvertStateToNeuralNetInputs,
# poleBalance.UpdateFitness,
# poleBalance.CheckForTermination,
# poleBalance.PlotState)
#
# nMax <- 1 #Number of generations to run
# for(i in seq(1,nMax)){
# poleSimulation <- NEATSimulation.RunSingleGeneration(poleSimulation)
# #poleSimulation <- NEATSimulation.RunSingleGeneration(poleSimulation,T,"videos",
# # "poleBalance",1/simulation.timestep)
# }
#
#
#
#
#
|
/vignettes/rneat-vignette.R
|
no_license
|
ah-box/RNeat
|
R
| false | false | 7,983 |
r
|
## ----eval=FALSE----------------------------------------------------------
#
# drawPoleFunc <- function(fixedEnd.x,fixedEnd.y,poleLength, theta,
# fillColour=NA, borderColour="black"){
# floatingEnd.x <- fixedEnd.x-poleLength * sin(theta)
# floatingEnd.y <- fixedEnd.y+poleLength * cos(theta)
#
# polygon(c(fixedEnd.x,floatingEnd.x,floatingEnd.x,fixedEnd.x),
# c(fixedEnd.y,floatingEnd.y,floatingEnd.y,fixedEnd.y),
# col = fillColour, border=borderColour)
# }
#
# drawPendulum <- function(fixedEnd.x,fixedEnd.y,poleLength, theta,
# radius,fillColour=NA, borderColour="black"){
# floatingEnd.x <- fixedEnd.x-poleLength * sin(theta)
# floatingEnd.y <- fixedEnd.y+poleLength * cos(theta)
# createCircleFunc(floatingEnd.x,floatingEnd.y,radius,fillColour,borderColour)
# }
#
# #Parameters to control the simulation
# simulation.timestep = 0.005
# simulation.gravity = 9.8 #meters per second^2
# simulation.numoftimesteps = 2000
#
# pole.length = 1 #meters, total pole length
# pole.width = 0.2
# pole.theta = pi
# pole.thetaDot = 0
# pole.thetaDotDot = 0
# pole.colour = "purple"
#
#
# pendulum.centerX = NA
# pendulum.centerY = NA
# pendulum.radius = 0.1
# pendulum.mass = 0.1
# pendulum.colour = "purple"
#
# cart.width=0.5
# cart.centerX = 0
# cart.centerY = 0
# cart.height=0.2
# cart.colour="red"
# cart.centerXDot = 0
# cart.centerXDotDot = 0
# cart.mass = 0.4
# cart.force = 0
# cart.mu=2
#
#
# track.limit= 10 #meters from center
# track.x = -track.limit
# track.height=0.01
# track.y = 0.5*track.height
# track.colour = "blue"
#
# leftBuffer.width=0.1
# leftBuffer.height=0.2
# leftBuffer.x=-track.limit-0.5*cart.width-leftBuffer.width
# leftBuffer.y=0.5*leftBuffer.height
# leftBuffer.colour = "blue"
#
# rightBuffer.width=0.1
# rightBuffer.height=0.2
# rightBuffer.x=track.limit+0.5*cart.width
# rightBuffer.y=0.5*rightBuffer.height
# rightBuffer.colour = "blue"
#
# #Define the size of the scene (used to visualise what is happening in the simulation)
# scene.width = 2*max(rightBuffer.x+rightBuffer.width,track.limit+pole.length+pendulum.radius)
# scene.bottomLeftX = -0.5*scene.width
# scene.height=max(pole.length+pendulum.radius,scene.width)
# scene.bottomLeftY = -0.5*scene.height
#
# poleBalance.InitialState <- function(){
# state <- list()
# state[1] <- cart.centerX
# state[2] <- cart.centerXDot
# state[3] <- cart.centerXDotDot
# state[4] <- cart.force
# state[5] <- pole.theta
# state[6] <- pole.thetaDot
# state[7] <- pole.thetaDotDot
# return(state)
# }
#
# poleBalance.ConvertStateToNeuralNetInputs <- function(currentState){
# return (currentState)
# }
#
# poleBalance.UpdatePoleState <- function(currentState,neuralNetOutputs){
# #print("Updating pole state")
# #print(neuralNetOutputs)
# cart.centerX <- currentState[[1]]
# cart.centerXDot <- currentState[[2]]
# cart.centerXDotDot <- currentState[[3]]
# cart.force <- currentState[[4]]+neuralNetOutputs[[1]]
# pole.theta <- currentState[[5]]
# pole.thetaDot <- currentState[[6]]
# pole.thetaDotDot <- currentState[[7]]
#
# costheta = cos(pole.theta)
# sintheta = sin(pole.theta)
# totalmass = cart.mass+pendulum.mass
# masslength = pendulum.mass*pole.length
#
# pole.thetaDotDot = (simulation.gravity*totalmass*sintheta+costheta*
# (cart.force-masslength*pole.thetaDot^2*sintheta-cart.mu*cart.centerXDot))/
# (pole.length*(totalmass-pendulum.mass*costheta^2))
#
# cart.centerXDotDot =(cart.force+masslength*(pole.thetaDotDot*costheta-pole.thetaDot^2*sintheta)-
# cart.mu*cart.centerXDot)/totalmass
#
# cart.centerX = cart.centerX+simulation.timestep*cart.centerXDot
# cart.centerXDot = cart.centerXDot+simulation.timestep*cart.centerXDotDot
# pole.theta = (pole.theta +simulation.timestep*pole.thetaDot )
# pole.thetaDot = pole.thetaDot+simulation.timestep*pole.thetaDotDot
#
# currentState[1] <- cart.centerX
# currentState[2] <- cart.centerXDot
# currentState[3] <- cart.centerXDotDot
# currentState[4] <- cart.force
# currentState[5] <- pole.theta
# currentState[6] <- pole.thetaDot
# currentState[7] <- pole.thetaDotDot
# return (currentState)
# }
#
#
#
# poleBalance.UpdateFitness <- function(oldState,updatedState,oldFitness){
# #return (oldFitness+1) #fitness is just how long we've ran for
# #return (oldFitness+((track.limit-abs(updatedState[[1]]))/track.limit)^2)
# #More reward for staying near middle of track
#
# height <- cos(updatedState[[5]]) #is -ve if below track
# heightFitness <- max(height,0)
# centerFitness <- (track.limit-abs(updatedState[[1]]))/track.limit
# return (oldFitness+(heightFitness + heightFitness*centerFitness))
# }
#
# poleBalance.CheckForTermination <- function(frameNum,oldState,updatedState,oldFitness,newFitness){
# cart.centerX <- updatedState[[1]]
# cart.centerXDot <- updatedState[[2]]
# cart.centerXDotDot <- updatedState[[3]]
# cart.force <- updatedState[[4]]
# pole.theta <- updatedState[[5]]
# pole.thetaDot <- updatedState[[6]]
# pole.thetaDotDot <- updatedState[[7]]
#
# oldpole.theta <- oldState[[5]]
# if(frameNum > 20000){
# print("Max Frame Num Exceeded , stopping simulation")
# return (TRUE)
# }
#
# height <- cos(pole.theta)
# oldHeight <- cos(oldpole.theta)
# if(height==-1 & cart.force==0){
# return(TRUE)
# }
#
# if(oldHeight >= 0 & height < 0){
# #print("Pole fell over")
# return (TRUE)
# }
# if(cart.centerX < track.x | cart.centerX > (track.x+2*track.limit)){
# #print("Exceeded track length")
# return (TRUE)
# } else {
# return (FALSE)
# }
# }
#
# poleBalance.PlotState <-function(updatedState){
# cart.centerX <- updatedState[[1]]
# cart.centerXDot <- updatedState[[2]]
# cart.centerXDotDot <- updatedState[[3]]
# cart.force <- updatedState[[4]]
# pole.theta <- updatedState[[5]]
# pole.thetaDot <- updatedState[[6]]
# pole.thetaDotDot <- updatedState[[7]]
#
# createSceneFunc(scene.bottomLeftX,scene.bottomLeftY,scene.width,scene.height,
# main="Simulation of Inverted Pendulum - www.gekkoquant.com",xlab="",
# ylab="",xlim=c(-0.5*scene.width,0.5*scene.width),
# ylim=c(-0.5*scene.height,0.5*scene.height))
#
# createBoxFunc(track.x,track.y,track.limit*2,track.height,track.colour)
# createBoxFunc(leftBuffer.x,leftBuffer.y,leftBuffer.width,leftBuffer.height,leftBuffer.colour)
# createBoxFunc(rightBuffer.x,rightBuffer.y,rightBuffer.width,
# rightBuffer.height,rightBuffer.colour)
# createBoxFunc(cart.centerX-0.5*cart.width,cart.centerY+0.5*cart.height,cart.width,cart.height,
# cart.colour)
# drawPoleFunc(cart.centerX,cart.centerY,2*pole.length,pole.theta,pole.colour)
# drawPendulum(cart.centerX,cart.centerY,2*pole.length,pole.theta,pendulum.radius,pendulum.colour)
#
# }
#
# config <- newConfigNEAT(7,1,500,50)
# poleSimulation <- newNEATSimulation(config, poleBalance.InitialState,
# poleBalance.UpdatePoleState,
# poleBalance.ConvertStateToNeuralNetInputs,
# poleBalance.UpdateFitness,
# poleBalance.CheckForTermination,
# poleBalance.PlotState)
#
# nMax <- 1 #Number of generations to run
# for(i in seq(1,nMax)){
# poleSimulation <- NEATSimulation.RunSingleGeneration(poleSimulation)
# #poleSimulation <- NEATSimulation.RunSingleGeneration(poleSimulation,T,"videos",
# # "poleBalance",1/simulation.timestep)
# }
#
#
#
#
#
|
library("JointAI")
# check_random_lvls ------------------------------------------------------------
test_that("check_random_lvls", {
expect_null(check_random_lvls(random = NULL, rel_lvls = NULL))
expect_null(check_random_lvls(random = ~ time | id, rel_lvls = NULL))
expect_equal(check_random_lvls(random = NULL, rel_lvls = c("id", "center")),
list(id = ~ 1, center = ~ 1), ignore_attr = TRUE
)
expect_equal(
check_random_lvls(random = ~1 | center, rel_lvls = c("id", "center")),
list(center = ~ 1), ignore_attr = TRUE
)
expect_error(
check_random_lvls(random = ~ (1 | center) + (1 | hospital),
rel_lvls = c("id", "center"))
)
})
test_that("get_rds_lp", {
# no random slope
expect_equal(get_rds_lp(rd_slope_coefs = NULL,
index = NULL, lvl = NULL,
parname = NULL),
list()
)
# random slope with corresponding fixed effect
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = 1),
parname = "beta", index = NULL, lvl = NULL),
list(time = c("beta[1]")))
# random slope without fixed effect
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = NA),
parname = "beta", index = NULL, lvl = NULL),
list(time = c("0")))
# multiple random slopes
expect_equal(
get_rds_lp(rd_slope_coefs =
data.frame(rd_effect = c("time", "time2"),
parelmts = c(3, 6)),
parname = "beta", index = NULL, lvl = NULL),
list(time = "beta[3]", time2 = "beta[6]"))
# multiple random slopes, some with, some without fixed effects
expect_equal(
get_rds_lp(rd_slope_coefs =
data.frame(rd_effect = c("time", "time2"),
parelmts = c(3, NA)),
parname = "beta", index = NULL, lvl = NULL),
list(time = "beta[3]", time2 = "0"))
# random slope with corresponding fixed effect & interaction
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = 1),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = 2,
cols = 3),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "beta[1] + M_id[i, 3] * beta[2]")
)
# interaction with multiple variables
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = 1),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = c(2, 3),
cols = c(3, 4)),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "beta[1] + M_id[i, 3] * beta[2] + M_id[i, 4] * beta[3]")
)
# combination of random slopes with/out corresponding fixed effect &
# (no) interaction
expect_equal(
get_rds_lp(
rd_slope_coefs = data.frame(rd_effect = c("time", "time2", "time3"),
parelmts = c(1, 2, NA)),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = c(4, 5),
cols = c(3, 4)),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "beta[1] + M_id[i, 3] * beta[4] + M_id[i, 4] * beta[5]",
time2 = "beta[2]",
time3 = "0")
)
expect_equal(
get_rds_lp(
rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = NA),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = NA,
cols = 2),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "0")
)
})
# hc_rdslope_info(list(time = list(main = c(M_lvlone = 3),
# interact = NULL)),
# parelmts = list(M_id = c("(Intercept)" = 1,
# C1 = 2),
# M_lvlone = c(b21 = 3,
# time = 4)
# )
# )
|
/tests/testthat/test-helpfunctions_ranefs.R
|
no_license
|
NErler/JointAI
|
R
| false | false | 4,796 |
r
|
library("JointAI")
# check_random_lvls ------------------------------------------------------------
test_that("check_random_lvls", {
expect_null(check_random_lvls(random = NULL, rel_lvls = NULL))
expect_null(check_random_lvls(random = ~ time | id, rel_lvls = NULL))
expect_equal(check_random_lvls(random = NULL, rel_lvls = c("id", "center")),
list(id = ~ 1, center = ~ 1), ignore_attr = TRUE
)
expect_equal(
check_random_lvls(random = ~1 | center, rel_lvls = c("id", "center")),
list(center = ~ 1), ignore_attr = TRUE
)
expect_error(
check_random_lvls(random = ~ (1 | center) + (1 | hospital),
rel_lvls = c("id", "center"))
)
})
test_that("get_rds_lp", {
# no random slope
expect_equal(get_rds_lp(rd_slope_coefs = NULL,
index = NULL, lvl = NULL,
parname = NULL),
list()
)
# random slope with corresponding fixed effect
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = 1),
parname = "beta", index = NULL, lvl = NULL),
list(time = c("beta[1]")))
# random slope without fixed effect
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = NA),
parname = "beta", index = NULL, lvl = NULL),
list(time = c("0")))
# multiple random slopes
expect_equal(
get_rds_lp(rd_slope_coefs =
data.frame(rd_effect = c("time", "time2"),
parelmts = c(3, 6)),
parname = "beta", index = NULL, lvl = NULL),
list(time = "beta[3]", time2 = "beta[6]"))
# multiple random slopes, some with, some without fixed effects
expect_equal(
get_rds_lp(rd_slope_coefs =
data.frame(rd_effect = c("time", "time2"),
parelmts = c(3, NA)),
parname = "beta", index = NULL, lvl = NULL),
list(time = "beta[3]", time2 = "0"))
# random slope with corresponding fixed effect & interaction
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = 1),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = 2,
cols = 3),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "beta[1] + M_id[i, 3] * beta[2]")
)
# interaction with multiple variables
expect_equal(
get_rds_lp(rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = 1),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = c(2, 3),
cols = c(3, 4)),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "beta[1] + M_id[i, 3] * beta[2] + M_id[i, 4] * beta[3]")
)
# combination of random slopes with/out corresponding fixed effect &
# (no) interaction
expect_equal(
get_rds_lp(
rd_slope_coefs = data.frame(rd_effect = c("time", "time2", "time3"),
parelmts = c(1, 2, NA)),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = c(4, 5),
cols = c(3, 4)),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "beta[1] + M_id[i, 3] * beta[4] + M_id[i, 4] * beta[5]",
time2 = "beta[2]",
time3 = "0")
)
expect_equal(
get_rds_lp(
rd_slope_coefs = data.frame(rd_effect = "time",
parelmts = NA),
rd_slope_interact_coefs = data.frame(rd_effect = "time",
matrix = "M_id",
parelmts = NA,
cols = 2),
lvl = "id", parname = "beta", index = c(id = "i")),
list(time = "0")
)
})
# hc_rdslope_info(list(time = list(main = c(M_lvlone = 3),
# interact = NULL)),
# parelmts = list(M_id = c("(Intercept)" = 1,
# C1 = 2),
# M_lvlone = c(b21 = 3,
# time = 4)
# )
# )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_gameteFunctions.R
\name{sim_gameteFormation}
\alias{sim_gameteFormation}
\title{Simulate formation of gametes.}
\usage{
sim_gameteFormation(chrom_map, allele_IDs, burn_in = 1000,
gamma_params = c(2.63, 2.63/0.5))
}
\arguments{
\item{chrom_map}{Data.frame. A data.frame consisting of three columns: column 1 contains the chromosome numbers, column 2 the start position of the chromosome (in cM), column 3 the end position of the chromosome (in cM).}
\item{allele_IDs}{List of length 2. The identification numbers for the respective paternal and maternal alleles of the individual for whom we wish to simulate recombination. (Can accommodate numeric or string entries)}
\item{burn_in}{Numeric. The "burn-in" distance in centiMorgans, as defined by Voorrips and Maliepaard (2012), which is required before simulating the location of the first chiasmata with interference. By default, \code{burn_in = 1000}.}
\item{gamma_params}{Numeric list of length 2. The respective shape and rate parameters of the gamma distribution used to simulate the distance between chiasmata. By default, \code{gamma_params = c(2.63, 2*2.63)}, as discussed in Voorrips and Maliepaard (2012).}
}
\value{
A list containing the following:
\code{chrom_haps} A list of dataframes, each dataframe is a set of four recombined haplotypes for a single chromosome (in the order specified in \code{chrom_map}), each with a gamete group identifier column.
\code{gamete_group} A list of lists, each list contains the crossover positions for a single chromosome (in the order specified in \code{chrom_map}).
}
\description{
Simulate formation of gametes. \strong{This function will likely become an internal function}.
}
\keyword{internal}
|
/man/sim_gameteFormation.Rd
|
no_license
|
cran/SimRVSequences
|
R
| false | true | 1,876 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_gameteFunctions.R
\name{sim_gameteFormation}
\alias{sim_gameteFormation}
\title{Simulate formation of gametes.}
\usage{
sim_gameteFormation(chrom_map, allele_IDs, burn_in = 1000,
gamma_params = c(2.63, 2.63/0.5))
}
\arguments{
\item{chrom_map}{Data.frame. A data.frame consisting of three columns: column 1 contains the chromosome numbers, column 2 the start position of the chromosome (in cM), column 3 the end position of the chromosome (in cM).}
\item{allele_IDs}{List of length 2. The identification numbers for the respective paternal and maternal alleles of the individual for whom we wish to simulate recombination. (Can accommodate numeric or string entries)}
\item{burn_in}{Numeric. The "burn-in" distance in centiMorgans, as defined by Voorrips and Maliepaard (2012), which is required before simulating the location of the first chiasmata with interference. By default, \code{burn_in = 1000}.}
\item{gamma_params}{Numeric list of length 2. The respective shape and rate parameters of the gamma distribution used to simulate the distance between chiasmata. By default, \code{gamma_params = c(2.63, 2*2.63)}, as discussed in Voorrips and Maliepaard (2012).}
}
\value{
A list containing the following:
\code{chrom_haps} A list of dataframes, each dataframe is a set of four recombined haplotypes for a single chromosome (in the order specified in \code{chrom_map}), each with a gamete group identifier column.
\code{gamete_group} A list of lists, each list contains the crossover positions for a single chromosome (in the order specified in \code{chrom_map}).
}
\description{
Simulate formation of gametes. \strong{This function will likely become an internal function}.
}
\keyword{internal}
|
library(mallet)
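# Note: this script assumes `my_data` has already been loaded into the session
# (a data.frame of posts with at least owner_id, caption and taken_at_timestamp
# columns), and that a stopword file "en.txt" sits in the working directory.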
#turning the owner_id into a character
my_data$owner_id <- as.character(my_data$owner_id)
documents <- data.frame(text = my_data$caption,
id = make.unique(my_data$owner_id),
class = my_data$taken_at_timestamp,
stringsAsFactors=FALSE)
mallet.instances <- mallet.import(documents$id, documents$text, "en.txt", token.regexp = "\\p{L}[\\p{L}\\p{P}]+\\p{L}")
## the stopwords file: I added humanbone and humanskull to the stopwords since those were the original search terms,
## and because skull, skulls, and human are extremely prominent after doing so, we add those in too.
## the stopwords list is the one that comes bundled with MALLET, http://mallet.cs.umass.edu/
## Create a topic trainer object.
n.topics <- 25
topic.model <- MalletLDA(n.topics)
## Load our documents. We could also pass in the filename of a
## saved instance list file that we build from the command-line tools.
topic.model$loadDocuments(mallet.instances)
## Get the vocabulary, and some statistics about word frequencies.
## These may be useful in further curating the stopword list.
vocabulary <- topic.model$getVocabulary()
word.freqs <- mallet.word.freqs(topic.model)
## Optimize hyperparameters every 20 iterations,
## after 50 burn-in iterations.
topic.model$setAlphaOptimization(20, 50)
## Now train a model. Note that hyperparameter optimization is on, by default.
## We can specify the number of iterations. Here we'll use a large-ish round number.
topic.model$train(1000)
## NEW: run through a few iterations where we pick the best topic for each token,
## rather than sampling from the posterior distribution.
topic.model$maximize(10)
## Get the probability of topics in documents and the probability of words in topics.
## By default, these functions return raw word counts. Here we want probabilities,
## so we normalize, and add "smoothing" so that nothing has exactly 0 probability.
doc.topics <- mallet.doc.topics(topic.model, smoothed=T, normalized=T)
topic.words <- mallet.topic.words(topic.model, smoothed=T, normalized=T)
# from http://www.cs.princeton.edu/~mimno/R/clustertrees.R
## transpose and normalize the doc topics
topic.docs <- t(doc.topics)
topic.docs <- topic.docs / rowSums(topic.docs)
write.csv(doc.topics, file = "GIVE-A-USEFUL-NAME-HERE.csv")
## Get a vector containing short names for the topics
topics.labels <- rep("", n.topics)
for (topic in 1:n.topics) topics.labels[topic] <- paste(mallet.top.words(topic.model, topic.words[topic,], num.top.words=10)$words, collapse=" ")
# have a look at keywords for each topic
topics.labels
# this is where you might decide that some word is appearing too frequently - an artefact of your collections strategy, perhaps, so you'd add it to your stop list and start again.
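## Hedged sketch (not part of the original script): one way to pull out the
## documents that load most heavily on a single topic, reusing the normalized
## `topic.docs` matrix built above (rows = topics, columns = documents) and
## assuming every row of `documents` made it into the instance list.
topic.of.interest <- 1
top.docs <- order(topic.docs[topic.of.interest, ], decreasing = TRUE)[1:10]
documents$text[top.docs]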
|
/getting-started/topic-model.r
|
no_license
|
bonetrade/r-scripts
|
R
| false | false | 2,822 |
r
|
library(mallet)
#turning the owner_id into a character
my_data$owner_id <- as.character(my_data$owner_id)
documents <- data.frame(text = my_data$caption,
id = make.unique(my_data$owner_id),
class = my_data$taken_at_timestamp,
stringsAsFactors=FALSE)
mallet.instances <- mallet.import(documents$id, documents$text, "en.txt", token.regexp = "\\p{L}[\\p{L}\\p{P}]+\\p{L}")
## the stopwords file: I added humanbone and humanskull to the stopwords since those were the original search terms,
## and because skull, skulls, and human are extremely prominent after doing so, we add those in too.
## the stopwords list is the one that comes bundled with MALLET, http://mallet.cs.umass.edu/
## Create a topic trainer object.
n.topics <- 25
topic.model <- MalletLDA(n.topics)
## Load our documents. We could also pass in the filename of a
## saved instance list file that we build from the command-line tools.
topic.model$loadDocuments(mallet.instances)
## Get the vocabulary, and some statistics about word frequencies.
## These may be useful in further curating the stopword list.
vocabulary <- topic.model$getVocabulary()
word.freqs <- mallet.word.freqs(topic.model)
## Optimize hyperparameters every 20 iterations,
## after 50 burn-in iterations.
topic.model$setAlphaOptimization(20, 50)
## Now train a model. Note that hyperparameter optimization is on, by default.
## We can specify the number of iterations. Here we'll use a large-ish round number.
topic.model$train(1000)
## NEW: run through a few iterations where we pick the best topic for each token,
## rather than sampling from the posterior distribution.
topic.model$maximize(10)
## Get the probability of topics in documents and the probability of words in topics.
## By default, these functions return raw word counts. Here we want probabilities,
## so we normalize, and add "smoothing" so that nothing has exactly 0 probability.
doc.topics <- mallet.doc.topics(topic.model, smoothed=T, normalized=T)
topic.words <- mallet.topic.words(topic.model, smoothed=T, normalized=T)
# from http://www.cs.princeton.edu/~mimno/R/clustertrees.R
## transpose and normalize the doc topics
topic.docs <- t(doc.topics)
topic.docs <- topic.docs / rowSums(topic.docs)
write.csv(doc.topics, file = "GIVE-A-USEFUL-NAME-HERE.csv")
## Get a vector containing short names for the topics
topics.labels <- rep("", n.topics)
for (topic in 1:n.topics) topics.labels[topic] <- paste(mallet.top.words(topic.model, topic.words[topic,], num.top.words=10)$words, collapse=" ")
# have a look at keywords for each topic
topics.labels
# this is where you might decide that some word is appearing too frequently - an artefact of your collections strategy, perhaps, so you'd add it to your stop list and start again.
|
library(shiny)
library(V8)
library(sodium)
library(openssl)
library(rJava) #For sending an email from R
library(mailR) #For sending an email from R
library(DBI)
library(pool)
library(RSQLite)
#Database
sqlite_path = "www/sqlite/users"
pool <- dbPool(drv = RSQLite::SQLite(), dbname=sqlite_path)
onStop(function() {
poolClose(pool)
})
#Create table user in DB
dbExecute(pool, 'CREATE TABLE IF NOT EXISTS user (user_name TEXT, country TEXT, email TEXT, password TEXT)')
#Countries
countries.list <- read.table("www/countries.txt", header = FALSE, sep = "|",
stringsAsFactors = FALSE, quote = "",
col.names = c("abbr", "country"))
choice.country <- as.list(as.character(countries.list$country))
names(choice.country) <- countries.list$country
server <- function(input, output, session) {
#####################################################################################
########################## Start LogIn ################################################
#####################################################################################
## Initialize - user is not logged in
#user_abu <- reactiveValues(login = FALSE, name = NULL, role = NULL, header = NULL)
loggedIn <- reactiveVal(value = FALSE)
user <- reactiveValues(name = NULL, id=NULL)
#observeEvent will execute only if butLogin is pressed. observe is executed uniformly over time.#
#THis after pressing login#
observeEvent(input$butLogin, {
#browser() #: for debug mode test
    req(input$username, input$pwInp) #Make sure username and password are entered#
query <- sqlInterpolate(pool,"select * from user where user_name=?user or email=?email;",user=input$username,email=input$username)
user_data <- dbGetQuery(pool,query)
if(nrow(user_data) > 0){ # If the active user is in the DB then logged in
if(sha256(input$pwInp) == user_data[1, "password"]){
user$name <- user_data[1, "user_name"]
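        # Note: the CREATE TABLE statement above does not define a user_id column,
        # so this lookup only works if the existing SQLite table already has one.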
user$id <- user_data[1, "user_id"]
loggedIn(TRUE)
#print(paste("- User:", user$name, "logged in"))
#removeModal() ## remove the modal
toggleModal(session, "window", toggle = "close")
output$App_Panel <- renderUI({
span(
strong(paste("welcome", user$name, "|")),
actionLink(inputId = "logout", "Logout")
)
})
}
} else {
loggedIn(FALSE)
}
})
output$login_status <- renderUI({
if(input$butLogin == 0){
return(NULL)
} else {
if(!loggedIn()){
return(span("The Username or Password is Incorrect", style = "color:red"))
}
}
})
#For creating a new account#
observeEvent(input$create_account, {
showModal(
modalDialog(title = "Create an account", size = "m",
textInput(inputId = "new_user", label = "Username"),
textInput(inputId = "new_email", label = "Email"),
selectizeInput(inputId = 'country', 'Country', choices = choice.country),
passwordInput(inputId = "new_pw", label = "Password"),
passwordInput(inputId = "new_pw_conf", label = "Confirm password"),
                            checkboxInput(inputId = "terms", label = a("I agree to the terms and conditions",target="_blank",href="Disclaimer-TermsandConditions.html")),
actionButton(inputId = "register_user", label = "Submit"),
#p(input$register_user),
uiOutput("register_status"),
footer = actionButton("dismiss_modal",label = "Dismiss")
)
)
register_user()
})
observeEvent(input$dismiss_modal,{
removeModal()
})
register_user <- eventReactive(input$register_user, {
if(!isTruthy(input$new_user) | !isTruthy(input$new_email) | !isTruthy(input$new_pw) ){
return(span("Fill required information correctly", style = "color:red"))
}
if (!isValidEmail(input$new_email)){
return(span("Please provide a valid email address", style = "color:red"))
}
if (sha256(input$new_pw)!=sha256(input$new_pw_conf)){
return(span("Entered passwords do not match.", style = "color:red"))
}
if (!input$terms){
return(span("Please tick the box to show that you agree with terms and conditions", style = "color:red"))
}
query <- sqlInterpolate(pool,"select * from user where user_name=?user or email=?email;",user=input$new_user,email=input$new_email)
users_data <- dbGetQuery(pool,query)
#users_data <- DB_get_user(input$new_user)
if(nrow(users_data) > 0){
return(span("User already exists", style = "color:red"))
}
new_hash <- sha256(input$new_pw)
new_user <- input$new_user
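  # Note: building the INSERT statement with paste0() leaves this open to SQL injection;
  # sqlInterpolate() (already used for the SELECT queries above) would be the safer choice.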
dbExecute(pool,paste0("INSERT INTO user (user_name, country, email, password) values ","('",new_user,"','",input$country,"','",input$new_email,"','",new_hash,"')", ";"))
print("- New user added to database")
  #Send an email to the newly registered user. The email will provide them with their username and password#
# isolate({send.mail(from = "....@gmail.com",
# to = input$new_email,
# subject = "Welcome to ... App",
# body = HTML(paste(paste("Hi",new_user,","),
# "<p>Thank you for using https://test.com. Please find below your credentials for future reference:</p>",
# paste("Username:",new_user,"<br>"),
# paste("Password:",input$new_pw,"<br><br><br>"),
# paste("Best regards, <br><br>Test.com Team"))),
# smtp = list(host.name = "smtp.gmail.com", port = 465, user.name = "...@gmail.com", passwd = "...", ssl = TRUE),
# authenticate = TRUE,
# html = TRUE,
# send = TRUE)})
#
return(span("Your registration was successful. An email with your credential is sent to the registred email adrress", style = "color:green"))
loggedIn(FALSE)
})
output$register_status <- renderUI({
if(input$register_user == 0){
return(NULL)
} else {
isolate(register_user())
}
})
observeEvent(input$logout, {
user$name <- NULL
user$id <- NULL
loggedIn(FALSE)
js$reset2()
#stopApp()
#print("- User: logged out")
})
}
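# Hedged sketch: isValidEmail() is called in register_user() but is not defined
# in this file; it is presumably supplied elsewhere (e.g. a global.R). If it is
# not, a minimal implementation along these lines would do:
if (!exists("isValidEmail")) {
  isValidEmail <- function(x) {
    # simple pattern check: local part, "@", domain, dot, top-level domain
    grepl("^[[:alnum:]._%+-]+@[[:alnum:].-]+\\.[[:alpha:]]{2,}$", as.character(x))
  }
}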
|
/server.R
|
no_license
|
kuzmenkov111/Shiny-login-page
|
R
| false | false | 6,652 |
r
|
library(shiny)
library(V8)
library(sodium)
library(openssl)
library(rJava) #For sending an email from R
library(mailR) #For sending an email from R
library(DBI)
library(pool)
library(RSQLite)
#Database
sqlite_path = "www/sqlite/users"
pool <- dbPool(drv = RSQLite::SQLite(), dbname=sqlite_path)
onStop(function() {
poolClose(pool)
})
#Create table user in DB
dbExecute(pool, 'CREATE TABLE IF NOT EXISTS user (user_name TEXT, country TEXT, email TEXT, password TEXT)')
#Countries
countries.list <- read.table("www/countries.txt", header = FALSE, sep = "|",
stringsAsFactors = FALSE, quote = "",
col.names = c("abbr", "country"))
choice.country <- as.list(as.character(countries.list$country))
names(choice.country) <- countries.list$country
server <- function(input, output, session) {
#####################################################################################
########################## Start LogIn ################################################
#####################################################################################
## Initialize - user is not logged in
#user_abu <- reactiveValues(login = FALSE, name = NULL, role = NULL, header = NULL)
loggedIn <- reactiveVal(value = FALSE)
user <- reactiveValues(name = NULL, id=NULL)
#observeEvent will execute only if butLogin is pressed. observe is executed uniformly over time.#
#THis after pressing login#
observeEvent(input$butLogin, {
#browser() #: for debug mode test
    req(input$username, input$pwInp) #Make sure username and password are entered#
query <- sqlInterpolate(pool,"select * from user where user_name=?user or email=?email;",user=input$username,email=input$username)
user_data <- dbGetQuery(pool,query)
if(nrow(user_data) > 0){ # If the active user is in the DB then logged in
if(sha256(input$pwInp) == user_data[1, "password"]){
user$name <- user_data[1, "user_name"]
user$id <- user_data[1, "user_id"]
loggedIn(TRUE)
#print(paste("- User:", user$name, "logged in"))
#removeModal() ## remove the modal
toggleModal(session, "window", toggle = "close")
output$App_Panel <- renderUI({
span(
strong(paste("welcome", user$name, "|")),
actionLink(inputId = "logout", "Logout")
)
})
}
} else {
loggedIn(FALSE)
}
})
output$login_status <- renderUI({
if(input$butLogin == 0){
return(NULL)
} else {
if(!loggedIn()){
return(span("The Username or Password is Incorrect", style = "color:red"))
}
}
})
#For creating a new account#
observeEvent(input$create_account, {
showModal(
modalDialog(title = "Create an account", size = "m",
textInput(inputId = "new_user", label = "Username"),
textInput(inputId = "new_email", label = "Email"),
selectizeInput(inputId = 'country', 'Country', choices = choice.country),
passwordInput(inputId = "new_pw", label = "Password"),
passwordInput(inputId = "new_pw_conf", label = "Confirm password"),
                            checkboxInput(inputId = "terms", label = a("I agree to the terms and conditions",target="_blank",href="Disclaimer-TermsandConditions.html")),
actionButton(inputId = "register_user", label = "Submit"),
#p(input$register_user),
uiOutput("register_status"),
footer = actionButton("dismiss_modal",label = "Dismiss")
)
)
register_user()
})
observeEvent(input$dismiss_modal,{
removeModal()
})
register_user <- eventReactive(input$register_user, {
if(!isTruthy(input$new_user) | !isTruthy(input$new_email) | !isTruthy(input$new_pw) ){
return(span("Fill required information correctly", style = "color:red"))
}
if (!isValidEmail(input$new_email)){
return(span("Please provide a valid email address", style = "color:red"))
}
if (sha256(input$new_pw)!=sha256(input$new_pw_conf)){
return(span("Entered passwords do not match.", style = "color:red"))
}
if (!input$terms){
return(span("Please tick the box to show that you agree with terms and conditions", style = "color:red"))
}
query <- sqlInterpolate(pool,"select * from user where user_name=?user or email=?email;",user=input$new_user,email=input$new_email)
users_data <- dbGetQuery(pool,query)
#users_data <- DB_get_user(input$new_user)
if(nrow(users_data) > 0){
return(span("User already exists", style = "color:red"))
}
new_hash <- sha256(input$new_pw)
new_user <- input$new_user
dbExecute(pool,paste0("INSERT INTO user (user_name, country, email, password) values ","('",new_user,"','",input$country,"','",input$new_email,"','",new_hash,"')", ";"))
print("- New user added to database")
  #Send an email to the newly registered user. The email will provide them with their username and password#
# isolate({send.mail(from = "....@gmail.com",
# to = input$new_email,
# subject = "Welcome to ... App",
# body = HTML(paste(paste("Hi",new_user,","),
# "<p>Thank you for using https://test.com. Please find below your credentials for future reference:</p>",
# paste("Username:",new_user,"<br>"),
# paste("Password:",input$new_pw,"<br><br><br>"),
# paste("Best regards, <br><br>Test.com Team"))),
# smtp = list(host.name = "smtp.gmail.com", port = 465, user.name = "...@gmail.com", passwd = "...", ssl = TRUE),
# authenticate = TRUE,
# html = TRUE,
# send = TRUE)})
#
return(span("Your registration was successful. An email with your credential is sent to the registred email adrress", style = "color:green"))
loggedIn(FALSE)
})
output$register_status <- renderUI({
if(input$register_user == 0){
return(NULL)
} else {
isolate(register_user())
}
})
observeEvent(input$logout, {
user$name <- NULL
user$id <- NULL
loggedIn(FALSE)
js$reset2()
#stopApp()
#print("- User: logged out")
})
}
|
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
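## Added note (not in the original source): each move below proposes a change to
## the set of tensor-product basis functions (categorical part x functional part),
## checks that the candidate design stays full rank via getQf(), and accepts with
## a Metropolis-Hastings ratio that combines the change in the quadratic form,
## the prior on the number of basis functions (lam), the proposal correction
## terms (lbmcmp / lpbmcmp), and the parallel-tempering weight itemp.ladder[temp.ind].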
birth_cat_func<-function(curr,prior,data){
cand.cat<-genCandBasisCat(minInt=prior$minInt,maxInt=prior$maxInt.cat,I.vec=curr$I.vec.cat,z.vec=curr$z.vec.cat,p=data$pcat,xx=data$xx.cat,nlevels=data$nlevels,levels=data$levels,prior)
if(sum(cand.cat$basis!=0)<prior$npart.des)
return(curr)
cand.func<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.func,I.vec=curr$I.vec.func,z.vec=curr$z.vec.func,p=data$pfunc,xxt=data$xxt.func,q=prior$q,xx.unique.ind=data$unique.ind.func,vars.len=data$vars.len.func,prior)
if(sum(cand.func$basis!=0)<prior$npart.func)
return(curr)
if(cand.cat$n.int + cand.func$n.int == 0) # intercept
return(curr)
ata<-crossprod(cand.cat$basis)*crossprod(cand.func$basis)
Xta<-crossprod(curr$cat.basis,cand.cat$basis)*crossprod(curr$func.basis,cand.func$basis)
aty<-tcrossprod(crossprod(cand.cat$basis,data$y),cand.func$basis)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(.5/curr$s2*(qf.cand.list$qf-curr$qf) + log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob) - cand.cat$lbmcmp - cand.func$lbmcmp)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisCat(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisFunc(curr,cand.func,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.cat<-curr$I.star.cat
I.star.cat[curr$n.int.cat[basis]+1]<-I.star.cat[curr$n.int.cat[basis]+1]-1
I.vec.cat<-I.star.cat/sum(I.star.cat)
z.star.cat<-curr$z.star.cat
if(curr$n.int.cat[basis]>0)
z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]<-z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]-1
z.vec.cat<-z.star.cat/sum(z.star.cat)
I.star.func<-curr$I.star.func
I.star.func[curr$n.int.func[basis]+1]<-I.star.func[curr$n.int.func[basis]+1]-1
I.vec.func<-I.star.func/sum(I.star.func)
z.star.func<-curr$z.star.func
if(curr$n.int.func[basis]>0)
z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]<-z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]-1
z.vec.func<-z.star.func/sum(z.star.func)
lpbmcmp<-0
if(curr$n.int.cat[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeModCat(curr$n.int.cat[basis],curr$vars.cat[basis,1:curr$n.int.cat[basis]],I.vec.cat,z.vec.cat,data$pcat,data$nlevels,curr$sub.size[basis,],prior$maxInt.cat,prior$miC)
}
if(curr$n.int.func[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.func[basis],curr$vars.func[basis,1:curr$n.int.func[basis]],I.vec.func,z.vec.func,data$pfunc,data$vars.len.func,prior$maxInt.func,prior$miC)
}
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(.5/curr$s2*(qf.cand.list$qf-curr$qf) - log(curr$lam) + log(data$birth.prob.last/data$death.prob) + log(curr$nbasis) + lpbmcmp)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisCat(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisFunc(curr,basis,ind,qf.cand.list,I.star.func,I.vec.func,z.star.func,z.vec.func)
}
return(curr)
}
change_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
type.change<-sample(c('cat','func'),size=1,prob=c(curr$n.int.cat[basis],curr$n.int.func[basis]))
if(type.change=='cat'){
int.change<-sample(1:(curr$n.int.cat[basis]),size=1)
use<-1:curr$n.int.cat[basis]
cand.cat<-genBasisChangeCat(curr,basis,int.change,data$xx.cat,data$nlevels,data$levels,curr$sub.size[basis,use],curr$sub.list[[basis]],vars=curr$vars.cat[basis,use])
cand.func<-list(basis=curr$func.basis[,basis+1])
} else{
int.change<-sample(1:(curr$n.int.func[basis]),size=1)
use<-1:curr$n.int.func[basis]
cand.func<-genBasisChange(curr,basis,int.change,data$xxt.func,prior$q,knots=curr$knots.func[basis,use],knotInd=curr$knotInd.func[basis,use],signs=curr$signs.func[basis,use],vars=curr$vars.func[basis,use],xx.unique.ind=data$unique.ind.func)
cand.cat<-list(basis=curr$cat.basis[,basis+1])
}
if(sum(cand.cat$basis!=0)<prior$npart.des){
return(curr)
}
if(sum(cand.func$basis!=0)<prior$npart.func){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$cat.basis,cand.cat$basis)*crossprod(curr$func.basis,cand.func$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.cat$basis)*crossprod(cand.func$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-tcrossprod(crossprod(cand.cat$basis,data$y),cand.func$basis)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*.5/curr$s2*(qf.cand.list$qf-curr$qf)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='cat')
curr<-changeBasisCat(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='func')
curr<-changeBasisFunc(curr,cand.func,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
|
/R/rjmcmc_cat_func.R
|
no_license
|
dfrancom/BASS
|
R
| false | false | 5,956 |
r
|
########################################################################
## perform RJMCMC step (birth, death, or change)
########################################################################
birth_cat_func<-function(curr,prior,data){
cand.cat<-genCandBasisCat(minInt=prior$minInt,maxInt=prior$maxInt.cat,I.vec=curr$I.vec.cat,z.vec=curr$z.vec.cat,p=data$pcat,xx=data$xx.cat,nlevels=data$nlevels,levels=data$levels,prior)
if(sum(cand.cat$basis!=0)<prior$npart.des)
return(curr)
cand.func<-genCandBasis(minInt=prior$minInt,maxInt=prior$maxInt.func,I.vec=curr$I.vec.func,z.vec=curr$z.vec.func,p=data$pfunc,xxt=data$xxt.func,q=prior$q,xx.unique.ind=data$unique.ind.func,vars.len=data$vars.len.func,prior)
if(sum(cand.func$basis!=0)<prior$npart.func)
return(curr)
if(cand.cat$n.int + cand.func$n.int == 0) # intercept
return(curr)
ata<-crossprod(cand.cat$basis)*crossprod(cand.func$basis)
Xta<-crossprod(curr$cat.basis,cand.cat$basis)*crossprod(curr$func.basis,cand.func$basis)
aty<-tcrossprod(crossprod(cand.cat$basis,data$y),cand.func$basis)
curr$Xty[curr$nc+1]<-aty
curr$XtX[1:curr$nc,curr$nc+1]<-Xta
curr$XtX[curr$nc+1,curr$nc+1]<-ata
qf.cand.list<-getQf(curr$XtX[1:(curr$nc+1),1:(curr$nc+1)],curr$Xty[1:(curr$nc+1)])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
## calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(.5/curr$s2*(qf.cand.list$qf-curr$qf) + log(curr$lam) - log(curr$nc) + log(data$death.prob.next/data$birth.prob) - cand.cat$lbmcmp - cand.func$lbmcmp)
## assign new values
if(log(runif(1)) < alpha){
curr<-addBasis(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisCat(curr,cand.cat,qf.cand.list,prior)
curr<-addBasisFunc(curr,cand.func,qf.cand.list,prior)
# if type has cat and des, want to update curr$dc.basis also
}
return(curr)
}
death_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
ind<-(1:curr$nc)[-(basis+1)]
qf.cand.list<-getQf(curr$XtX[ind,ind],curr$Xty[ind])
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr) # TODO: not sure why I need this, I shouldn't need it in theory
}
I.star.cat<-curr$I.star.cat
I.star.cat[curr$n.int.cat[basis]+1]<-I.star.cat[curr$n.int.cat[basis]+1]-1
I.vec.cat<-I.star.cat/sum(I.star.cat)
z.star.cat<-curr$z.star.cat
if(curr$n.int.cat[basis]>0)
z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]<-z.star.cat[curr$vars.cat[basis,1:curr$n.int.cat[basis]]]-1
z.vec.cat<-z.star.cat/sum(z.star.cat)
I.star.func<-curr$I.star.func
I.star.func[curr$n.int.func[basis]+1]<-I.star.func[curr$n.int.func[basis]+1]-1
I.vec.func<-I.star.func/sum(I.star.func)
z.star.func<-curr$z.star.func
if(curr$n.int.func[basis]>0)
z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]<-z.star.func[curr$vars.func[basis,1:curr$n.int.func[basis]]]-1
z.vec.func<-z.star.func/sum(z.star.func)
lpbmcmp<-0
if(curr$n.int.cat[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeModCat(curr$n.int.cat[basis],curr$vars.cat[basis,1:curr$n.int.cat[basis]],I.vec.cat,z.vec.cat,data$pcat,data$nlevels,curr$sub.size[basis,],prior$maxInt.cat,prior$miC)
}
if(curr$n.int.func[basis]>0){
lpbmcmp<-lpbmcmp+logProbChangeMod(curr$n.int.func[basis],curr$vars.func[basis,1:curr$n.int.func[basis]],I.vec.func,z.vec.func,data$pfunc,data$vars.len.func,prior$maxInt.func,prior$miC)
}
# calculate log acceptance probability
alpha<- data$itemp.ladder[curr$temp.ind]*(.5/curr$s2*(qf.cand.list$qf-curr$qf) - log(curr$lam) + log(data$birth.prob.last/data$death.prob) + log(curr$nbasis) + lpbmcmp)
if(log(runif(1)) < alpha){
curr<-deleteBasis(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisCat(curr,basis,ind,qf.cand.list,I.star.cat,I.vec.cat,z.star.cat,z.vec.cat)
curr<-deleteBasisFunc(curr,basis,ind,qf.cand.list,I.star.func,I.vec.func,z.star.func,z.vec.func)
}
return(curr)
}
change_cat_func<-function(curr,prior,data){
basis<-sample(1:curr$nbasis,size=1)
type.change<-sample(c('cat','func'),size=1,prob=c(curr$n.int.cat[basis],curr$n.int.func[basis]))
if(type.change=='cat'){
int.change<-sample(1:(curr$n.int.cat[basis]),size=1)
use<-1:curr$n.int.cat[basis]
cand.cat<-genBasisChangeCat(curr,basis,int.change,data$xx.cat,data$nlevels,data$levels,curr$sub.size[basis,use],curr$sub.list[[basis]],vars=curr$vars.cat[basis,use])
cand.func<-list(basis=curr$func.basis[,basis+1])
} else{
int.change<-sample(1:(curr$n.int.func[basis]),size=1)
use<-1:curr$n.int.func[basis]
cand.func<-genBasisChange(curr,basis,int.change,data$xxt.func,prior$q,knots=curr$knots.func[basis,use],knotInd=curr$knotInd.func[basis,use],signs=curr$signs.func[basis,use],vars=curr$vars.func[basis,use],xx.unique.ind=data$unique.ind.func)
cand.cat<-list(basis=curr$cat.basis[,basis+1])
}
if(sum(cand.cat$basis!=0)<prior$npart.des){
return(curr)
}
if(sum(cand.func$basis!=0)<prior$npart.func){
return(curr)
}
XtX.cand<-curr$XtX[1:curr$nc,1:curr$nc]
XtX.cand[basis+1,]<-XtX.cand[,basis+1]<-crossprod(curr$cat.basis,cand.cat$basis)*crossprod(curr$func.basis,cand.func$basis)
XtX.cand[basis+1,basis+1]<-crossprod(cand.cat$basis)*crossprod(cand.func$basis)
Xty.cand<-curr$Xty[1:curr$nc]
Xty.cand[basis+1]<-tcrossprod(crossprod(cand.cat$basis,data$y),cand.func$basis)
qf.cand.list<-getQf(XtX.cand,Xty.cand)
fullRank<-!is.null(qf.cand.list$qf)
if(!fullRank){
return(curr)
}
alpha<-data$itemp.ladder[curr$temp.ind]*.5/curr$s2*(qf.cand.list$qf-curr$qf)
if(log(runif(1))<alpha){
curr<-changeBasis(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='cat')
curr<-changeBasisCat(curr,cand.cat,basis,qf.cand.list,XtX.cand,Xty.cand)
if(type.change=='func')
curr<-changeBasisFunc(curr,cand.func,basis,qf.cand.list,XtX.cand,Xty.cand)
}
return(curr)
}
|
#' Remove extra white spaces before and after text.
#'
#' @param x a character vector to be 'trimmed'
#' @return a character vector without preceding and trailing whitespace
#' @export
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
#' Extract dates from lexis nexis downloads
#'
#' @param filename a character string that is the full path to the file
#' @param end_of_article a character string that denotes the pattern at the end of each article
#' @param skip number of lines to skip at top of file
#' @return a date vector
#' @export
extract_dates <- function(filename, end_of_article = "All Rights Reserved", skip = 11){
dat <- scan(filename,
what = "character", blank.lines.skip = TRUE,
sep = "\n", encoding = "UTF-8", skipNul = TRUE, skip = 0)
article_list <- wordtools::split_tx(dat, patt = end_of_article)
# find row number with dates
#date_row <- sapply(FUN = function(x){
# find_date_row(x)
#},
#X = article_list)
date_row <- sapply(FUN = function(x)which(is_date(trim(x)))[1], X = article_list)
#
predates <- vector(length = length(article_list))
for (i in 1:length(article_list)){
predates[i] <- trim(article_list[[i]][date_row[i]])
}
dates <- lubridate::as_date(predates, format = "%B %d, %Y")
return(dates)
}
#' Determine whether a given article (from lexis nexis) is a blog post
#'
#' @param x a character vector containing text for a single article
#' @param patt pattern that says that an article is a blog
#' @export
is_blog <- function(x, patt = "PUBLICATION-TYPE: Web Blog"){
sum(stringr::str_detect(x, patt)) == 1
}
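# Hedged usage sketch (the file path is hypothetical; assumes wordtools::split_tx()
# and is_date() are available, as the functions above expect):
# dates <- extract_dates("lexisnexis_export.txt")
# table(format(dates, "%Y-%m"))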
|
/R/extract_dates.R
|
no_license
|
fboehm/wordtools
|
R
| false | false | 1,608 |
r
|
#' Remove extra white spaces before and after text.
#'
#' @param x a character vector to be 'trimmed'
#' @return a character vector without preceding and trailing whitespace
#' @export
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
#' Extract dates from lexis nexis downloads
#'
#' @param filename a character string that is the full path to the file
#' @param end_of_article a character string that denotes the pattern at the end of each article
#' @param skip number of lines to skip at top of file
#' @return a date vector
#' @export
extract_dates <- function(filename, end_of_article = "All Rights Reserved", skip = 11){
dat <- scan(filename,
what = "character", blank.lines.skip = TRUE,
sep = "\n", encoding = "UTF-8", skipNul = TRUE, skip = 0)
article_list <- wordtools::split_tx(dat, patt = end_of_article)
# find row number with dates
#date_row <- sapply(FUN = function(x){
# find_date_row(x)
#},
#X = article_list)
date_row <- sapply(FUN = function(x)which(is_date(trim(x)))[1], X = article_list)
#
predates <- vector(length = length(article_list))
for (i in 1:length(article_list)){
predates[i] <- trim(article_list[[i]][date_row[i]])
}
dates <- lubridate::as_date(predates, format = "%B %d, %Y")
return(dates)
}
#' Determine whether a given article (from lexis nexis) is a blog post
#'
#' @param x a character vector containing text for a single article
#' @param patt pattern that says that an article is a blog
#' @export
is_blog <- function(x, patt = "PUBLICATION-TYPE: Web Blog"){
sum(stringr::str_detect(x, patt)) == 1
}
|
setwd(Sys.getenv("finemap"))
### Get data
dat = read.table("credible_sets_with_proxies_from_Jamie.txt", header=TRUE)
#dat = read.table("all_creds_plus_ld_a12.txt", header=TRUE)
#dat_old = read.table("credible_sets_with_proxies_from_Jamie_20191001.txt", header=TRUE)
dat$ChrPos = paste0("chr",dat$chromosome, ":",dat$position)
annot = read.csv(paste0(Sys.getenv("annot"), "/annovar_output_credible_sets_with_proxies.hg38_multianno.csv"))
annot$ChrPos = paste0("chr",annot$Chr, ":",annot$End)
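# Note: ChrPos for the ANNOVAR table uses the End coordinate; for single-nucleotide
# variants Start == End, so this matches the `position` field used for `dat` above.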
### Merge
#make sure chr:pos is a reasonable unique identifier for merging data sets
sum(!annot$ChrPos %in% dat$ChrPos)
sum(!dat$ChrPos %in% annot$ChrPos)
sum(duplicated(annot$ChrPos))
datnew = merge(dat, annot, by="ChrPos")
### Check for swapped alleles and flip beta when necessary
datnew$Allele1_new <- datnew$Ref
datnew$Allele2_new <- datnew$Alt
datnew$swap = (!is.na(datnew$Allele1) & !is.na(datnew$Allele2) & (datnew$Allele1==datnew$Allele2_new) & (datnew$Allele2==datnew$Allele1_new))
datnew$Effect_new[datnew$swap==TRUE] <- (-1)*datnew$Effect[datnew$swap==TRUE]
datnew$Effect_new[datnew$swap==FALSE] <- datnew$Effect[datnew$swap==FALSE]
### Reformat variables
datnew$OddsRatio = exp(datnew$Effect_new)
datnew$AF_1000G_AFR = datnew$X1000g2015aug_afr
datnew$AF_1000G_EUR = datnew$X1000g2015aug_eur
datnew$rsid_merged = sapply(datnew$ChrPos, function(x) {
ID1 = datnew[datnew$ChrPos==x & !is.na(datnew$ChrPos),"ID"];
ID2 = datnew[datnew$ChrPos==x & !is.na(datnew$ChrPos),"avsnp150"];
if (ID1==ID2) {rsid=ID2}
else if (ID1==".") {rsid=ID2}
else if (ID2==".") {rsid=ID1}
else {rsid=paste(ID1,ID2,sep=";")}
}
)
datnew$tag=gsub(".",":",datnew$tag, fixed=TRUE)
datnew$tag_rsid = sapply(datnew$tag, function(x) { rsid = datnew$rsid_merged[datnew$MarkerName==x & !is.na(datnew$MarkerName)]; if (rsid==".") {tag=x} else {tag=rsid}; return(tag)} )
datnew$ppsum = sapply(datnew$tag, function(x) {datnew$ppsum[datnew$MarkerName==x & !is.na(datnew$MarkerName)]})
datnew$credset_size = sapply(datnew$tag, function(x) {d = datnew[datnew$tag==x,]; return(dim(d)[1])})
### Order data.frame by chromosome and region
datnew$tag_pos = sapply(datnew$tag, function(x) {datnew$position[datnew$MarkerName==x & !is.na(datnew$MarkerName)]})
datnew$region_pos = sapply(datnew$tag, function(x) {reg = datnew$region[datnew$MarkerName==x & !is.na(datnew$MarkerName)]; as.numeric(strsplit(reg, split="[-:]", perl=TRUE)[[1]][2])})
datnew_ordered = datnew[order(datnew$chromosome, datnew$region_pos, datnew$tag_pos, datnew$position),]
### Generate tables for saving
fields = c("region","region_pos","MarkerName","ID","avsnp150","rsid_merged","tag","tag_rsid","tag_pos","credset_size","chromosome","position","Allele1_new","Allele2_new","AF_1000G_AFR","AF_1000G_EUR","OddsRatio","Effect_new","StdErr","P.value","ppsum","pp","excluded","cytoBand","Func.ensGene", "Func.refGene", "Gene.ensGene","Gene.refGene","ExonicFunc.ensGene","ExonicFunc.refGene", "AAChange.ensGene", "AAChange.refGene")
OUTTABLE_all = datnew_ordered[,fields]
OUTTABLE_protein_altering = OUTTABLE_all[OUTTABLE_all$ppsum>0.5 & (OUTTABLE_all$ExonicFunc.refGene %in% c("stopgain","nonsynonymous SNV","frameshift substitution") | OUTTABLE_all$Func.refGene=="splicing" | OUTTABLE_all$ExonicFunc.ensGene %in% c("stopgain","nonsynonymous SNV","frameshift substitution") | OUTTABLE_all$Func.ensGene=="splicing"), ]
OUTTABLE_top_candidates = OUTTABLE_protein_altering[!is.na(OUTTABLE_protein_altering$pp) & OUTTABLE_protein_altering$pp>0.1,]
### Save to files
write.table(OUTTABLE_all, file="credible_sets_with_proxies_from_Jamie_ANNOTATED.txt", sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(OUTTABLE_protein_altering, file="credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering.txt", sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(OUTTABLE_top_candidates, file="credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering_top_candidates.txt", sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
#rsync -v $rivanna:/nv/vol185/MEGA/release4/IMPUTED_TOPMED/fine_mapping/credible_sets_with_proxies_from_Jamie_ANNOTATED.txt .
#rsync -v $rivanna:/nv/vol185/MEGA/release4/IMPUTED_TOPMED/fine_mapping/credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering.txt .
#rsync -v $rivanna:/nv/vol185/MEGA/release4/IMPUTED_TOPMED/fine_mapping/credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering_top_candidates.txt .
|
/scripts/annotation/add_annotation_to_credible_set_table.R
|
no_license
|
jinshaw16/t1d-immunochip-2020
|
R
| false | false | 4,450 |
r
|
setwd(Sys.getenv("finemap"))
### Get data
dat = read.table("credible_sets_with_proxies_from_Jamie.txt", header=TRUE)
#dat = read.table("all_creds_plus_ld_a12.txt", header=TRUE)
#dat_old = read.table("credible_sets_with_proxies_from_Jamie_20191001.txt", header=TRUE)
dat$ChrPos = paste0("chr",dat$chromosome, ":",dat$position)
annot = read.csv(paste0(Sys.getenv("annot"), "/annovar_output_credible_sets_with_proxies.hg38_multianno.csv"))
annot$ChrPos = paste0("chr",annot$Chr, ":",annot$End)
### Merge
#make sure chr:pos is a reasonable unique identifier for merging data sets
sum(!annot$ChrPos %in% dat$ChrPos)
sum(!dat$ChrPos %in% annot$ChrPos)
sum(duplicated(annot$ChrPos))
datnew = merge(dat, annot, by="ChrPos")
### Check for swapped alleles and flip beta when necessary
datnew$Allele1_new <- datnew$Ref
datnew$Allele2_new <- datnew$Alt
datnew$swap = (!is.na(datnew$Allele1) & !is.na(datnew$Allele2) & (datnew$Allele1==datnew$Allele2_new) & (datnew$Allele2==datnew$Allele1_new))
datnew$Effect_new[datnew$swap==TRUE] <- (-1)*datnew$Effect[datnew$swap==TRUE]
datnew$Effect_new[datnew$swap==FALSE] <- datnew$Effect[datnew$swap==FALSE]
### Reformat variables
datnew$OddsRatio = exp(datnew$Effect_new)
datnew$AF_1000G_AFR = datnew$X1000g2015aug_afr
datnew$AF_1000G_EUR = datnew$X1000g2015aug_eur
datnew$rsid_merged = sapply(datnew$ChrPos, function(x) {
ID1 = datnew[datnew$ChrPos==x & !is.na(datnew$ChrPos),"ID"];
ID2 = datnew[datnew$ChrPos==x & !is.na(datnew$ChrPos),"avsnp150"];
if (ID1==ID2) {rsid=ID2}
else if (ID1==".") {rsid=ID2}
else if (ID2==".") {rsid=ID1}
else {rsid=paste(ID1,ID2,sep=";")}
}
)
datnew$tag=gsub(".",":",datnew$tag, fixed=TRUE)
datnew$tag_rsid = sapply(datnew$tag, function(x) { rsid = datnew$rsid_merged[datnew$MarkerName==x & !is.na(datnew$MarkerName)]; if (rsid==".") {tag=x} else {tag=rsid}; return(tag)} )
datnew$ppsum = sapply(datnew$tag, function(x) {datnew$ppsum[datnew$MarkerName==x & !is.na(datnew$MarkerName)]})
datnew$credset_size = sapply(datnew$tag, function(x) {d = datnew[datnew$tag==x,]; return(dim(d)[1])})
### Order data.frame by chromosome and region
datnew$tag_pos = sapply(datnew$tag, function(x) {datnew$position[datnew$MarkerName==x & !is.na(datnew$MarkerName)]})
datnew$region_pos = sapply(datnew$tag, function(x) {reg = datnew$region[datnew$MarkerName==x & !is.na(datnew$MarkerName)]; as.numeric(strsplit(reg, split="[-:]", perl=TRUE)[[1]][2])})
datnew_ordered = datnew[order(datnew$chromosome, datnew$region_pos, datnew$tag_pos, datnew$position),]
### Generate tables for saving
fields = c("region","region_pos","MarkerName","ID","avsnp150","rsid_merged","tag","tag_rsid","tag_pos","credset_size","chromosome","position","Allele1_new","Allele2_new","AF_1000G_AFR","AF_1000G_EUR","OddsRatio","Effect_new","StdErr","P.value","ppsum","pp","excluded","cytoBand","Func.ensGene", "Func.refGene", "Gene.ensGene","Gene.refGene","ExonicFunc.ensGene","ExonicFunc.refGene", "AAChange.ensGene", "AAChange.refGene")
OUTTABLE_all = datnew_ordered[,fields]
OUTTABLE_protein_altering = OUTTABLE_all[OUTTABLE_all$ppsum>0.5 & (OUTTABLE_all$ExonicFunc.refGene %in% c("stopgain","nonsynonymous SNV","frameshift substitution") | OUTTABLE_all$Func.refGene=="splicing" | OUTTABLE_all$ExonicFunc.ensGene %in% c("stopgain","nonsynonymous SNV","frameshift substitution") | OUTTABLE_all$Func.ensGene=="splicing"), ]
OUTTABLE_top_candidates = OUTTABLE_protein_altering[!is.na(OUTTABLE_protein_altering$pp) & OUTTABLE_protein_altering$pp>0.1,]
### Save to files
write.table(OUTTABLE_all, file="credible_sets_with_proxies_from_Jamie_ANNOTATED.txt", sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(OUTTABLE_protein_altering, file="credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering.txt", sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
write.table(OUTTABLE_top_candidates, file="credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering_top_candidates.txt", sep="\t", col.names=TRUE, row.names=FALSE, quote=FALSE)
#rsync -v $rivanna:/nv/vol185/MEGA/release4/IMPUTED_TOPMED/fine_mapping/credible_sets_with_proxies_from_Jamie_ANNOTATED.txt .
#rsync -v $rivanna:/nv/vol185/MEGA/release4/IMPUTED_TOPMED/fine_mapping/credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering.txt .
#rsync -v $rivanna:/nv/vol185/MEGA/release4/IMPUTED_TOPMED/fine_mapping/credible_sets_with_proxies_from_Jamie_ANNOTATED_protein_altering_top_candidates.txt .
|
# Standard preprocessing
source("../Functions/networkFunctions-extras-19.R")
source("../Functions/labelPoints2-01.R");
source("../Functions/outlierRemovalFunctions.R")
source("../Functions/preprocessing-General-013.R")
source("../Functions/GNVFunctions-016.R")
library(anRichment)
dir.create("RData", recursive = TRUE);
dir.create("Results", recursive = TRUE);
dir.create("Plots", recursive = TRUE);
source("../../../../RLibs/inHouseGeneAnnotation/anRichmentMethods/R/enrichmentAnalysis.R");
# Load data
baseDir = "../Data/TCGA/";
setNames = c("LICA-FR", "LIHC-US", "LIRI-JP");
exprDir.11 = file.path(baseDir, "Expression/011-RawData-Reformatted");
nSets = length(setNames)
counts0 = list();
for (set in 1:nSets)
{
counts0[[set]] = loadTable(file = gzfile(spaste(exprDir.11, "/countMatrix-", setNames[set], ".csv.gz")),
transpose = TRUE, convertToMatrix = TRUE, sep = ",", header = TRUE);
}
names(counts0) = setNames;
sampleDir.11 = file.path(baseDir, "SampleAnnotation/011-Combined");
sampleAnnot1 = lapply(setNames, function(.name)
read.csv( file = gzfile(spaste(exprDir.11, "/sampleData-ICGCcombinedAnnotation-", .name, ".csv.gz"))));
names(sampleAnnot1) = setNames;
clinicalData1 = read.csv(file.path("../Data/TCGA/SampleAnnotation/010-AsSupplied",
"List_of_HCC_samples_in_TCGA_and_ICGC-ICGCLiverAndNormalTissue-Bioportal.csv"),
check.names = FALSE)
sampleData2 = read.csv(file.path("../Data/TCGA/SampleAnnotation/010-AsSupplied",
"List_of_HCC_samples_in_TCGA_and_ICGC-ICGCLiverAndNormalTissue.csv"),
check.names = FALSE)
ensemblAnnot = read.csv(gzfile("../Data/Annotation/Ensembl/Homo_sapiens_GRCh37p7.txt.gz"));
NCBIAnnot = read.csv(gzfile("../Data/Annotation/NCBI/geneAnnotation-NCBIHuman37.3.csv.gz"));
lapply(sampleAnnot1, function(sa) table(sa$icgc_specimen_id %in% sampleData2$icgc_specimen_id))
sampleID1 = lapply(sampleAnnot1, getElement, "icgc_specimen_id");
sampleID2 = lapply(sampleAnnot1, getElement, "submitted_specimen_id");
sampleID3 = sampleID2;
sampleID3[[2]] = sub("[A-Z]$", "", sampleID3[[2]]);
rows.clinicalData1 = lapply(sampleID3, match, clinicalData1$`Sample ID`)
lapply(rows.clinicalData1, function(x) table(is.na(x)))
clinicalData1.IDs = clinicalData1[ , grep("ID", names(clinicalData1))];
#============================================================================================================
#
# Fish out clinical data.
#
#============================================================================================================
# We're missing a lot of the samples, but at least we have some.
sampleAnnot2 = mymapply(function(sa, rows)
data.frame(sa, clinicalData1[rows, !names(clinicalData1) %in% names(sa)], check.names = FALSE),
sampleAnnot1, rows.clinicalData1); ### This one contains lots of missing samples
ensemblAnnot2 = setNames(ensemblAnnot, multiSub(c("EntrezGene.ID", "Gene.Start..bp.", "Chromosome.Name", "HGNC.symbol"),
c("Entrez", "Loc", "Chr", "Symbol"), names(ensemblAnnot)));
geneAnnot0 = list();
geneAnnot0[[1]] = data.frame(GeneID = colnames(counts0[[1]]),
ensemblAnnot2[ match(colnames(counts0[[1]]), ensemblAnnot2$Ensembl.Gene.ID), ])
table(is.na(geneAnnot0[[1]]$Ensembl.Gene.ID))
# FALSE TRUE
# 46812 6268
for (set in 2:3)
{
geneAnnot0[[set]] = data.frame(GeneID = colnames(counts0[[set]]),
geneAnnotationFromEntrez(convert2entrez(symbol = colnames(counts0[[set]]), organism = "human"),
organism = "human", includePosition = TRUE));
}
sapply(geneAnnot0, function(ga) sum(!is.na(ga$Entrez)))
#[1] 21817 19903 21721
#===============================================================================================================
#
# prettifyList
#
#===============================================================================================================
prettifyList = list(from = "A", to = "A");
as.data.frame(prettifyList)
sampleAnnot3 = list2multiData(sampleAnnot2);
names(counts0) = names(geneAnnot0) = names(sampleAnnot2) = setNames;
maxInt = 2^31-1;
counts0 = lapply(counts0, function(x) {x[x > maxInt] = maxInt; x});
counts1 = list2multiData(counts0);
#===============================================================================================================
#
# predict sex from expression data
#
#===============================================================================================================
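# Added note: XIST is expressed from the inactive X chromosome, so normalized XIST
# expression is high in female samples; each sample is assigned the sex whose
# median XIST level (computed from the reported donor_sex) it sits closest to,
# and the prediction is cross-tabulated against the reported sex as a QC check.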
library(DESeq2)
genes = c("XIST")
for (set in 1:nSets)
{
sa = sampleAnnot3[[set]]$data;
rownames(sa) = rownames(counts1[[set]]$data);
ds1 = DESeqDataSetFromMatrix.PL(t(counts1[[set]]$data), colData = data.frame(a = rnorm(nrow(sa))), design = ~1);
ds1 = estimateSizeFactors(ds1);
expr1 = log2(t(counts(ds1, normalized = TRUE)) + 1);
for (gene in genes)
{
index = match(gene, geneAnnot0[[set]]$Symbol);
cn1 = expr1[, index]
pdf(file = spaste("Plots/expressionVsSex-", gene, "-", setNames[set], ".pdf"), wi = 5, he = 5);
scpp(1.3);
labeledBoxplot(tapply(cn1, sampleAnnot3[[set]]$data$donor_sex, identity),
names = sort(unique(sampleAnnot3[[set]]$data$donor_sex)),
main = setNames[set], ylab = spaste(gene, " expression"),
xlab = "Sex", addScatterplot = TRUE, notch = TRUE);
dev.off();
normals = tapply(cn1, sampleAnnot3[[set]]$data$donor_sex, median, na.rm = TRUE)
predictedSex = names(normals)[ apply( do.call(cbind, lapply(normals, function(x) abs(cn1-x))),
1, which.min)]
printFlush(spaste("=========== ", setNames[set], " ======================="));
print(table(predictedSex, sampleAnnot3[[set]]$data$donor_sex));
sampleAnnot3[[set]]$data$predictedSex = predictedSex;
}
}
#================================================================================================================
#
# Preprocessing
#
#================================================================================================================
nSamples = checkSets(counts1, checkStructure = TRUE)$nSamples
traitsOfInterest = c("Disease Stage", "Liver fibrosis ishak score category", "Tumor Size", "Vascular Invasion",
"Subtype");
nWithData = do.call(cbind, mtd.apply(sampleAnnot3, function(x) sapply(traitsOfInterest, function(tr)
{
x1 = x[[tr]];
printFlush("================================================================")
printFlush(tr);
print(table(!is.na(x1)));
print(table(x1));
sum(!is.na(x1));
}), returnList = TRUE))
colnames(nWithData) = setNames;
nWithData = rbind(nSamples = t(as.matrix(c(mtd.apply(sampleAnnot3, nrow, mdaSimplify = TRUE)))), nWithData)
rownames(nWithData)[1] = "Total samples";
write.csv.nr(dataForTable(nWithData, transpose = FALSE, IDcolName = "Trait"), "Results/numbersOfSamplesWithClinicalData.csv");
traits1 = c("donor_sex", "donor_age_at_diagnosis", "specimen_type",
"specimen_donor_treatment_type",
"tumour_histological_type", traitsOfInterest);
mtd.apply(mtd.subset(sampleAnnot3, , traits1), lapply, table);
# For pheno colors: drop uninformative columns
sampleAnnot4 = mtd.apply(mtd.subset(sampleAnnot3, , traits1), dropConstantColumns);
phenoColors = mtd.apply(sampleAnnot4, dataFrame2colors, maxOrdinalLevels = 5, maxLegendLength = 120)
library(sva)
library(DESeq2)
groupProp = rep(1/4, nSets);
prepr = list();
for (set in 1:nSets)
prepr[[set]] = list(data = preprocessGeneral(
# Main input: xdata
xdata = counts1[[set]]$data,
analysisID = setNames[set],
sampleAnnotation = sampleAnnot3[[set]]$data,
phenoColors = phenoColors[[set]]$data,
geneAnnotation = geneAnnot0[[set]],
stp.mainBase = setNames[set],
minProportion = groupProp[set],
bw.groupsForMinWeightRestriction = mtd.subset(sampleAnnot3, , "donor_sex", drop = TRUE)[[set]]$data,
organism = "human",
# Flow control, for some measure of interactivity
# intermediateResults = prepr[[set]]$data,
#stopAt = "30-VST",
addToAnalysisManager = FALSE,
idsAreSymbols = FALSE,
minValue = 1,
vst.design = "~donor_sex + specimen_type",
adj.calculateNPCs = 4,
adj.removedCovariates = NULL,
adj.calculateSVAFactors = FALSE,
adj.sva.useNFactors = 1,
adj.svaModel = "~ ",
bw.groupBy = c("donor_sex", "specimen_type"),
bw.otherArgs = list(maxPOutliers = 0.10,
outlierReferenceWeight = 0.1,
minWeightInGroups = 0.9,
maxPropUnderMinWeight = 0.4,
defaultWeight = 1),
# Outlier removal options
outlierRemovalZ = 6,
iorFromHEGenes = TRUE,
ior.replace = FALSE,
ior.remove = FALSE,
restrictIORExprToHE = TRUE,
restrictIORXDataToHE = FALSE,
stp.width = 15,
stp.height = 5,
stp.mainSep = ", ",
stp.marAll = c(1, 20, 3, 1),
stp.maxCharPerLine = 60,
plotDir = "Plots",
pca.correctForColumn = NULL,
pca.colorColumn = "specimen_type",
pca.colorPrefix = "",
pca.shapeColumn = "donor_sex",
pca.shapePrefix = "",
pca.mainSep = ", ",
pca.legendWidth = 3.5,
indent = 2, verbose = 5,
saveDir.base = "../Data/TCGA"))
dir.create("RData", recursive = TRUE)
save(prepr, file = "RData/prepr.RData");
#load(file = "RData/prepr.RData");
multiExpr = mtd.apply(prepr, lastHE.VST.OR, "OSR", "expr.OSR");
multiCounts.all = mtd.apply(prepr, lastHE.VST.OR, "OSR", "xdata.OSR");
multiCounts = mtd.mapply(function(x, ref) x[, colnames(ref)], multiCounts.all, multiExpr);
multiWeights = mtd.apply(prepr, function(p) p$IOR$weightsForIOR);
weightFactors = mtd.apply(prepr, function(p) p$IOR$weightFactorsForIOR);
mtd.apply(multiWeights, function(x) table(x >0.1)/length(x))
mtd.apply(weightFactors, function(x) table(x >1)/length(x))
multiSampleAnnot = mtd.apply(prepr, lastHE.VST.OR, "OSR", "sampleAnnotation.OSR");
multiGeneAnnot = mtd.apply(prepr, lastHE.VST.OR, "HEFilter", "geneAnnotation.he");
names(multiCounts) = names(multiExpr) = names(multiSampleAnnot) = names(multiGeneAnnot) = setNames;
names(multiWeights) = names(weightFactors) = setNames;
numericCols = mtd.apply(multiSampleAnnot, sapply, is.numeric);
multiNumericPheno = mtd.subset(multiSampleAnnot, , numericCols[[1]]$data);
multiNumericPheno = mtd.subset(multiNumericPheno, ,
multiGrep(c("SVAFactor.[2-9]", "PrincipalComponent.[2-9]"), mtd.colnames(multiNumericPheno), invert = TRUE));
save(multiCounts, multiExpr, multiSampleAnnot, multiGeneAnnot, multiWeights, #weightFactors,
prettifyList, multiNumericPheno,
setNames, file = "RData/preprocessedData.RData");
sink(file = "Results/sessionInfo.txt");
sessionInfo();
sink(NULL);
#=============================================================================================================
#
# Sanity check for weights/weight factors
#
#=============================================================================================================
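# Added note: the plotting code below colors points by a `Diets` column in the
# sample annotation; that column is not created anywhere in this TCGA/ICGC script,
# so this block appears to be carried over from another project and may need
# adjusting before it runs on these data.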
nSamples.OR = checkSets(multiExpr, checkStructure = TRUE)$nSamples;
nPlots = 24;
pdf(file = "Plots/GenesWithOutlierWeights.pdf", wi = 15, he = 10)
for (set in 1:nSets)
{
ord = order(-weightFactors[[set]]$data);
plotIndex = ord[1:nPlots];
plotGenes = unique(floor((plotIndex-1)/nSamples.OR[set]) + 1);
par(mfrow = c(4,6));
scpp(1.4);
par(cex = 1);
for (g in plotGenes)
{
plot(multiExpr[[set]]$data[, g], weightFactors[[set]]$data[, g],
pch = 21,
bg = labels2colors(as.numeric(factor(multiSampleAnnot[[set]]$data$Diets))), cex = 2,
main = spaste(multiGeneAnnot[[set]]$data$Gene[g], ", chr ", multiGeneAnnot[[set]]$data$Chr[g]),
xlab = "VST expression", ylab = "Outlier statistic");
abline(h = 1, col = "grey");
}
}
for (set in 1:nSets)
{
ord = order(-weightFactors[[set]]$data);
plotIndex = ord[1:nPlots];
plotGenes = unique(floor((plotIndex-1)/nSamples.OR[set]) + 1);
x= as.numeric(factor(multiSampleAnnot[[set]]$data$Diets));
xl = levels(factor(multiSampleAnnot[[set]]$data$Diets));
par(mfrow = c(4,2));
scpp(1.4);
par(cex = 1);
for (g in plotGenes)
{
plot(x, multiExpr[[set]]$data[, g],
pch = 21,
bg = numbers2colors(weightFactors[[set]]$data[, g], colors = grey.colors(100, start = 0, end = 1), lim = c(0,1)),
cex = 2,
main = spaste(multiGeneAnnot[[set]]$data$Gene[g], ", chr ", multiGeneAnnot[[set]]$data$Chr[g]),
ylab = "VST expression", xlab = "Age", xaxt = "n");
axis(1, at = sort(unique(x)), labels = xl, cex.axis = 0.5);
abline(h = 1, col = "grey");
}
}
dev.off()
#=============================================================================================================
#
# Check if the removed 1st PC/1st SVA factor correlates with any of the traits.
#
#=============================================================================================================
cp = corAndPvalue(multiNumericPheno[[1]]$data);
diag(cp$p) = NA
pdf(file = "Plots/correlationHeatmapOfTraitsAndSurrogateVariables.pdf", wi = 17, he = 10);
corHeatmapWithDendro(cp$cor, pValues = cp$p, main = "Correlations of phenotypes and surrogate variables",
mar.main = c(14, 18, 2,1), cex.text = 0.6, dendroWidth = 0.17);
dev.off()
|
/110-Preprocessing-TCGA/010-preprocessing.R
|
no_license
|
plangfelder/Core-liver-homeostatic-networks
|
R
| false | false | 13,429 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CatEncodeFit.R
\name{encode_generic_fit}
\alias{encode_generic_fit}
\title{A fit function to encode categorical data}
\usage{
encode_generic_fit(x, fit)
}
\arguments{
\item{x}{Any categorical vector which needs to be encoded}
\item{fit}{A list returned from "BestCatEncode" that is used to fit the test data.}
}
\value{
Returns the encoded data vector
}
\description{
Detects the categorical variables and treats them based on the fit object generated from the training data.
}
|
/man/encode_generic_fit.Rd
|
no_license
|
akunuriYoshitha/CatEncode
|
R
| false | true | 544 |
rd
|
|
# Required libraries
library(tidyverse)
library(dplyr)
####################################################################################
# Method Where between (value types: numeric, date)
filter_where_between <- function(df_where_between,column_name,value1,value2) {
df_where_between %>% filter(between(column_name,value1,value2))
}
# Call the function to filter the data set
filter_where_between(df, df$Population, 10000, 50000)
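# Illustrative sketch only: the same helper should also work for a date column,
# assuming df has a column of class Date (the column name here is hypothetical):
# filter_where_between(df, df$Date, as.Date("2020-01-01"), as.Date("2020-12-31"))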
####################################################################################
|
/sources/4-wranglers/3-reduce/2-filter-by-number/patterns/r/filter-between.R
|
permissive
|
UN-AVT/kamino-source
|
R
| false | false | 526 |
r
|
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{should_stop}
\alias{should_stop}
\title{Used in examples to illustrate when errors should occur.}
\usage{
should_stop(expr)
}
\arguments{
\item{expr}{code to evaluate.}
}
\description{
Used in examples to illustrate when errors should occur.
}
\examples{
should_stop(stop("Hi!"))
should_stop(should_stop("Hi!"))
}
\keyword{internal}
|
/man/should_stop.Rd
|
no_license
|
richierocks/ggplot2
|
R
| false | false | 393 |
rd
|
|
/transform.R
|
no_license
|
uayeb25/survey-cleaning
|
R
| false | false | 3,062 |
r
| ||
# Create project folder
# getwd() # Find the working directory
# setwd('..') # Go up one level
# Create project
library('ProjectTemplate')
create.project('letters')
# Load the project
setwd("letters")
library('ProjectTemplate')
load.project()
# Load libraries - config/global.dcf
# First, we have to edit the config/global.dcf file to make sure that the load_libraries setting is turned on:
# --> load_libraries: TRUE
# Second, we need to make sure that the plyr package will be loaded automatically when we run load.project().
# To do that, we check that the value of the libraries configuration setting contains plyr
# To stop recomputing work we've already cached, we edit our configuration file and turn munging off:
# --> munging: FALSE
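# An illustrative config/global.dcf excerpt (load_libraries, libraries and munging are the
# settings mentioned above; the exact library list is just an example):
# load_libraries: TRUE
# libraries: plyr
# munging: FALSE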
# Caching data
cache('first.letter.counts')
cache('second.letter.counts')
# Now when we reload our project we see the following:
library('ProjectTemplate')
load.project()
# Run analyses
source('src/generate_plots.R')
|
/Backup/rProject/rprojectSPLa/ProjectTemplate/ProjectTemplate01.r
|
permissive
|
UTexas80/gitSPL
|
R
| false | false | 1,022 |
r
|
|
###
# example for testing estimation of mixed effect,
# using five covariates and no fixed effect
###
library(testthat)
library(GHmixedeffect)
n.pers <- 100
n.obs <- 20
sd_beta <- 0.2
sd_Y <- 0.1
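# Simulate data: for each of the n.pers subjects, a design matrix B, subject-specific
# coefficients drawn around beta with sd sd_beta, and Gaussian observations with sd sd_Y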
B_list <- list()
beta <- c(0.9,0.4,0.2,0.2,0.1)
beta_list <- list()
Y_list <- list()
for(i in 1:n.pers)
{
B_list[[i]] <- cbind(rep(1, n.obs), (1:n.obs) / n.obs, runif(n.obs), rnorm(n.obs), rnorm(n.obs) )
beta_list[[i]] <- beta + sd_beta*rnorm(n = length( beta), 0, sd = 1)
Y_list[[i]] <- rnorm(n = n.obs, B_list[[i]]%*%beta_list[[i]], sd = sd_Y)
}
meas_list <- list(sigma_eps = 0.1, noise = "Normal")
mixedEffect_list <- list(B_random = B_list,
Sigma = sd_beta*diag(5),
beta_random =rep(0,5),
noise = "Normal")
input <- list(Y = Y_list,
mixedEffect_list = mixedEffect_list,
measurementError_list = meas_list,
nSim = 2,
alpha = 0.3,
step0 = 1,
Niter = 1000)
res <- estimateME(input)
test_that("simple Gaussian-Gaussian random effect (5 cov)",
{
expect_equal( res$mixedEffect_list$beta_random, beta, tolerance = 0.1)
})
test_that("simple Gaussian-Gaussian measuerment sigma (5 cov)",
{
expect_equal( res$measurementError_list$sigma, sd_Y, tolerance = 0.1)
})
test_that("simple Gaussian-Gaussian measuerment Sigma (5 cov)",
{
expect_equal( mean(diag(res$mixedEffect_list$Sigma)-sd_beta^2), 0, tolerance = 0.05)
})
|
/GHmixedeffect/test/test_normalMixedEffect2.R
|
no_license
|
JonasWallin/LangLong
|
R
| false | false | 1,503 |
r
|
|
#' Creates a plotly manhattan plot
#'
#' Creates an interactive manhattan plot with multiple annotation options
#'
#' @param x Can be an object of class \code{manhattanr} produced by the
#' \code{\link{manhattanr}} function or a \code{data.frame} which must contain
#' at least the following three columns: \itemize{ \item{the chromosome
#' number} \item{genomic base-pair position} \item{a numeric quantity to plot
#' such as a p-value or zscore} }
#' @param col A character vector indicating the colors of each chromosome. If
#' the number of colors specified is less than the number of unique
#' chromosomes, then the elements will be recycled. Can be
#' \href{http://www.rapidtables.com/web/color/RGB_Color.htm}{Hex Codes} as
#' well.
#' @param point_size A \code{numeric} indicating the size of the points on the
#' plot. Default is 5
#' @param labelChr A character vector equal to the number of chromosomes
#' specifying the chromosome labels (e.g., \code{c(1:22, "X", "Y", "MT")}).
#' Default is \code{NULL}, meaning that the actual chromosome numbers will be
#' used.
#' @param suggestiveline Where to draw a "suggestive" line. Default is
#' \code{-log10(1e-5)}. Set to \code{FALSE} to disable.
#' @param suggestiveline_color color of "suggestive" line. Only used if
#' \code{suggestiveline} is not set to \code{FALSE}. Default is \code{"blue"}.
#' @param suggestiveline_width Width of \code{suggestiveline}. Default is 1.
#' @param genomewideline Where to draw a "genome-wide significant" line. Default
#'   \code{-log10(5e-8)}. Set to \code{FALSE} to disable.
#' @param genomewideline_color color of "genome-wide significant" line. Only used
#' if \code{genomewideline} is not set to \code{FALSE}. Default is
#' \code{"red"}.
#' @param genomewideline_width Width of \code{genomewideline}. Default is 1.
#' @param highlight A character vector of SNPs in your dataset to highlight.
#' These SNPs should all be in your dataset. Default is \code{NULL} which
#' means that nothing is highlighted.
#' @param highlight_color Color used to highlight points. Only used if
#' \code{highlight} argument has been specified
#' @param showlegend Should a legend be shown. Default is \code{FALSE}.
#' @param showgrid Should gridlines be shown. Default is \code{FALSE}.
#' @param xlab X-axis label. Default is \code{NULL} which means that the label
#' is automatically determined by the \code{\link{manhattanr}} function.
#' Specify here to overwrite the default.
#' @param ylab Y-axis label. Default is \code{"-log10(p)"}.
#' @param title Title of the plot. Default is \code{"Manhattan Plot"}
#' @param ... other parameters passed to \code{\link{manhattanr}}
#' @inheritParams manhattanr
#' @note This package is inspired by the
#' \href{https://github.com/stephenturner/qqman}{\code{qqman}} package by
#' \href{http://www.gettinggeneticsdone.com/}{Stephen Turner}. Much of the
#' plot format and pre-processing is the same. This package provides
#' additional annotation options and builds on the \code{\link{plotly}}
#' \code{d3.js} engine. These plots can be included in Shiny apps, Rmarkdown
#'   documents or embedded in websites using simple HTML code.
#' @return An interactive manhattan plot.
#' @seealso \code{\link{manhattanr}}, \code{\link{HapMap}},
#' \code{\link{significantSNP}}, \code{\link[qqman]{manhattan}},
#' \url{https://github.com/stephenturner/qqman},
#' \href{https://github.com/nstrayer/D3ManhattanPlots}{D3ManhattanPlots}
#' @aliases manhattanly.default manhattanly.manhattanr
#' @importFrom magrittr '%<>%'
#' @import plotly
#' @export
#' @examples
#' \dontrun{
#' library(manhattanly)
#' manhattanly(HapMap)
#'
#' # highlight SNPs of interest
#' # 'significantSNP' is a character vector of SNPs included in this package
#' manhattanly(HapMap, snp = "SNP", highlight = significantSNP)
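#'
#' # An illustrative extra example (not from the original docs): the threshold lines
#' # can be moved or disabled through the existing arguments
#' manhattanly(HapMap, genomewideline = -log10(1e-8), suggestiveline = FALSE)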
#' }
manhattanly <- function(x,
# col = colorRampPalette(RColorBrewer::brewer.pal(n = 9, name = "Set1"))(nchr),
# col = RColorBrewer::brewer.pal(n = 9, name = "Greys"),
...,
col = c("#969696", "#252525"),
point_size = 5,
labelChr = NULL,
suggestiveline = -log10(1e-5),
suggestiveline_color = "blue",
suggestiveline_width = 1,
genomewideline = -log10(5e-8),
genomewideline_color = "red",
genomewideline_width = 1,
highlight = NULL,
highlight_color = "#00FF00",
showlegend = FALSE,
showgrid = FALSE,
xlab = NULL,
ylab = "-log10(p)",
title = "Manhattan Plot") {
UseMethod("manhattanly")
}
#' @export
manhattanly.default <- function(x,
...,
col = c("#969696", "#252525"),
point_size = 5,
labelChr = NULL,
suggestiveline = -log10(1e-5),
suggestiveline_color = "blue",
suggestiveline_width = 1,
genomewideline = -log10(5e-8),
genomewideline_color = "red",
genomewideline_width = 1,
highlight = NULL,
highlight_color = "#00FF00",
showlegend = FALSE,
showgrid = FALSE,
xlab = NULL,
ylab = "-log10(p)",
title = "Manhattan Plot") {
mh <- manhattanr(x, ...)
nchr <- mh$nchr
manhattanly.manhattanr(mh,
col = col,
labelChr = labelChr,
point_size = point_size,
suggestiveline = suggestiveline,
suggestiveline_color = suggestiveline_color,
suggestiveline_width = suggestiveline_width,
genomewideline = genomewideline,
genomewideline_color = genomewideline_color,
genomewideline_width = genomewideline_width,
highlight = highlight,
highlight_color = highlight_color,
showlegend = showlegend,
showgrid = showgrid,
xlab = xlab,
ylab = ylab,
title = title)
}
#' @export
manhattanly.manhattanr <- function(x,
...,
col = c("#969696", "#252525"),
point_size = 5,
labelChr = NULL,
suggestiveline = -log10(1e-5),
suggestiveline_color = "blue",
suggestiveline_width = 1,
genomewideline = -log10(5e-8),
genomewideline_color = "red",
genomewideline_width = 1,
highlight = NULL,
highlight_color = "#00FF00",
showlegend = FALSE,
showgrid = FALSE,
xlab = NULL,
ylab = "-log10(p)",
title = "Manhattan Plot") {
# x <- manhattanr(gwasResults)
# x <- manhattanr(kk, annotation1 = "ZSCORE", annotation2 = "EFFECTSIZE")
# x <- manhattanr(kk, annotation1 = "ZSCORE")
# x <- manhattanr(kk, annotation1 = "ZSCORE", annotation2 = "EFFECTSIZE")
# x <- manhattanr(HapMap, snp = "SNP", gene = "GENE")
#
# x$data %>% head
# str(x$data)
# labelChr <- NULL
# col <- colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name ="Set1")))(22)
# showgrid <- TRUE
# labelChr = NULL
# point_size = 5
# suggestiveline = -log10(1e-5)
# genomewideline = -log10(5e-8)
# suggestiveline_color = "blue"
# genomewideline_color = "red"
# suggestiveline_width = genomewideline_width = 1;
# highlight_color = "#00FF00"
# highlight = c(significantSNP, x$data$SNP[1:20])
# showlegend = TRUE
# showgrid = TRUE
# ylab = "-log10(p)"
# xlab = NULL
# title = "Manhattan Plot"
# col = c("#969696", "#252525")
#########
d <- x$data
pName <- x$pName
snpName <- x$snpName
geneName <- x$geneName
annotation1Name <- x$annotation1Name
annotation2Name <- x$annotation2Name
labs <- x$labs
xlabel <- x$xlabel
ticks <- x$ticks
nchr <- x$nchr
  if (!is.null(highlight) & is.na(snpName)) stop("You're trying to highlight SNPs, but haven't provided a snp column")
# Initialize plot
xmax = ceiling(max(d$pos) * 1.03)
xmin = floor(max(d$pos) * -0.03)
# If manually specifying chromosome labels, ensure a character vector
# and number of labels matches number chrs.
if (!is.null(labelChr)) {
if (is.character(labelChr)) {
if (length(labelChr)==length(labs)) {
labs <- labelChr
} else {
warning("You're trying to specify chromosome labels but the number of labels != number of chromosomes.")
}
} else {
warning("If you're trying to specify chromosome labels, labelChr must be a character vector")
}
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  # Initialize plotly
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
p <- plotly::plot_ly()
# Add an axis.
if (nchr == 1) {
#If single chromosome, ticks and labels automatic.
p %<>% plotly::layout(p,
title = title,
xaxis = list(
title = if(!is.null(xlab)) xlab else xlabel,
# title = "ll",
showgrid = showgrid,
range = c(xmin, xmax)
),
yaxis = list(
title = ylab)#,
#range = c(0,ceiling(max(d$logp)))
#)
)
} else {
# if multiple chrs, use the ticks and labels you created above.
p %<>% plotly::layout(p,
title = title,
xaxis = list(
title = if(!is.null(xlab)) xlab else "Chromosome",
# title = "ll",
showgrid = showgrid,
range = c(xmin, xmax),
autotick = FALSE,
tickmode = "array",
tickvals = ticks,
ticktext = labs,
ticks = "outside"
),
yaxis = list(
title = ylab)#,
#range = c(0,ceiling(max(d$logp)))
#)
)
}
  # Create a vector of alternating colors
col <- rep(col, max(d$CHR))
# Add points to the plot
if (nchr==1) {
# paste(if (!is.na(snpName)) paste0(snpName,": ",d[[snpName]],"<br>"),
# if (!is.na(geneName)) paste0(geneName,": ",d[[geneName]],"<br>"),
# if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d[[annotation1Name]],"<br>")
# if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d[[annotation2Name]],"<br>")
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ",d[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ",d[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d[[annotation2Name]]), sep = "<br>")
if (is.na(snpName) && is.na(geneName) && is.na(annotation1Name) && is.na(annotation2Name)) {
p %<>% plotly::add_trace(x = d$pos, y = d$logp,
type = "scatter",
mode = "markers",
# text = TEXT,
showlegend = showlegend,
marker = list(color = col[1],
size = point_size),
name = paste0("chr", unique(d$CHR)))
} else {
p %<>% plotly::add_trace(x = d$pos, y = d$logp,
type = "scatter",
mode = "markers",
text = TEXT,
showlegend = showlegend,
marker = list(color = col[1],
size = point_size),
name = paste0("chr", unique(d$CHR)))
}
} else {
icol <- 1
for(i in unique(d$index)) {
tmp <- d[d$index == unique(d$index)[i], ]
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ", tmp[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ", tmp[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ", tmp[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ", tmp[[annotation2Name]]),
sep = "<br>")
# get chromosome name for labeling
chromo <- unique(tmp[which(tmp$index==i),"CHR"])
if (is.na(snpName) && is.na(geneName) && is.na(annotation1Name) && is.na(annotation2Name)) {
p %<>% plotly::add_trace(x = tmp$pos, y = tmp$logp, type = "scatter",
mode = "markers",
showlegend = showlegend,
marker = list(color = col[icol],
size = point_size),
name = paste0("chr",chromo))
} else {
p %<>% plotly::add_trace(x = tmp$pos, y = tmp$logp, type = "scatter",
mode = "markers",
showlegend = showlegend,
text = TEXT,
marker = list(color = col[icol],
size = point_size),
name = paste0("chr",chromo))
}
icol = icol + 1
}
}
if (suggestiveline & genomewideline) {p %<>% plotly::layout(p,
shapes = list(
list(type = "line",
fillcolor = suggestiveline_color,
line = list(color = suggestiveline_color,
width = suggestiveline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = suggestiveline, y1 = suggestiveline, yref = "y"),
list(type = "line",
fillcolor = genomewideline_color,
line = list(color = genomewideline_color,
width = genomewideline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = genomewideline, y1 = genomewideline, yref = "y")
))}
if (suggestiveline & !(genomewideline)) {p %<>% plotly::layout(p,
shapes = list(
list(type = "line",
fillcolor = suggestiveline_color,
line = list(color = suggestiveline_color,
width = suggestiveline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = suggestiveline, y1 = suggestiveline, yref = "y")
))}
if (!(suggestiveline) & genomewideline) {p %<>% plotly::layout(p,
shapes = list(
list(type = "line",
fillcolor = genomewideline_color,
line = list(color = genomewideline_color,
width = genomewideline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = genomewideline, y1 = genomewideline, yref = "y")
))}
# Highlight snps from a character vector
if (!is.na(snpName)) {
if (!is.null(highlight)) {
if (any(!(highlight %in% d[[snpName]]))) warning("You're trying to highlight SNPs that don't exist in your results.")
d.highlight <- d[which(d[[snpName]] %in% highlight), ]
# Add points to the plot
if (nchr==1) {
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ",d.highlight[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ",d.highlight[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d.highlight[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d.highlight[[annotation2Name]]), sep = "<br>")
p %<>% plotly::add_trace(x = d$pos, y = d$logp,
type = "scatter",
mode = "markers",
text = TEXT,
showlegend = showlegend,
marker = list(color = highlight_color,
size = point_size),
name = "of interest")
} else {
# icol <- 1
for(i in unique(d.highlight$index)) {
tmp <- d.highlight[d.highlight$index == i, ]
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ", tmp[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ", tmp[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ", tmp[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ", tmp[[annotation2Name]]),
sep = "<br>")
# get chromosome name for labeling
chromo <- unique(tmp[which(tmp$index==i),"CHR"])
p %<>% plotly::add_trace(x = tmp$pos,
y = tmp$logp,
type = "scatter",
mode = "markers",
text = TEXT,
showlegend = showlegend,
marker = list(color = highlight_color,
size = point_size),
name = "of interest")
# icol = icol + 1
}
}
# p %<>% plotly::add_trace(x = d.highlight$pos,
# y = d.highlight$logp,
# type = "scatter",
# mode = "markers",
# #evaluate = TRUE,
# text = d.highlight[[snpName]],
# showlegend = showlegend,
# marker = list(color = highlight_color,
# size = point_size),
# name = "of interest")
}
}
p
}
# jj <- manhattan_plotly(gwasResults, genomewideline = FALSE)
#
# jj
# str(jj)
# topHits = subset(d, P <= annotatePval)
# p %>% layout(annotations = list(x = topHits$pos[10],
# y = -log10(topHits$P[10]),
# text = topHits$SNP[10],
# showarrow = T))
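# Convenience operator: the negation of %in%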
"%ni%" <- Negate("%in%")
|
/R/manhattanly.R
|
no_license
|
tianbu/manhattanly
|
R
| false | false | 20,685 |
r
|
|
# we'll be using the iris data
data(iris)
# logical operators -------------------------------------------------------
# if statement
if (TRUE) {
"DO SOMETHING"
} else {
"THIS HAPPENS"
}
# lets test some of the data
if (iris$Sepal.Length[1] < 3.5) {
flower.stage = "bud"
}
# lets add an alternative
if (iris$Sepal.Length[1] < 3.5) {
flower.stage = "bud"
} else {
flower.stage = "flower"
}
# lets add another option
if (iris$Sepal.Length[1] < 3.5) {
flower.stage = "hidden bud"
} else if (iris$Sepal.Length[1] >= 3.5 & iris$Sepal.Length[1] < 5.0) {
flower.stage = "final bud"
} else {
flower.stage = "open flower"
}
# what if we want to know something about all of the observations?
iris$Sepal.Length>5
sepal.gt.5 = which(iris$Sepal.Length>5)
iris$Species[sepal.gt.5]
# for loops ---------------------------------------------------------------
# basic loop syntax
for (i in 1:10) {
print(i)
}
for (i in 1:10) {
print(i^2)
}
# what if we want to keep the results?
# when we don't know what the length of the results is going to be (and we don't mind going slow...):
x = numeric()
for (i in 1:10) {
x = append(x, i^2)
}
# when we do know the length (this is a MUCH MUCH MUCH better option):
x = numeric(10)
for (i in 1:10) {
x[i] = i^2
}
# we can use a variable instead of a numeric index
var = 1:10
for (i in var) {
x[i] = i^2
}
# lets use a loop to do something a little more useful
flower.stage = character(length(iris$Sepal.Length))
for (i in 1:length(iris$Sepal.Length)) {
if (iris$Sepal.Length[i] < 3.5) {
flower.stage[i] = "hidden bud"
} else if (iris$Sepal.Length[i] >= 3.5 & iris$Sepal.Length[i] < 5.0) {
flower.stage[i] = "final bud"
} else {
flower.stage[i] = "open flower"
}
}
# lets get some summary information using a loop
unique.species = unique(iris$Species) # do we know the unique() function?
iris.summary = list()
for (species in unique.species) {
look.at.rows = iris$Species==species
iris.summary[[species]] = summary(iris[look.at.rows,])
}
iris.summary
iris.summary$setosa
# more than one for loop??
# lets calculate how mean varies with random sample size 1:n, per species
iris.sample = list()
n = 30
for (species in unique(iris$Species)) {
temp = numeric(n)
for (i in 1:n) {
temp[i] = mean(sample(iris$Sepal.Length[iris$Species==species], i))
}
iris.sample[[species]] = temp
}
# loops to open and save files??
getwd()
# setwd("path/to/your/folder") # fill in your own path; a bare setwd() would error
# save the subsets we created above as separate files
for (i in 1:length(iris.summary)) {
write.csv(iris.summary[[i]], file=paste0(names(iris.summary)[i], ".csv")) # could use paste() to make more informative filenames
}
# get a list of target files and read them in
filenames = list.files(getwd(), pattern="*.csv", full.names=TRUE)
var1 = read.csv(filenames[1])
# we can use a loop to read them in and assign as variables too!
for (file in filenames) {
assign(file, read.csv(file)) # you should do a better job here with the object naming!
}
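# One possible improvement (illustrative sketch): derive cleaner object names from the
# file names before assigning, e.g. "setosa" instead of the full path
for (file in filenames) {
  obj.name = tools::file_path_sans_ext(basename(file))  # strip directory and ".csv"
  assign(obj.name, read.csv(file))
}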
##############
## HOMEWORK ##
##############
# Using a loop, write your own version of the unique() function
# SOLUTION:
# this is by no means the only or best way, but this is a nice logical implementation which is pretty quick
# as long as your number of unique elements is not insanely large
# the for loop and if statement:
unique.items = character()
for (i in iris$Species) {
if (!(i %in% unique.items)) {
unique.items = append(unique.items, i)
}
}
print(unique.items)
# if you wanted to wrap it in a function:
unique = function(x) {
unique.items = character()
  for (i in x) {   # iterate over the function argument, not iris$Species
if (!(i %in% unique.items)) {
unique.items = append(unique.items, i)
}
}
return(unique.items)
}
unique(iris$Species)
|
/introToLoops.R
|
permissive
|
mitchest/BEES-ecocomp
|
R
| false | false | 3,732 |
r
|
|
#' Scrape ship data from "https://www.vesseltracker.com/"
#'
#' @param url A search result url from "https://www.vesseltracker.com/"
#' @param pause Indicates how long the function should sleep for before returning a value.
#' @return A list containing two data frames: PortStatus contains general information about the port, PortCalls contains a list of recent arrivals. Use \code{pause} when vectorising this function
#' over a list of urls to minimise nuisance to target website.
#' @examples
#' scrape_vesseltracker(url="https://www.vesseltracker.com/en/Port/Dover/Dashboard.html")
#' scrape_vesseltracker(url="https://www.vesseltracker.com/en/Port/Calais/Dashboard.html")
#' scrape_vesseltracker(url="https://www.vesseltracker.com/en/Port/Belfast/Dashboard.html")
#' @export
#' @importFrom tidyr fill
#' @importFrom magrittr %>%
#function for scraping port data
scrape_vesseltracker<-function(url,pause=0)
{
x<-xml2::read_html(url)
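# the ".key-value-table" node lists the port's general details as "key: value" rows;
# they are split on ":" below and reshaped into a one-row data frame named by the keys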
PortStatus<-rvest::html_nodes(x,css=".key-value-table")%>%
rvest::html_children()%>%
rvest::html_text()%>%
strsplit(":")%>%
do.call(rbind, .)%>%
t()%>%
data.frame(stringsAsFactors = F)%>%
purrr::set_names(.[1,]%>%
unlist()%>%
as.character())%>%
.[-1,]%>%
dplyr::mutate(time_collected=Sys.time(),
url=url)%>%
dplyr::select(-`Local time`)
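# the ".data-table" output mixes several sub-tables; rows where X1 == "Name" are header
# rows, used to label the rows beneath them via tidyr::fill() and then dropped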
PortCalls<-rvest::html_nodes(x,css=".data-table")%>%
rvest::html_children()%>%
purrr::map(rvest::html_children)%>%
purrr::map(~rvest::html_text(.x,trim = T))%>%
do.call(rbind,.)%>%
.[,-1]%>%
data.frame(stringsAsFactors = F)
PortCalls$table<-NA
PortCalls$table[PortCalls$X1=="Name"]<-PortCalls$X3[PortCalls$X1=="Name"]
PortCalls<-tidyr::fill(PortCalls,table)
PortCalls[PortCalls$X1!="Name",]%>%
purrr::set_names(c("Name","Type","Time","Status"))%>%
dplyr::mutate(time_collected=Sys.time(),
url=url)->PortCalls
print("Scraping successful")
if(pause>0){
print(paste("Pausing for",pause,"seconds"))
Sys.sleep(pause)
}
return(list(Status=PortStatus,
Calls=PortCalls))
}
|
/R/scrape_vesseltracker.r
|
no_license
|
lina2497/webscrapeR
|
R
| false | false | 2,089 |
r
|
.gpScaleBiasGradient <-
function(model) {
g = list()
if (model$learnScales) {
## 'drop' converts row matrix to column vector by default.
g = 1/model$scale * drop(model$innerProducts-1)
fhandle <- get(model$scaleTransform$func, mode="function")
g = g * fhandle(model$scale, "gradfact")
}
return (g)
}
|
/R/gptk_gpScaleBiasGradient.R
|
no_license
|
cran/robin
|
R
| false | false | 337 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getFunctionWC.R
\name{getFunctionWC}
\alias{getFunctionWC}
\title{Creates word cloud visualization of GO functionality based on Motif match
names from MotifDb}
\usage{
getFunctionWC(matchNames)
}
\arguments{
\item{matchNames}{A character vector indicating Motif "names" (based on
MotifDb records)}
}
\value{
Returns functionFreq - A table/list indicating frequency of found
functionalities
}
\description{
A function that takes motif "names" in MotifDb format, retrieves
corresponding GO functions and produces a word cloud visualization based on
function frequency within all specified motifs.
}
\examples{
data(jaspar.scores, package="MotifFunc")
jaspar.scores
matchNames <- MotifFunc::classifySeqMotifs("AGCGTAGGCGT")
functionFreq <- MotifFunc::getFunctionWC(matchNames)
transfacFilePath <- system.file("extdata", "new0007.txt", package = "MotifFunc")
data(jaspar.scores, package="MotifFunc")
jaspar.scores
matchNames <- MotifFunc::classifyPcmMotifs(transfacFilePath)
functionFreq <- MotifFunc::getFunctionWC(matchNames)
}
\references{
Hines, K. (2014). Stack Overflow source code [Source code].
https://stackoverflow.com/questions/26937960/creating-word-cloud-of-phrases-not-individual-words-in-r.
}
|
/man/getFunctionWC.Rd
|
permissive
|
minhanho/MotifFunc
|
R
| false | true | 1,284 |
rd
|
\name{scaleScores}
\alias{scaleScores}
\title{
Compute scale scores
}
\description{
Compute scale scores exploiting regularities in the item names.
}
\usage{
scaleScores(data, pattern, recode = FALSE, recString = "_r", min = 1,
max = 5, na.rm = TRUE)
}
\arguments{
\item{data}{
A dataframe.
}
\item{pattern}{
A named list of character vectors, one per scale. Each vector contains strings identifying the scale's items: all items whose names include all the strings specified in the vector are included in the scale.
}
\item{recode}{
Logical. Whether reverse-coded items should be recoded before computing the scale scores.
}
\item{recString}{
A string that identifies reverse-coded items, by default "_r". Be careful: if \code{recode = TRUE}, all items including this string in their names will be recoded.
}
\item{min}{
The lowest point of the response scale
}
\item{max}{
The highest point of the response scale
}
\item{na.rm}{
Whether NAs should be removed before computing scale score
}
}
\value{
A dataframe of scale scores
}
\author{
Giulio Costantini
}
\examples{
library(psychTools)
data(bfi)
# bfi.keys include information on reverse-scoring. In particular, items with the string
# "-" are reverse-scored. We can exploit this information to reverse-score all items
bfi2 <- bfi
names(bfi2)[1:25] <- c("-A1", "A2", "A3", "A4", "A5",
"C1", "C2", "C3", "-C4", "-C5",
"-E1", "-E2", "E3", "E4", "E5" ,
"N1", "N2", "N3", "N4", "N5",
"O1", "-O2", "O3", "O4", "-O5")
# scale scores can be computed with function scaleScores
BFI <- scaleScores(bfi2, pattern = list(
"Openness" = c("O"),
"Conscientiousness" = c("C"),
"Extraversion" = c("E"),
"Agreeableness" = c("A"),
"Neuroticism" = c("N")),
recode = TRUE,
recString = "-",
min = 1, max = 6)
head(BFI)
}
|
/man/scaleScores.Rd
|
no_license
|
GiulioCostantini/markerIndex
|
R
| false | false | 1,944 |
rd
|
###########################################################################/**
# @set "class=matrix"
# @RdocMethod fitNSAcnPs
# @alias fitNSAcnPs
#
# @title "Inferring the LH values of the CN probes"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{data}{A Jx2 @numeric @array containing copy number values, where J is the number of SNPs.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a Jx2 @numeric @array with the "normality information" of the CN probes.
# }
#
#*/###########################################################################
setMethodS3("fitNSAcnPs", "matrix", function(data,...) {
pos <- c(1:(dim(data)[1]));
nSNPs <- dim(data)[1];
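# approxfun() builds a linear interpolator over the probe positions; evaluating it
# back at every position fills internal gaps (NAs at the extremes are handled below)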
Salida <- approxfun(pos,data[,1])(pos)
#There can be some NA in the extremes
lastSNPsBegin <- which(is.na(Salida[1:(nSNPs/2)]));
if (length(lastSNPsBegin) >0)
Salida[lastSNPsBegin] <- Salida[max(lastSNPsBegin)+1];
lastSNPsEnd <- which(is.na(Salida[1:nSNPs]));
if (length(lastSNPsEnd) >0)
Salida[lastSNPsEnd] <- Salida[min(lastSNPsEnd)-1];
data[,1] <- Salida;
return(data);
}, private=TRUE)
############################################################################
# HISTORY:
# 2011-05-13 [MO]
# o Created.
############################################################################
|
/NSA/R/fitNSAcnPs.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 1,403 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 122000
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 122000
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-201.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 46259
c no.of clauses 122000
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 122000
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-201.qdimacs 46259 122000 E1 [] 0 202 45453 122000 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-201/tlc02-nonuniform-depth-201.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 703 |
r
|
library(reshape2)
# 1.Merge the training and test sets to create one data set
# Read the data from files
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
# Load activity labels and features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
featuresData <- read.table("UCI HAR Dataset/features.txt")
featuresData[,2] <- as.character(featuresData[,2])
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
features <- grep(".*mean.*|.*std.*", featuresData[,2])
features.names <- featuresData[features,2]
# Load the datasets and bind the data
train <- read.table("UCI HAR Dataset/train/X_train.txt")[features]
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[features]
test <- cbind(testSubjects, testActivities, test)
# 3. Use descriptive activity names to name the activities in the data set.
# Combine the training and test sets into a single data set and attach column names
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", features.names)
# 4. Appropriately label the data set with descriptive activity names.
# Cleaning up the variable names
features.names <- gsub('[()]', '', features.names) # drop parentheses; dashes are removed after the substitutions below
features.names <- gsub("-std","StdDev",features.names)
features.names <- gsub("-mean","Mean",features.names)
features.names <- gsub("^(t)","time",features.names)
features.names <- gsub("^(f)","freq",features.names)
features.names <- gsub("([Gg]ravity)","Gravity",features.names)
features.names <- gsub("([Bb]ody[Bb]ody|[Bb]ody)","Body",features.names)
features.names <- gsub("[Gg]yro","Gyro",features.names)
features.names <- gsub("AccMag","AccMagnitude",features.names)
features.names <- gsub("([Bb]odyaccjerkmag)","BodyAccJerkMagnitude",features.names)
features.names <- gsub("JerkMag","JerkMagnitude",features.names)
features.names <- gsub("GyroMag","GyroMagnitude",features.names)
features.names <- gsub("-", "", features.names)
# re-apply the cleaned-up names to the combined data set
colnames(allData) <- c("subject", "activity", features.names)
# 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject.
# turn activities & subjects into factors
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
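# melt() stacks the measurement columns into long (subject, activity, variable, value) form;
# dcast() then takes the mean of 'value' for every subject x activity x variable combination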
allData.melted <- melt(allData, id = c("subject", "activity"))
allData.mean <- dcast(allData.melted, subject + activity ~ variable, mean)
# Export the tidyData set
write.table(allData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
/run_analysis.R
|
no_license
|
sskuma5/getting-and-cleaning-data
|
R
| false | false | 2,702 |
r
|
# Draw a graphic of energy sub metering over time
plot3 <- function() {
# filter only the targeted date
grep_data <- grep("^(1/2/2007)|^(2/2/2007)", readLines("household_power_consumption.txt"))
# assuming the data are sorted, skip every row before the first match and read one row per match
data <- read.table("household_power_consumption.txt", sep=";", skip=grep_data[1]-1, nrows=length(grep_data), col.names=c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# add a column that combines Date and Time
data$DateTime <- paste(data$Date, data$Time)
# open the png file
png("plot3.png")
# Needed on non-English locales so that weekday names are rendered in English
Sys.setlocale("LC_TIME", "en_US.UTF-8")
# Draw first line
plot(as.POSIXlt(data$DateTime, format="%d/%m/%Y %H:%M:%S"), data$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
# Draw second line
lines(as.POSIXlt(data$DateTime, format="%d/%m/%Y %H:%M:%S"), data$Sub_metering_2, col="red")
# Draw third line
lines(as.POSIXlt(data$DateTime, format="%d/%m/%Y %H:%M:%S"), data$Sub_metering_3, col="blue")
# Add legend
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
# close device
dev.off()
}
|
/plot3.R
|
no_license
|
ysleo/ExData_Plotting1
|
R
| false | false | 1,333 |
r
|
calculaEstatisticasLinha = function(matrizResultado, matrizGabarito, matrizLimiares){
  vetorPrecisao = vector(mode="numeric", length=nrow(matrizResultado));
  vetorRecall = vector(mode="numeric", length=nrow(matrizResultado));
  beta = vector(mode="numeric", length=3);
  resultado = matrix(0, nrow=ncol(matrizLimiares)*length(beta), ncol=4);
beta[1] = 0.5;
beta[2] = 1;
beta[3] = 2;
for(k in 1:ncol(matrizLimiares)){
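    # per observation: labels scoring >= the threshold form the predicted set;
    # precision = TP / |predicted labels|, recall = TP / |gold labels|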
for(i in 1:nrow(matrizResultado)){
threshold = matrizLimiares[i,k];
gabaritosIteracao = colnames(matrizGabarito)[matrizGabarito[i,]==TRUE];
naoGabaritosIteracao = colnames(matrizGabarito)[matrizGabarito[i,]==FALSE];
scoresOrdenados = unique(sort(matrizResultado[i,1:12]));
rotulosAbaixoThreshold = colnames(matrizResultado[,1:12])[matrizResultado[i,1:12] < threshold];
rotulosAcimaThreshold = colnames(matrizResultado[,1:12])[matrizResultado[i,1:12] >= threshold];
truePositives = length(intersect(gabaritosIteracao, rotulosAcimaThreshold));
vetorPrecisao[i]=truePositives/length(rotulosAcimaThreshold);
vetorRecall[i]= truePositives/length(gabaritosIteracao);
}
for(j in 1:length(beta)){
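      # macro-averaged precision/recall across all rows, plus the F-beta score
      # F_beta = (1 + beta^2) * P * R / (beta^2 * P + R), for beta = 0.5, 1, 2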
resultado[3*(k-1)+j,1] = k;
resultado[3*(k-1)+j,2] = sum(vetorPrecisao)/length(vetorPrecisao);
resultado[3*(k-1)+j,3] = sum(vetorRecall)/length(vetorRecall);
resultado[3*(k-1)+j,4]= ((1+beta[j]^2)*(resultado[3*(k-1)+j,2]*resultado[3*(k-1)+j,3]))/((beta[j]^2)*resultado[3*(k-1)+j,2]+resultado[3*(k-1)+j,3]);
}
}
return(resultado);
}
|
/calculaEstatisticasLinha.R
|
no_license
|
rnpeclat/A2E
|
R
| false | false | 1,759 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{rp99}
\alias{rp99}
\title{1999 French Census - Cities from the Rhône département}
\format{A data frame with 301 rows and 21 variables}
\source{
\url{http://www.insee.fr/fr/bases-de-donnees/default.asp?page=recensements.htm}
}
\description{
Sample from the 1999 French census for the cities of the Rhône département.
}
\keyword{datasets}
|
/man/rp99.Rd
|
no_license
|
Rterial/questionr
|
R
| false | true | 438 |
rd
|
#!/usr/bin/Rscript
args <- commandArgs(trailingOnly=TRUE)
test.cases <- strsplit(readLines(args[[1]], warn=FALSE), "\n")
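# each test case X is answered with the number of ways X = a^2 + b^2 with 0 <= a <= b
# (a only runs up to sqrt(X/2), so each unordered pair is counted once)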
for (test in test.cases[2:length(test.cases)]) {
if (length(test) > 0) {
X <- strtoi(test)
if (X == 0){
cat("1\n")
next
}
N <- 0
for (a in 0:floor(sqrt(X / 2))){
b <- sqrt(X - a ** 2)
if (a ** 2 + round(b) ** 2 == X){
N <- N + 1
}
}
stmt <- sprintf("%d\n", N)
cat(stmt)
}
}
|
/CodeEval/033_R/033.r
|
no_license
|
benjamin-stokes/CodeExamples
|
R
| false | false | 545 |
r
|
#Loading the libraries
library(kernlab)
library(readr)
library(caret)
library(ggplot2)
library(dplyr)
library(gridExtra)
Data_MNIST_Train <- read.csv("mnist_train.csv",header=FALSE)
Data_MNIST_Test <- read.csv("mnist_test.csv",header=FALSE)
#Understanding Dimensions
dim(Data_MNIST_Train)
dim(Data_MNIST_Test)
#Structure of the dataset
str(Data_MNIST_Train)
str(Data_MNIST_Test)
#Exploring the dataset
summary(Data_MNIST_Train)
summary(Data_MNIST_Test)
#Checking for na values
sum(is.na(Data_MNIST_Train))
sum(is.na(Data_MNIST_Test))
Data_MNIST_Train$V1<-factor(Data_MNIST_Train$V1)
Data_MNIST_Test$V1<-factor(Data_MNIST_Test$V1) # test labels must also be factors for confusionMatrix()
#Taking 6000 samples of training data
Data_Train <- sample_n(Data_MNIST_Train,6000,replace = F)
#Scaling of training and test datasets
Data_Train_scaled<- Data_Train
Test_data_scaled <- Data_MNIST_Test
Data_Train_scaled[,-1] <- Data_Train_scaled[,-1]/255
Test_data_scaled[,-1] <- Test_data_scaled[,-1]/255
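# MNIST pixel intensities are 0-255, so dividing by 255 rescales them to [0, 1]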
############################################################################################
#Constructing Linear Model
Model_linear_scaled <- ksvm(V1~ ., data = Data_Train_scaled, scale = FALSE, kernel = "vanilladot")
Eval_linear_scaled<- predict(Model_linear_scaled, Test_data_scaled)
#Confusion Matrix:- Linear Kernel
confusionMatrix(Eval_linear_scaled,Test_data_scaled$V1)
#Accuracy:- 0.9033
#Constructing polydot kernel
Model_Poly_Scaled <- ksvm(V1~ ., data = Data_Train_scaled, scale = FALSE, kernel = "polydot")
Eval_Poly_Scaled<- predict(Model_Poly_Scaled, Test_data_scaled)
confusionMatrix(Eval_Poly_Scaled,Test_data_scaled$V1)
#Accuracy for Polydot:- 0.9033
#Constructing RBF Kernel
Model_RBF_Scaled <- ksvm(V1~ ., data = Data_Train_scaled, scale = FALSE, kernel = "rbfdot")
Eval_RBF_Scaled<- predict(Model_RBF_Scaled,Test_data_scaled )
#confusion matrix:- RBF Kernel(Scaled)
confusionMatrix(Eval_RBF_Scaled,Test_data_scaled$V1)
#Accuracy:- 0.9526
#############################################################################################
#traincontrol function to control the computational nuances of the train function.
trainControl <- trainControl(method="cv", number=5)
metric <- "Accuracy"
set.seed(7)
#Hyperparameter tuning and cross validation using Linear Kernel
# making a grid of C values.
grid_Linear_scaled <- expand.grid(C=seq(1, 5, by=1))
# Performing 5-fold cross validation
fit.svm_Linear_scaled <- train(V1~., data=Data_Train_scaled, method="svmLinear", metric=metric,
tuneGrid=grid_Linear_scaled, trControl=trainControl)
print(fit.svm_Linear_scaled)
#Plotting model results
plot(fit.svm_Linear_scaled)
#Accuracy:- 0.8955 at c=1
#Hyperparameter tuning and cross validation using Polydot
grid_Poly_scaled <- expand.grid(.degree=c(2,3), .scale=c(0.1,0.3),.C=c(1,2,3,4) )
fit.svm_Poly_Scaled <- train(V1~., data=Data_Train_scaled, method="svmPoly", metric=metric,
tuneGrid=grid_Poly_scaled, trControl=trainControl)
print(fit.svm_Poly_Scaled)
plot(fit.svm_Poly_Scaled) #Plotting model results
#Accuracy of 0.9479961 at degree=2,scale=0.1 and c=1
#Hyperparameter tuning using RBF Kernel
grid_RBF_scaled <- expand.grid(.sigma=c(0.025, 0.05), .C=c(0.1,0.5,1,2) )
fit.svm_RBF_scaled <- train(V1~., data=Data_Train_scaled, method="svmRadial", metric=metric,
tuneGrid=grid_RBF_scaled, trControl=trainControl)
print(fit.svm_RBF_scaled)
plot(fit.svm_RBF_scaled) #Plotting model results
#Accuracy:- 0.9581 at c=2 and sigma=0.025
#Since, the highest accuracy is obtained for RBF kernel, therefore we are considering RBF Kernel in our final model.
# Checking overfitting - Non-Linear - SVM
######################################################################
# Validating the model results on test data
evaluate_non_linear<- predict(fit.svm_RBF_scaled, Test_data_scaled)
confusionMatrix(evaluate_non_linear, Test_data_scaled$V1)
#Accuracy :- 0.966
#End of Assignment#
|
/SVM-Assignment.R
|
no_license
|
aviral2407/Aviral_3
|
R
| false | false | 4,029 |
r
|
library(TeachingDemos)
# R CMD BATCH --no-save --no-restore '--args nbcores=2'
args = commandArgs(trailingOnly=TRUE)
for(i in 1:length(args)){
eval(parse(text=args[[i]]))
}
print(42)
print(Sys.time())
print(nbcores)
|
/Bugs/args.R
|
no_license
|
CamilleVernier/SharedTests
|
R
| false | false | 221 |
r
|
library(plotly)
library(dplyr)
library(cluster)
library(ggplot2)
library(webshot)
source("Analisis/R_Scripts/utils.R")
NOTES = "Data/notas_clean.csv"
# =======================================================================================================
# ================================== KMEANS CALC & PLOT =================================================
# =======================================================================================================
measures_kmeans <- function(model_obj){
dis <- dist(dataGroup)^2
sil <- silhouette(model_obj$cluster, dis)
df_bss <- length(model_obj$size)-1
df_wss <- length(model_obj$cluster)-length(model_obj$size)
df_tss <- df_bss + df_wss
mean_squares <- c(model_obj$betweenss/df_bss, model_obj$tot.withinss/df_wss, NA)
f_ratio <- c(mean_squares[1]/mean_squares[2],NA,NA)
anova <- data.frame(SOURCE = c("BSS","WSS","TSS"), DF = c(df_bss,df_wss,df_tss),
SUM_OF_SQUARES = c(model_obj$betweenss,model_obj$tot.withinss,model_obj$totss),
MEAN_SQUARES = mean_squares, F_RATIO = f_ratio,
P_VALUE = c(1-pf(f_ratio[1],df_bss,df_wss),NA,NA))
return(list(silhouette = sil, anova_table = anova))
}
BoxPlotByK <- function(model){
k <- length(model$size)
pdf(paste("BoxPlotGroup",k,"k.pdf",sep = ""),width=6,height=4,paper='special')
plot(measures_kmeans(model)$silhouette)
colorBox <- c("red","red","red","blue","blue","blue","blue","darkgreen","darkgreen","darkgreen","darkgreen","gold","gold","gold","gold")
for (i in 1:k){
boxplot.matrix(dataGroup[model$cluster == i,], names = asig, las = 2, col = colorBox)
}
dev.off()
}
# =======================================================================================================
# ==================================== ISIS DATA - KMEANS ===============================================
# =======================================================================================================
allData <- read.csv(NOTES, header = TRUE)
asig <- c("TSOR ", "PRON ", "AREM ", "PDSW ", "ARSW ", "COSW ",
"SOSW ", "ARQC", "SOPC ", "FRED ", "SEGI")
dataGroup <- getData(allData,asig)
set.seed(2017)
goodness <- numeric(10)
mod <- vector("list", 10)
for ( i in 1:10 ) {
model <- kmeans( dataGroup , centers = i , iter.max = 30 );
mod[[i]] <- model;
goodness[i] <- model$betweenss / model$totss;
}
plot(1:10, goodness, type="b", xlab="Number of Clusters",
ylab="BSS/TSS")
#Select Best Model
measures_2k <- measures_kmeans(mod[[2]])
measures_3k <- measures_kmeans(mod[[3]])
measures_4k <- measures_kmeans(mod[[4]])
#Hipotesis
BoxPlotByK(mod[[3]])
#ByElbow
BoxPlotByK(mod[[2]])
BoxPlotByK(mod[[3]])
|
/Analisis/R_Scripts/grades/cluster_grades.R
|
no_license
|
IEADIS/Consejero-Academico
|
R
| false | false | 2,786 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/macroutils2.r
\name{macroReadIndump}
\alias{macroReadIndump}
\title{INTERNAL. Import a MACRO indump.tmp file and output it in a human readable format.}
\usage{
macroReadIndump(f, layerLoc = 7, exportTrash = FALSE)
}
\arguments{
\item{f}{Single character string. Name (and if needed, path) of the
indump.tmp file to be read}
\item{layerLoc}{Single integer. Line where the number of numerical layers is
written}
\item{exportTrash}{Single logical value. If TRUE, 'filling' parameter values (i.e.
values written but not used) are also exported.}
}
\value{
Returns a list of \code{\link[base]{data.frame}}s with different
MACRO parameters
}
\description{
INTERNAL. Import a MACRO indump.tmp file and output it in a
human readable format. It reads layered parameters, options,
crop parameters and irrigation parameters, but not yet output
parameters. EXPERIMENTAL. USE AT YOUR OWN RISK.
}
\keyword{internal}
|
/man/macroReadIndump.Rd
|
permissive
|
julienmoeys/macroutils2
|
R
| false | true | 994 |
rd
|
source("src/lib.R")
# Create tables of summary stats for the map ####
## Riding level
district_summary_stats_probs <- district_summary_stats %>%
left_join(district_probs %>%
melt(measure.vars = c("Liberal", "Conservative", "NDP", "Green", "People's", "Bloc"), variable.name = "party", value.name = "prob"),
by = c("district_code", "party")) %>%
mutate(prob = ifelse(is.na(prob), 0, prob)) %>%
dplyr::select(province, district_code, district_name = district, party, prob, pct_05, mean, pct_95) %>%
arrange(district_code, desc(prob), desc(mean)) %>%
group_by(district_code) %>%
mutate(rank = 1:n()) %>%
ungroup()
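# parties are now ranked within each riding (1 = most likely winner); the blocks below
# pull ranks 1-6 back out and join them into one wide row per riding for the map popups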
district_summary_stats_1 <- district_summary_stats_probs %>%
filter(rank == 1) %>%
dplyr::select(province, district_code, district_name, party_1 = party, prob_1 = prob, pct_05_1 = pct_05, mean_1 = mean, pct_95_1 = pct_95)
district_summary_stats_2 <- district_summary_stats_probs %>%
filter(rank == 2) %>%
dplyr::select(district_code, district_name, party_2 = party, prob_2 = prob, pct_05_2 = pct_05, mean_2 = mean, pct_95_2 = pct_95)
district_summary_stats_3 <- district_summary_stats_probs %>%
filter(rank == 3) %>%
dplyr::select(district_code, district_name, party_3 = party, prob_3 = prob, pct_05_3 = pct_05, mean_3 = mean, pct_95_3 = pct_95)
district_summary_stats_4 <- district_summary_stats_probs %>%
filter(rank == 4) %>%
dplyr::select(district_code, district_name, party_4 = party, prob_4 = prob, pct_05_4 = pct_05, mean_4 = mean, pct_95_4 = pct_95)
district_summary_stats_5 <- district_summary_stats_probs %>%
filter(rank == 5) %>%
dplyr::select(district_code, district_name, party_5 = party, prob_5 = prob, pct_05_5 = pct_05, mean_5 = mean, pct_95_5 = pct_95)
district_summary_stats_6 <- district_summary_stats_probs %>%
filter(rank == 6) %>%
dplyr::select(district_code, district_name, party_6 = party, prob_6 = prob, pct_05_6 = pct_05, mean_6 = mean, pct_95_6 = pct_95)
district_summary_stats_wide <- district_summary_stats_1 %>%
left_join(district_summary_stats_2, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_3, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_4, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_5, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_6, by = c("district_code", "district_name"))
## Province level
province_summary_stats_1 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 1) %>%
dplyr::select(province, party_1 = party, vote_pct_05_1 = vote_pct_05, vote_pct_50_1 = vote_pct_50, vote_pct_95_1 = vote_pct_95,
seats_pct_05_1 = seats_pct_05, seats_pct_50_1 = seats_pct_50, seats_pct_95_1 = seats_pct_95)
province_summary_stats_2 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 2) %>%
dplyr::select(province, party_2 = party, vote_pct_05_2 = vote_pct_05, vote_pct_50_2 = vote_pct_50, vote_pct_95_2 = vote_pct_95,
seats_pct_05_2 = seats_pct_05, seats_pct_50_2 = seats_pct_50, seats_pct_95_2 = seats_pct_95)
province_summary_stats_3 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 3) %>%
dplyr::select(province, party_3 = party, vote_pct_05_3 = vote_pct_05, vote_pct_50_3 = vote_pct_50, vote_pct_95_3 = vote_pct_95,
seats_pct_05_3 = seats_pct_05, seats_pct_50_3 = seats_pct_50, seats_pct_95_3 = seats_pct_95)
province_summary_stats_4 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 4) %>%
dplyr::select(province, party_4 = party, vote_pct_05_4 = vote_pct_05, vote_pct_50_4 = vote_pct_50, vote_pct_95_4 = vote_pct_95,
seats_pct_05_4 = seats_pct_05, seats_pct_50_4 = seats_pct_50, seats_pct_95_4 = seats_pct_95)
province_summary_stats_5 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 5) %>%
dplyr::select(province, party_5 = party, vote_pct_05_5 = vote_pct_05, vote_pct_50_5 = vote_pct_50, vote_pct_95_5 = vote_pct_95,
seats_pct_05_5 = seats_pct_05, seats_pct_50_5 = seats_pct_50, seats_pct_95_5 = seats_pct_95)
province_summary_stats_6 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 6) %>%
dplyr::select(province, party_6 = party, vote_pct_05_6 = vote_pct_05, vote_pct_50_6 = vote_pct_50, vote_pct_95_6 = vote_pct_95,
seats_pct_05_6 = seats_pct_05, seats_pct_50_6 = seats_pct_50, seats_pct_95_6 = seats_pct_95)
province_summary_stats_wide <- province_summary_stats_1 %>%
left_join(province_summary_stats_2, by = c("province")) %>%
left_join(province_summary_stats_3, by = c("province")) %>%
left_join(province_summary_stats_4, by = c("province")) %>%
left_join(province_summary_stats_5, by = c("province")) %>%
left_join(province_summary_stats_6, by = c("province"))
# The map itself ####
## Create Lambert conformal conic CRS for leaflet (see http://spatialreference.org/ref/esri/canada-lambert-conformal-conic/ )
crs_proj <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=63.390675 +lon_0=-91.86666666666666 +x_0=6200000 +y_0=3000000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs "
crs_lcc <- leafletCRS(code = "ESRI:102002", proj4def = crs_proj)
district_shp <- readOGR("data/shapes/FED_CA_2_2_ENG.shp") %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")) %>%
st_as_sf() %>%
mutate(district_code = as.numeric(FED_NUM)) %>%
ms_simplify()
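# ms_simplify() thins the polygon vertices so the leaflet map stays light and responsive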
province_shp <- readOGR("data/shapes/provinces.shp") %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")) %>%
st_as_sf() %>%
ms_simplify()
# Swap out shapes from provincial file for the territories since they look nicer
district_shp$geometry[district_shp$PROVCODE == "YT"] <- province_shp$geometry[province_shp$PRENAME == "Yukon"]
district_shp$geometry[district_shp$PROVCODE == "NT"] <- province_shp$geometry[province_shp$PRENAME == "Northwest Territories"]
district_shp$geometry[district_shp$PROVCODE == "NU"] <- province_shp$geometry[province_shp$PRENAME == "Nunavut"]
# Define mapping from party to color
party_to_color <- function(party) {
case_when(party == "Liberal" ~ "red",
party == "Conservative" ~ "blue",
party == "NDP" ~ "#EE7600",
party == "Green" ~ "#008B00",
party == "Bloc" ~ "#8B008B",
party == "People's" ~ "midnightblue")
}
district_map_data <- district_shp %>%
left_join(district_summary_stats_wide, by = "district_code") %>%
mutate(color_1 = party_to_color(party_1),
color_2 = party_to_color(party_2),
color_3 = party_to_color(party_3),
color_4 = party_to_color(party_4),
color_5 = party_to_color(party_5),
color_6 = party_to_color(party_6),
mouseover_label = district_name,
alpha = (prob_1 - 0.4) / 0.6) %>%
# Create the infobox
mutate(popup_label = case_when(
# When there is a sixth party (i.e. Quebec)
!is.na(party_6) ~ paste0("<H4><b><u>", district_name, "</u><br><i>", province, "</i></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(mean_1, accuracy = 0.1), "</font></b> (", percent(pct_05_1, accuracy = 0.1), " – ",
percent(pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(mean_2, accuracy = 0.1), "</font></b> (", percent(pct_05_2, accuracy = 0.1), " – ",
percent(pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(mean_3, accuracy = 0.1), "</font></b> (", percent(pct_05_3, accuracy = 0.1), " – ",
percent(pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(mean_4, accuracy = 0.1), "</font></b> (", percent(pct_05_4, accuracy = 0.1), " – ",
percent(pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(mean_5, accuracy = 0.1), "</font></b> (", percent(pct_05_5, accuracy = 0.1), " – ",
percent(pct_95_5, accuracy = 0.1), ")<br>
<font color = ", color_6, "><b>", party_6, "</b></font>: <b><font color =", color_6, ">",
percent(mean_6, accuracy = 0.1), "</font></b> (", percent(pct_05_6, accuracy = 0.1), " – ",
percent(pct_95_6, accuracy = 0.1), ")<br>
<br>
<b><i>Win probability</i><b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>", ": <font color = ", color_1, "><b>",
percent(prob_1, accuracy = 1), "</b></font><br>
<font color = ", color_2, "><b>", party_2, "</b></font>", ": <font color = ", color_2, "><b>",
percent(prob_2, accuracy = 1), "</b></font><br>
<font color = ", color_3, "><b>", party_3, "</b></font>", ": <font color = ", color_3, "><b>",
percent(prob_3, accuracy = 1), "</b></font><br>
<font color = ", color_4, "><b>", party_4, "</b></font>", ": <font color = ", color_4, "><b>",
percent(prob_4, accuracy = 1), "</b></font><br>
<font color = ", color_5, "><b>", party_5, "</b></font>", ": <font color = ", color_5, "><b>",
percent(prob_5, accuracy = 1), "</b></font><br>
<font color = ", color_6, "><b>", party_6, "</b></font>", ": <font color = ", color_6, "><b>",
percent(prob_6, accuracy = 1), "</b></font>"),
is.na(party_6) ~ paste0("<H4><b><u>", district_name, "</u><br><i>", province, "</i></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(mean_1, accuracy = 0.1), "</font></b> (", percent(pct_05_1, accuracy = 0.1), " – ",
percent(pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(mean_2, accuracy = 0.1), "</font></b> (", percent(pct_05_2, accuracy = 0.1), " – ",
percent(pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(mean_3, accuracy = 0.1), "</font></b> (", percent(pct_05_3, accuracy = 0.1), " – ",
percent(pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(mean_4, accuracy = 0.1), "</font></b> (", percent(pct_05_4, accuracy = 0.1), " – ",
percent(pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(mean_5, accuracy = 0.1), "</font></b> (", percent(pct_05_5, accuracy = 0.1), " – ",
percent(pct_95_5, accuracy = 0.1), ")<br>
<br>
<b><i>Win probability</i><b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>", ": <font color = ", color_1, "><b>",
percent(prob_1, accuracy = 1), "</b></font><br>
<font color = ", color_2, "><b>", party_2, "</b></font>", ": <font color = ", color_2, "><b>",
percent(prob_2, accuracy = 1), "</b></font><br>
<font color = ", color_3, "><b>", party_3, "</b></font>", ": <font color = ", color_3, "><b>",
percent(prob_3, accuracy = 1), "</b></font><br>
<font color = ", color_4, "><b>", party_4, "</b></font>", ": <font color = ", color_4, "><b>",
percent(prob_4, accuracy = 1), "</b></font><br>
<font color = ", color_5, "><b>", party_5, "</b></font>", ": <font color = ", color_5, "><b>",
percent(prob_5, accuracy = 1), "</b></font>")))
write_rds(district_map_data, "shiny-app/data/district_map_data.rds")
leaflet(district_map_data) %>%
addTiles() %>%
addPolygons(color = "#666666", weight = 1, opacity = 1, fill = TRUE, fillColor = ~color_1, fillOpacity = ~(prob_1 - 0.4) / 0.6,
label = ~district_name, popup = ~popup_label,
highlightOptions = highlightOptions(color = "black", weight = 4, bringToFront = TRUE, opacity = 1))
# Province map ####
province_map_data <- province_shp %>%
left_join(province_summary_stats_wide, by = c("PRENAME" = "province")) %>%
left_join(district_probs %>% group_by(province) %>% summarise(n_seats = n()), by = c("PRENAME" = "province")) %>%
mutate(color_1 = party_to_color(party_1),
color_2 = party_to_color(party_2),
color_3 = party_to_color(party_3),
color_4 = party_to_color(party_4),
color_5 = party_to_color(party_5),
color_6 = party_to_color(party_6),
alpha = sqrt((vote_pct_50_1 - 0.28) / 0.28),
seat_word_total = ifelse(n_seats == 1, "riding", "ridings"),
seat_word_1 = ifelse(seats_pct_50_1 == 1, "seat", "seats"),
seat_word_2 = ifelse(seats_pct_50_2 == 1, "seat", "seats"),
seat_word_3 = ifelse(seats_pct_50_3 == 1, "seat", "seats"),
seat_word_4 = ifelse(seats_pct_50_4 == 1, "seat", "seats"),
seat_word_5 = ifelse(seats_pct_50_5 == 1, "seat", "seats"),
seat_word_6 = ifelse(seats_pct_50_6 == 1, "seat", "seats"),
mouseover_label = PRENAME) %>%
# Infobox
# Create the infobox
mutate(popup_label = case_when(
# When there is a sixth party (i.e. Quebec)
!is.na(party_6) ~ paste0("<H4><b><u>", PRENAME, " (", n_seats, " ", seat_word_total, ")", "</u></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(vote_pct_50_1, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_1, accuracy = 0.1), " – ",
percent(vote_pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(vote_pct_50_2, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_2, accuracy = 0.1), " – ",
percent(vote_pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(vote_pct_50_3, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_3, accuracy = 0.1), " – ",
percent(vote_pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(vote_pct_50_4, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_4, accuracy = 0.1), " – ",
percent(vote_pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(vote_pct_50_5, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_5, accuracy = 0.1), " – ",
percent(vote_pct_95_5, accuracy = 0.1), ")<br>
<font color = ", color_6, "><b>", party_6, "</b></font>: <b><font color =", color_6, ">",
percent(vote_pct_50_6, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_6, accuracy = 0.1), " – ",
percent(vote_pct_95_6, accuracy = 0.1), ")<br>
<br>
<b><i>Projected seats (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">", seats_pct_50_1,
"</font></b> ", seat_word_1, " (", seats_pct_05_1, " – ", seats_pct_95_1, ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">", seats_pct_50_2,
"</font></b> ", seat_word_2, " (", seats_pct_05_2, " – ", seats_pct_95_2, ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">", seats_pct_50_3,
"</font></b> ", seat_word_3, " (", seats_pct_05_3, " – ", seats_pct_95_3, ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">", seats_pct_50_4,
"</font></b> ", seat_word_4, " (", seats_pct_05_4, " – ", seats_pct_95_4, ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">", seats_pct_50_5,
"</font></b> ", seat_word_5, " (", seats_pct_05_5, " – ", seats_pct_95_5, ")<br>
<font color = ", color_6, "><b>", party_6, "</b></font>: <b><font color =", color_6, ">", seats_pct_50_6,
"</font></b> ", seat_word_6, " (", seats_pct_05_6, " – ", seats_pct_95_6, ")"),
is.na(party_6) ~ paste0("<H4><b><u>", PRENAME, " (", n_seats, " ", seat_word_total, ")", "</u></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(vote_pct_50_1, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_1, accuracy = 0.1), " – ",
percent(vote_pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(vote_pct_50_2, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_2, accuracy = 0.1), " – ",
percent(vote_pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(vote_pct_50_3, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_3, accuracy = 0.1), " – ",
percent(vote_pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(vote_pct_50_4, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_4, accuracy = 0.1), " – ",
percent(vote_pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(vote_pct_50_5, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_5, accuracy = 0.1), " – ",
percent(vote_pct_95_5, accuracy = 0.1), ")<br>
<br>
<b><i>Projected seats (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">", seats_pct_50_1,
"</font></b> ", seat_word_1, " (", seats_pct_05_1, " – ", seats_pct_95_1, ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">", seats_pct_50_2,
"</font></b> ", seat_word_2, " (", seats_pct_05_2, " – ", seats_pct_95_2, ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">", seats_pct_50_3,
"</font></b> ", seat_word_3, " (", seats_pct_05_3, " – ", seats_pct_95_3, ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">", seats_pct_50_4,
"</font></b> ", seat_word_4, " (", seats_pct_05_4, " – ", seats_pct_95_4, ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">", seats_pct_50_5,
"</font></b> ", seat_word_5, " (", seats_pct_05_5, " – ", seats_pct_95_5, ")")))
write_rds(province_map_data, "shiny-app/data/province_map_data.rds")
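# Preview of the province-level map. Fill opacity uses the sqrt-transformed leading
# vote share computed above, so provinces where the leading party's projected vote
# share is higher are shaded more strongly.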
leaflet(province_map_data) %>%
addTiles() %>%
addPolygons(color = "#666666", weight = 1, opacity = 1, fill = TRUE, fillColor = ~color_1, fillOpacity = ~alpha, label = ~PRENAME,
popup = ~popup_label, highlightOptions = highlightOptions(color = "black", weight = 4, bringToFront = TRUE, opacity = 1))
write_rds(paste0(as.character(Sys.time()), " EDT"), "shiny-app/data/update_time.rds")
rsconnect::deployApp(appDir = "shiny-app", forceUpdate = TRUE, launch.browser = FALSE)
|
/src/map.R
|
no_license
|
thisismactan/Canada-2021
|
R
| false | false | 22,833 |
r
|
source("src/lib.R")
# Create tables of summary stats for the map ####
## Riding level
district_summary_stats_probs <- district_summary_stats %>%
left_join(district_probs %>%
melt(measure.vars = c("Liberal", "Conservative", "NDP", "Green", "People's", "Bloc"), variable.name = "party", value.name = "prob"),
by = c("district_code", "party")) %>%
mutate(prob = ifelse(is.na(prob), 0, prob)) %>%
dplyr::select(province, district_code, district_name = district, party, prob, pct_05, mean, pct_95) %>%
arrange(district_code, desc(prob), desc(mean)) %>%
group_by(district_code) %>%
mutate(rank = 1:n()) %>%
ungroup()
district_summary_stats_1 <- district_summary_stats_probs %>%
filter(rank == 1) %>%
dplyr::select(province, district_code, district_name, party_1 = party, prob_1 = prob, pct_05_1 = pct_05, mean_1 = mean, pct_95_1 = pct_95)
district_summary_stats_2 <- district_summary_stats_probs %>%
filter(rank == 2) %>%
dplyr::select(district_code, district_name, party_2 = party, prob_2 = prob, pct_05_2 = pct_05, mean_2 = mean, pct_95_2 = pct_95)
district_summary_stats_3 <- district_summary_stats_probs %>%
filter(rank == 3) %>%
dplyr::select(district_code, district_name, party_3 = party, prob_3 = prob, pct_05_3 = pct_05, mean_3 = mean, pct_95_3 = pct_95)
district_summary_stats_4 <- district_summary_stats_probs %>%
filter(rank == 4) %>%
dplyr::select(district_code, district_name, party_4 = party, prob_4 = prob, pct_05_4 = pct_05, mean_4 = mean, pct_95_4 = pct_95)
district_summary_stats_5 <- district_summary_stats_probs %>%
filter(rank == 5) %>%
dplyr::select(district_code, district_name, party_5 = party, prob_5 = prob, pct_05_5 = pct_05, mean_5 = mean, pct_95_5 = pct_95)
district_summary_stats_6 <- district_summary_stats_probs %>%
filter(rank == 6) %>%
dplyr::select(district_code, district_name, party_6 = party, prob_6 = prob, pct_05_6 = pct_05, mean_6 = mean, pct_95_6 = pct_95)
district_summary_stats_wide <- district_summary_stats_1 %>%
left_join(district_summary_stats_2, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_3, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_4, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_5, by = c("district_code", "district_name")) %>%
left_join(district_summary_stats_6, by = c("district_code", "district_name"))
## Province level
province_summary_stats_1 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 1) %>%
dplyr::select(province, party_1 = party, vote_pct_05_1 = vote_pct_05, vote_pct_50_1 = vote_pct_50, vote_pct_95_1 = vote_pct_95,
seats_pct_05_1 = seats_pct_05, seats_pct_50_1 = seats_pct_50, seats_pct_95_1 = seats_pct_95)
province_summary_stats_2 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 2) %>%
dplyr::select(province, party_2 = party, vote_pct_05_2 = vote_pct_05, vote_pct_50_2 = vote_pct_50, vote_pct_95_2 = vote_pct_95,
seats_pct_05_2 = seats_pct_05, seats_pct_50_2 = seats_pct_50, seats_pct_95_2 = seats_pct_95)
province_summary_stats_3 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 3) %>%
dplyr::select(province, party_3 = party, vote_pct_05_3 = vote_pct_05, vote_pct_50_3 = vote_pct_50, vote_pct_95_3 = vote_pct_95,
seats_pct_05_3 = seats_pct_05, seats_pct_50_3 = seats_pct_50, seats_pct_95_3 = seats_pct_95)
province_summary_stats_4 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 4) %>%
dplyr::select(province, party_4 = party, vote_pct_05_4 = vote_pct_05, vote_pct_50_4 = vote_pct_50, vote_pct_95_4 = vote_pct_95,
seats_pct_05_4 = seats_pct_05, seats_pct_50_4 = seats_pct_50, seats_pct_95_4 = seats_pct_95)
province_summary_stats_5 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 5) %>%
dplyr::select(province, party_5 = party, vote_pct_05_5 = vote_pct_05, vote_pct_50_5 = vote_pct_50, vote_pct_95_5 = vote_pct_95,
seats_pct_05_5 = seats_pct_05, seats_pct_50_5 = seats_pct_50, seats_pct_95_5 = seats_pct_95)
province_summary_stats_6 <- province_summary_stats %>%
arrange(province, desc(vote_pct_50)) %>%
group_by(province) %>%
mutate(rank = 1:n()) %>%
filter(rank == 6) %>%
dplyr::select(province, party_6 = party, vote_pct_05_6 = vote_pct_05, vote_pct_50_6 = vote_pct_50, vote_pct_95_6 = vote_pct_95,
seats_pct_05_6 = seats_pct_05, seats_pct_50_6 = seats_pct_50, seats_pct_95_6 = seats_pct_95)
province_summary_stats_wide <- province_summary_stats_1 %>%
left_join(province_summary_stats_2, by = c("province")) %>%
left_join(province_summary_stats_3, by = c("province")) %>%
left_join(province_summary_stats_4, by = c("province")) %>%
left_join(province_summary_stats_5, by = c("province")) %>%
left_join(province_summary_stats_6, by = c("province"))
# The map itself ####
## Create Lambert conformal conic CRS for leaflet (see http://spatialreference.org/ref/esri/canada-lambert-conformal-conic/ )
crs_proj <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=63.390675 +lon_0=-91.86666666666666 +x_0=6200000 +y_0=3000000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs "
crs_lcc <- leafletCRS(code = "ESRI:102002", proj4def = crs_proj)
district_shp <- readOGR("data/shapes/FED_CA_2_2_ENG.shp") %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")) %>%
st_as_sf() %>%
mutate(district_code = as.numeric(FED_NUM)) %>%
ms_simplify()
province_shp <- readOGR("data/shapes/provinces.shp") %>%
spTransform(CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")) %>%
st_as_sf() %>%
ms_simplify()
# Swap out shapes from provincial file for the territories since they look nicer
district_shp$geometry[district_shp$PROVCODE == "YT"] <- province_shp$geometry[province_shp$PRENAME == "Yukon"]
district_shp$geometry[district_shp$PROVCODE == "NT"] <- province_shp$geometry[province_shp$PRENAME == "Northwest Territories"]
district_shp$geometry[district_shp$PROVCODE == "NU"] <- province_shp$geometry[province_shp$PRENAME == "Nunavut"]
# Define mapping from party to color
party_to_color <- function(party) {
case_when(party == "Liberal" ~ "red",
party == "Conservative" ~ "blue",
party == "NDP" ~ "#EE7600",
party == "Green" ~ "#008B00",
party == "Bloc" ~ "#8B008B",
party == "People's" ~ "midnightblue")
}
district_map_data <- district_shp %>%
left_join(district_summary_stats_wide, by = "district_code") %>%
mutate(color_1 = party_to_color(party_1),
color_2 = party_to_color(party_2),
color_3 = party_to_color(party_3),
color_4 = party_to_color(party_4),
color_5 = party_to_color(party_5),
color_6 = party_to_color(party_6),
mouseover_label = district_name,
alpha = (prob_1 - 0.4) / 0.6) %>%
# Create the infobox
mutate(popup_label = case_when(
# When there is a sixth party (i.e. Quebec)
!is.na(party_6) ~ paste0("<H4><b><u>", district_name, "</u><br><i>", province, "</i></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(mean_1, accuracy = 0.1), "</font></b> (", percent(pct_05_1, accuracy = 0.1), " – ",
percent(pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(mean_2, accuracy = 0.1), "</font></b> (", percent(pct_05_2, accuracy = 0.1), " – ",
percent(pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(mean_3, accuracy = 0.1), "</font></b> (", percent(pct_05_3, accuracy = 0.1), " – ",
percent(pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(mean_4, accuracy = 0.1), "</font></b> (", percent(pct_05_4, accuracy = 0.1), " – ",
percent(pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(mean_5, accuracy = 0.1), "</font></b> (", percent(pct_05_5, accuracy = 0.1), " – ",
percent(pct_95_5, accuracy = 0.1), ")<br>
<font color = ", color_6, "><b>", party_6, "</b></font>: <b><font color =", color_6, ">",
percent(mean_6, accuracy = 0.1), "</font></b> (", percent(pct_05_6, accuracy = 0.1), " – ",
percent(pct_95_6, accuracy = 0.1), ")<br>
<br>
<b><i>Win probability</i><b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>", ": <font color = ", color_1, "><b>",
percent(prob_1, accuracy = 1), "</b></font><br>
<font color = ", color_2, "><b>", party_2, "</b></font>", ": <font color = ", color_2, "><b>",
percent(prob_2, accuracy = 1), "</b></font><br>
<font color = ", color_3, "><b>", party_3, "</b></font>", ": <font color = ", color_3, "><b>",
percent(prob_3, accuracy = 1), "</b></font><br>
<font color = ", color_4, "><b>", party_4, "</b></font>", ": <font color = ", color_4, "><b>",
percent(prob_4, accuracy = 1), "</b></font><br>
<font color = ", color_5, "><b>", party_5, "</b></font>", ": <font color = ", color_5, "><b>",
percent(prob_5, accuracy = 1), "</b></font><br>
<font color = ", color_6, "><b>", party_6, "</b></font>", ": <font color = ", color_6, "><b>",
percent(prob_6, accuracy = 1), "</b></font>"),
is.na(party_6) ~ paste0("<H4><b><u>", district_name, "</u><br><i>", province, "</i></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(mean_1, accuracy = 0.1), "</font></b> (", percent(pct_05_1, accuracy = 0.1), " – ",
percent(pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(mean_2, accuracy = 0.1), "</font></b> (", percent(pct_05_2, accuracy = 0.1), " – ",
percent(pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(mean_3, accuracy = 0.1), "</font></b> (", percent(pct_05_3, accuracy = 0.1), " – ",
percent(pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(mean_4, accuracy = 0.1), "</font></b> (", percent(pct_05_4, accuracy = 0.1), " – ",
percent(pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(mean_5, accuracy = 0.1), "</font></b> (", percent(pct_05_5, accuracy = 0.1), " – ",
percent(pct_95_5, accuracy = 0.1), ")<br>
<br>
<b><i>Win probability</i><b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>", ": <font color = ", color_1, "><b>",
percent(prob_1, accuracy = 1), "</b></font><br>
<font color = ", color_2, "><b>", party_2, "</b></font>", ": <font color = ", color_2, "><b>",
percent(prob_2, accuracy = 1), "</b></font><br>
<font color = ", color_3, "><b>", party_3, "</b></font>", ": <font color = ", color_3, "><b>",
percent(prob_3, accuracy = 1), "</b></font><br>
<font color = ", color_4, "><b>", party_4, "</b></font>", ": <font color = ", color_4, "><b>",
percent(prob_4, accuracy = 1), "</b></font><br>
<font color = ", color_5, "><b>", party_5, "</b></font>", ": <font color = ", color_5, "><b>",
percent(prob_5, accuracy = 1), "</b></font>")))
write_rds(district_map_data, "shiny-app/data/district_map_data.rds")
leaflet(district_map_data) %>%
addTiles() %>%
addPolygons(color = "#666666", weight = 1, opacity = 1, fill = TRUE, fillColor = ~color_1, fillOpacity = ~(prob_1 - 0.4) / 0.6,
label = ~district_name, popup = ~popup_label,
highlightOptions = highlightOptions(color = "black", weight = 4, bringToFront = TRUE, opacity = 1))
# Province map ####
province_map_data <- province_shp %>%
left_join(province_summary_stats_wide, by = c("PRENAME" = "province")) %>%
left_join(district_probs %>% group_by(province) %>% summarise(n_seats = n()), by = c("PRENAME" = "province")) %>%
mutate(color_1 = party_to_color(party_1),
color_2 = party_to_color(party_2),
color_3 = party_to_color(party_3),
color_4 = party_to_color(party_4),
color_5 = party_to_color(party_5),
color_6 = party_to_color(party_6),
alpha = sqrt((vote_pct_50_1 - 0.28) / 0.28),
seat_word_total = ifelse(n_seats == 1, "riding", "ridings"),
seat_word_1 = ifelse(seats_pct_50_1 == 1, "seat", "seats"),
seat_word_2 = ifelse(seats_pct_50_2 == 1, "seat", "seats"),
seat_word_3 = ifelse(seats_pct_50_3 == 1, "seat", "seats"),
seat_word_4 = ifelse(seats_pct_50_4 == 1, "seat", "seats"),
seat_word_5 = ifelse(seats_pct_50_5 == 1, "seat", "seats"),
seat_word_6 = ifelse(seats_pct_50_6 == 1, "seat", "seats"),
mouseover_label = PRENAME) %>%
# Infobox
# Create the infobox
mutate(popup_label = case_when(
# When there is a sixth party (i.e. Quebec)
!is.na(party_6) ~ paste0("<H4><b><u>", PRENAME, " (", n_seats, " ", seat_word_total, ")", "</u></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(vote_pct_50_1, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_1, accuracy = 0.1), " – ",
percent(vote_pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(vote_pct_50_2, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_2, accuracy = 0.1), " – ",
percent(vote_pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(vote_pct_50_3, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_3, accuracy = 0.1), " – ",
percent(vote_pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(vote_pct_50_4, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_4, accuracy = 0.1), " – ",
percent(vote_pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(vote_pct_50_5, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_5, accuracy = 0.1), " – ",
percent(vote_pct_95_5, accuracy = 0.1), ")<br>
<font color = ", color_6, "><b>", party_6, "</b></font>: <b><font color =", color_6, ">",
percent(vote_pct_50_6, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_6, accuracy = 0.1), " – ",
percent(vote_pct_95_6, accuracy = 0.1), ")<br>
<br>
<b><i>Projected seats (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">", seats_pct_50_1,
"</font></b> ", seat_word_1, " (", seats_pct_05_1, " – ", seats_pct_95_1, ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">", seats_pct_50_2,
"</font></b> ", seat_word_2, " (", seats_pct_05_2, " – ", seats_pct_95_2, ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">", seats_pct_50_3,
"</font></b> ", seat_word_3, " (", seats_pct_05_3, " – ", seats_pct_95_3, ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">", seats_pct_50_4,
"</font></b> ", seat_word_4, " (", seats_pct_05_4, " – ", seats_pct_95_4, ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">", seats_pct_50_5,
"</font></b> ", seat_word_5, " (", seats_pct_05_5, " – ", seats_pct_95_5, ")<br>
<font color = ", color_6, "><b>", party_6, "</b></font>: <b><font color =", color_6, ">", seats_pct_50_6,
"</font></b> ", seat_word_6, " (", seats_pct_05_6, " – ", seats_pct_95_6, ")"),
is.na(party_6) ~ paste0("<H4><b><u>", PRENAME, " (", n_seats, " ", seat_word_total, ")", "</u></b></H4>
<b><i>Projected vote (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">",
percent(vote_pct_50_1, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_1, accuracy = 0.1), " – ",
percent(vote_pct_95_1, accuracy = 0.1), ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">",
percent(vote_pct_50_2, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_2, accuracy = 0.1), " – ",
percent(vote_pct_95_2, accuracy = 0.1), ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">",
percent(vote_pct_50_3, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_3, accuracy = 0.1), " – ",
percent(vote_pct_95_3, accuracy = 0.1), ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">",
percent(vote_pct_50_4, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_4, accuracy = 0.1), " – ",
percent(vote_pct_95_4, accuracy = 0.1), ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">",
percent(vote_pct_50_5, accuracy = 0.1), "</font></b> (", percent(vote_pct_05_5, accuracy = 0.1), " – ",
percent(vote_pct_95_5, accuracy = 0.1), ")<br>
<br>
<b><i>Projected seats (90% CI)</i></b><br>
<font color = ", color_1, "><b>", party_1, "</b></font>: <b><font color =", color_1, ">", seats_pct_50_1,
"</font></b> ", seat_word_1, " (", seats_pct_05_1, " – ", seats_pct_95_1, ")<br>
<font color = ", color_2, "><b>", party_2, "</b></font>: <b><font color =", color_2, ">", seats_pct_50_2,
"</font></b> ", seat_word_2, " (", seats_pct_05_2, " – ", seats_pct_95_2, ")<br>
<font color = ", color_3, "><b>", party_3, "</b></font>: <b><font color =", color_3, ">", seats_pct_50_3,
"</font></b> ", seat_word_3, " (", seats_pct_05_3, " – ", seats_pct_95_3, ")<br>
<font color = ", color_4, "><b>", party_4, "</b></font>: <b><font color =", color_4, ">", seats_pct_50_4,
"</font></b> ", seat_word_4, " (", seats_pct_05_4, " – ", seats_pct_95_4, ")<br>
<font color = ", color_5, "><b>", party_5, "</b></font>: <b><font color =", color_5, ">", seats_pct_50_5,
"</font></b> ", seat_word_5, " (", seats_pct_05_5, " – ", seats_pct_95_5, ")")))
write_rds(province_map_data, "shiny-app/data/province_map_data.rds")
leaflet(province_map_data) %>%
addTiles() %>%
addPolygons(color = "#666666", weight = 1, opacity = 1, fill = TRUE, fillColor = ~color_1, fillOpacity = ~alpha, label = ~PRENAME,
popup = ~popup_label, highlightOptions = highlightOptions(color = "black", weight = 4, bringToFront = TRUE, opacity = 1))
write_rds(paste0(as.character(Sys.time()), " EDT"), "shiny-app/data/update_time.rds")
rsconnect::deployApp(appDir = "shiny-app", forceUpdate = TRUE, launch.browser = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doeFactor.R
\name{getRandomLevel}
\alias{getRandomLevel}
\title{Function that returns a random value that could be chosen for the given factor.}
\usage{
getRandomLevel(object, skip = NA)
}
\arguments{
\item{object}{An object of class doeFactor}
\item{skip}{A list of values that should not be used for the random selection of one factor level.}
}
\value{
The function returns either a single value (numeric or character) for hard- and easy-to-change factors.
For semi-hard-to-change-factors a list of different values is generated. The length of the list is
the number of possible values a shtc-factor can take in a given block (=whole plot).
}
\description{
Function that returns a random value that could be chosen for the given factor.
}
\examples{
etc.factor <- new("doeFactor",
name="Temperature",
levels=c(180, 210),
number.levels=as.integer(5),
type="continuous",
changes="easy")
getRandomLevel(etc.factor)
semi.htc.factor <- new("doeFactor",
name="Temperature",
levels=c(180, 210),
number.levels=as.integer(5),
type="continuous",
changes="semi.hard",
semi.htc.group.size=as.integer(3))
getRandomLevel(semi.htc.factor)
}
\keyword{internal}
|
/man/getRandomLevel.Rd
|
no_license
|
neuhier/rospd
|
R
| false | true | 1,516 |
rd
|
|
# Script to generate plots with various ways of representing uncertainty, based on
# the Coffee & Code dataset from https://www.kaggle.com/devready/coffee-and-code/data
# set-up & data manipulation ---------------------------------------------------
# load packages
library(ggplot2) # for plots, built layer by layer
library(dplyr) # for data manipulation
library(magrittr) # for piping
library(ggridges) # for density ridge plots
library(patchwork) # great package for "patching" plots together!
# set ggplot theme
theme_set(theme_classic() +
theme(axis.title = element_text(size = 11, face = "bold"),
axis.text = element_text(size = 11),
plot.title = element_text(size = 13, face = "bold"),
legend.title = element_text(size = 11, face = "bold"),
legend.text = element_text(size = 10)))
# import data
df <- read.csv("data/coffee_code.csv")
# set labels to be used in all plots
coffee_labels <- labs(title = "Does coffee help programmers code?",
x = "Coffee while coding",
y = "Time spent coding \n(hours/day)")
# the variable CodingWithoutCoffee is negatively phrased, which is harder to
# interpret (i.e. "No" means they DO drink coffee...). So, let's transform it
# into a more intuitive variable!
df$CodingWithCoffee <- gsub("No", "Usually", df$CodingWithoutCoffee)
df$CodingWithCoffee <- gsub("Yes", "Rarely\n or never", df$CodingWithCoffee)
# convert to factor and set levels so they show up in a logical order
df$CodingWithCoffee <- factor(df$CodingWithCoffee,
levels = c("Rarely\n or never",
"Sometimes",
"Usually"))
# calculate summary statistics for the variable of choice
df_summary <- group_by(df, CodingWithCoffee) %>%
summarise(
# mean
mean_codinghours = mean(CodingHours),
# standard deviation
sd_codinghours = sd(CodingHours),
# standard error
se_codinghours = sd(CodingHours)/sqrt(length(CodingHours)))
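# The 95% CI drawn in the demo plot below uses a normal approximation: mean +/- 1.96 * SE.
# Illustrative only (nothing downstream depends on these columns):
df_summary <- df_summary %>%
  mutate(ci_lower_codinghours = mean_codinghours - 1.96 * se_codinghours,
         ci_upper_codinghours = mean_codinghours + 1.96 * se_codinghours)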
# 1. Error bars (standard error) -----------------------------------------------
ggplot(df_summary) +
geom_errorbar(aes(x = CodingWithCoffee,
ymin = mean_codinghours - se_codinghours,
ymax = mean_codinghours + se_codinghours),
width = .2) +
geom_point(aes(x = CodingWithCoffee, y = mean_codinghours),
size = 3) +
coffee_labels + ylim(0,10)
ggsave("figures/coffee_errorbars.png", width = 5, height = 3, units = "in")
# 2. Boxplot -------------------------------------------------------------------
ggplot(df) +
geom_boxplot(aes(x = CodingWithCoffee, y = CodingHours)) +
coffee_labels
ggsave("figures/coffee_boxplot.png", width = 5, height = 3, units = "in")
# 3. Error bar demonstration ---------------------------------------------------
# get some nice colours from viridis (magma)
error_cols <- viridis::viridis_pal(option = "magma")(5)[2:4]
# set labels to be used in the palette
error_labels = c("standard deviation","95% confidence interval","standard error")
ggplot(df_summary) +
# show the raw data
geom_jitter(data = df, aes(x = CodingWithCoffee,
y = CodingHours),
alpha = .5, width = .05, col = "grey") +
# standard deviation
geom_errorbar(aes(x = CodingWithCoffee,
ymin = mean_codinghours - sd_codinghours,
ymax = mean_codinghours + sd_codinghours,
col = "SD"), width = .2, lwd = 1) +
# 95% confidence interval
geom_errorbar(aes(x = CodingWithCoffee,
ymin = mean_codinghours - 1.96*se_codinghours,
ymax = mean_codinghours + 1.96*se_codinghours,
col = "CI"), width = .2, lwd = 1) +
# standard error
geom_errorbar(aes(x = CodingWithCoffee,
ymin = mean_codinghours - se_codinghours,
ymax = mean_codinghours + se_codinghours,
col = "SE"), width = .2, lwd = 1) +
geom_point(aes(x = CodingWithCoffee, y = mean_codinghours),
size = 3) +
coffee_labels + ylim(c(0,11)) +
# manual palette/legend set-up!
  scale_colour_manual(name = "Uncertainty metric",
                      values = c(SD = error_cols[1],
                                 CI = error_cols[2],
                                 SE = error_cols[3]),
                      # breaks pin the legend order so the labels below line up with SD / CI / SE
                      breaks = c("SD", "CI", "SE"),
                      labels = error_labels) +
theme(legend.position = "top")
ggsave("figures/coffee_bars_demo.png", width = 7, height = 5, units = "in")
# 4. Jitter plot with violin ---------------------------------------------------
ggplot() +
geom_jitter(data = df, aes(x = CodingWithCoffee,
y = CodingHours),
alpha = .5, width = .05, col = "grey") +
geom_violin(data = df, aes(x = CodingWithCoffee,
y = CodingHours), alpha = 0) +
geom_linerange(data = df_summary,
aes(x = CodingWithCoffee,
ymin = mean_codinghours - se_codinghours,
ymax = mean_codinghours + se_codinghours)) +
geom_point(data = df_summary,
aes(x = CodingWithCoffee,
y = mean_codinghours), size = 3) +
coffee_labels
ggsave("figures/coffee_violin_jitter.png", width = 5, height = 3, units = "in")
# 5. Density ridge plot --------------------------------------------------------
ggplot(df) +
aes(y = CodingWithCoffee, x = CodingHours, fill = stat(x)) +
geom_density_ridges_gradient(scale = 1.9, size = .2, rel_min_height = 0.005) +
# colour palette (gradient according to CodingHours)
scale_fill_viridis_c(option = "magma", direction = -1) +
# remove legend - it's not necessary here!
theme(legend.position = "none") +
labs(title = coffee_labels$title,
x = coffee_labels$y,
y = "Coffee \nwhile coding") +
theme(axis.title.y = element_text(angle=0, hjust = 1, vjust = .9,
margin = margin(t = 0, r = -50, b = 0, l = 0)))
ggsave("figures/coffee_density_ridges.png", width = 5, height = 3, units = "in")
# 6. Jitter vs. Rug plot ------------------------------------------------------------------
jitterplot <- ggplot(df, aes(x = CoffeeCupsPerDay, y = CodingHours)) +
geom_jitter(alpha = .2) +
geom_smooth(fill = error_cols[1], col = "black", method = lm, lwd = .7) +
coffee_labels + ylim(c(0,11)) + labs(x = "Cups of coffee (per day)")
rugplot <- ggplot(df, aes(x = CoffeeCupsPerDay, y = CodingHours)) +
geom_smooth(fill = error_cols[1], col = "black", method = lm, lwd = .7) +
geom_rug(position="jitter", alpha = .7) + ylim(c(0,11)) +
coffee_labels + labs(x = "Cups of coffee (per day)")
# patch the two plots together
jitterplot + rugplot
ggsave("figures/coffee_jitter_vs_rugplot.png", width = 10, height = 4, units = "in")
|
/scripts/coffee_uncertainty_options.R
|
permissive
|
katherinehebert/BIOS2_DataViz
|
R
| false | false | 6,920 |
r
|
|
#####################################
#
# UHS ED Consultants Working overnight
# Interrupted Time Series Modelling Script
# Modelling - Total Time in Department (monthly average)
# Intervention group: night time 10pm to 6am
# Control group: day time 6am to 10pm
# Interruption occurs at time point 35 (November 2015)
# Total 54 observations. Unit of analysis = month.
#
# Pre = 34; Post = 20
# T.Monks
#####################################
####################################
# Modelling
# 1. Seasonality with 11 monthly dummies
# 2. Seasonality with single dummy variable for difference between summer/rest of year
# 3. Adjusting for wild points in Winter 2016/17
# 4. Interaction between seasonality and group
# 5. Adjusting for patient numbers per month
#####################################
###################################
# Dependencies
###################################
library(nlme)
library(car)
library(tseries)
library(ggplot2)
########################
# Read in the dataset
########################
#read in manually.
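# A hedged sketch of the expected structure, inferred from the column names used
# below (the file name is hypothetical):
# data_ed <- read.csv("data/ed_its_data.csv")
# Required columns: mean_total_time, time, group, group_time, level, trend,
# group_level, group_trend, M1-M11, ed_summer, summer_group, wild, patients_n, x_labels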
########################
# Descriptives: Plot the series - sample size for each point
########################
# Plot the time series for the nights where consultants worked in ED.
plot(data_ed$time[1:54],data_ed$patients_n[1:54],
ylab="Sample size (patients)",
ylim=c(0,4500),
xlab="Year-Month",
type="l",
col="blue",
xaxt="n")
# Add in the control group (daytime) series
points(data_ed$time[55:108],data_ed$patients_n[55:108],
type='l',
col="red")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
# Add in the points for the figure
points(data_ed$time[1:54],data_ed$patients_n[1:54],
col="blue",
pch=20)
points(data_ed$time[55:108],data_ed$patients_n[55:108],
col="red",
pch=20)
# Label the start of the intervention (November 2015)
abline(v=34.5,lty=2, lwd = 2)
# Add in a legend
legend(x=0.2, y=4500, legend=c("Night (Intervention)","Day (Control)"),
col=c("blue","red"),pch=20)
########################
# Plotting a smoothing line
########################
par(mfrow=c(1,1))
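# Weights for a centred (two-sided) moving average spanning roughly a year, with
# half weights on the end months; stats::filter() below applies them symmetrically (sides = 2).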
smoother = c(1/24,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/24)
trendpattern = filter (data_ed$mean_total_time[1:54], filter = smoother, sides=2)
trendpattern2 = filter (data_ed$mean_total_time[55:108], filter = smoother, sides=2)
##########
# 12 Month smoother to visualise difference.
#########
plot (data_ed$time[1:54], data_ed$mean_total_time[1:54],
type= "p",
ylab="Total Time in Department (mean mins)",
ylim=c(0,300),
xlab="Month",
col='lightblue',
pch=20,
xaxt="n")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
points(data_ed$time[1:54],data_ed$mean_total_time[55:108],
type='p',
col="pink",
pch=20)
lines (trendpattern, col='blue', lwd = 3)
lines (trendpattern2, col='red', lwd = 3)
abline(v=34.5,lty=2)
legend(x=1, y=100, legend=c("Night (intervention)","Day (control)"),
col=c("blue","red"),pch=20)
########################
# Model 1: Simple OLS with seasonal dummies
########################
#check for unit root - all okay ADF(4) = -4.6 p < 0.01 (sig indicates stationarity)
adf.test(data_ed$mean_total_time)
# A preliminary OLS regression
model_ols<-lm(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend +
M1 +
M2 +
M3 +
M4 +
M5 +
M6 +
M7 +
M8 +
M9 +
M10 +
M11 , data=data_ed)
summary(model_ols)
confint(model_ols)
influence(model_ols)
cooks.distance(model_ols)
################################
# Assessing Autocorrelation
################################
# Durbin-watson test to 12 lags
dwt(model_ols,max.lag=12,alternative="two.sided")
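# Significant autocorrelation at any of these lags means the OLS standard errors are
# unreliable, which motivates the GLS models with ARMA error structures fitted below.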
# Graph the residuals from the OLS regression to check for serially correlated errors
plot(data_ed$time[1:54],
residuals(model_ols)[1:54],
type='o',
pch=16,
xlab='Time',
ylab='OLS Residuals',
col="red")
abline(h=0,lty=2)
# Plot ACF and PACF
# Set plotting to two records on one page
par(mfrow=c(1,2))
# Produce Plots
acf(residuals(model_ols), lag.max = 50)
acf(residuals(model_ols),type='partial')
# Note decay in ACF, significant spike at 10 in PACF, model p=10
########################
# Run the final model
########################
# Fit the GLS regression model
model_p10 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend +
M1 +
M2 +
M3 +
M4 +
M5 +
M6 +
M7 +
M8 +
M9 +
M10 +
M11 ,
data=data_ed,
correlation=corARMA(p=10,form=~time|group),
method="ML")
summary(model_p10)
confint(model_p10)
########################
# Sensitivity analysis
########################
#ACF exp decay to zero indicating AR
#PACF spikes at 2, 10 and 14 (just).
# Likelihood-ratio tests
model_p2<- update(model_p10,correlation=corARMA(p=2,form=~time|group))
anova(model_p2,model_p10)
summary(model_p2)
#AR(10) has lower AIC. Conclusions are the same.
model_p14 <- update(model_p10,correlation=corARMA(p=14, form=~time|group))
anova(model_p10,model_p14)
summary(model_p14)
#AR(14) has lower AIC (marginally). Substantive conclusions are the same.
# Put plotting back to one chart
par(mfrow=c(1,1))
model_final <- model_p10
# Residual plot
qqPlot(residuals(model_final))
# (removed stray influence() call: model_p16 is not defined at this point and
#  stats::influence() has no method for gls objects)
########################
# Plot results
#########################
# First plot the raw data points for the intervention (night) group
plot(data_ed$time[1:54],data_ed$mean_total_time[1:54],
ylim=c(0,450),
ylab="Total Time in Dept. (Mean Mins)",
xlab="Year/Mnth",
pch=20,
col="lightblue",
xaxt="n")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
# Label the policy change
abline(v=34.5,lty=2)
# Add in the points for the control
points(data_ed$time[55:108],data_ed$mean_total_time[55:108],
col="pink",
pch=20)
# Plot the first line segment for the intervention group
lines(data_ed$time[1:34], fitted(model_final)[1:34], col="blue",lwd=2)
# Add the second line segment for the intervention group
lines(data_ed$time[35:54], fitted(model_final)[35:54], col="blue",lwd=2)
segments(35, model_final$coef[1] + model_final$coef[2]*35 + model_final$coef[3]+model_final$coef[4]*35 +
model_final$coef[5] + model_final$coef[6],
54, model_final$coef[1] + model_final$coef[2]*54 + model_final$coef[3]+model_final$coef[4]*54 +
model_final$coef[5] + model_final$coef[6]*20,
lty=2,col='blue',lwd=2)
# Plot the first line segment for the control group
lines(data_ed$time[55:88], fitted(model_final)[55:88], col="red",lwd=2)
# Add the second line segment for the control
lines(data_ed$time[89:108], fitted(model_final)[89:108], col="red",lwd=2)
# Add the counterfactual for the control group
#segments(1, model_final$coef[1]+model_final$coef[2],
#60,model_final$coef[1]+model_final$coef[2]*54,
#lty=2,col='red',lwd=2)
# Add in a legend
legend(x=40, y=430, legend=c("Night Worked","On Call"), col=c("blue","red"),pch=20)
########################
# Model 2: Seasonality (summer) modelled as single variable
########################
# A preliminary OLS regression
model_ols<-lm(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer, data=data_ed)
summary(model_ols)
confint(model_ols)
influence(model_ols)
cooks.distance(model_ols)
################################
# Assessing Autocorrelation
################################
# Durbin-watson test to 12 lags
dwt(model_ols,max.lag=12,alternative="two.sided")
# PACF suggestive of a problem at lag 12
# Graph the residuals from the OLS regression to check for serially correlated errors
plot(data_ed$time[1:54],
residuals(model_ols)[1:54],
type='o',
pch=16,
xlab='Time',
ylab='OLS Residuals',
col="red")
abline(h=0,lty=2)
# Plot ACF and PACF
# Set plotting to two records on one page
par(mfrow=c(1,2))
# Produce Plots
acf(residuals(model_ols), lag.max = 25)
acf(residuals(model_ols),type='partial', lag.max = 25)
# Note decay in ACF, significant spike at 2 in PACF, initial model p=2
# Also a significant spike at 16 in PACF. Test in sensitivity analysis.
########################
# Run the final model
########################
# Fit the GLS regression model
model_p2 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer,
data=data_ed,
correlation=corARMA(p=2,form=~time|group),
method="ML")
summary(model_p2)
confint(model_p2)
vif(model_p2)
acf(residuals(model_p2), lag.max = 25)
acf(residuals(model_p2),type='partial', lag.max = 25)
outlierTest(model_ols) # Bonferroni p-value for most extreme obs
cutoff <- 4/((nrow(data_ed)-length(model_p2$coefficients)-2))
plot(model_p2, which=4, cook.levels=cutoff)
influencePlot(model_ols, id.method="identify", main="Influence Plot", sub="Circle size is proportional to Cook's Distance")
########################
# Sensitivity
########################
# Likelihood-ratio tests
model_p2q1 <- update(model_p2,correlation=corARMA(p=2,q=1,form=~time|group))
anova(model_p2q1,model_p2)
summary(model_p2q1)
#non-significant. Substantive conclusions the same.
model_p12 <- update(model_p2,correlation=corARMA(p=12, form=~time|group))
anova(model_p12,model_p2)
summary(model_p12)
#non-significant. Substantive conclusions the same.
model_p16 <- update(model_p2,correlation=corARMA(p=16, form=~time|group))
anova(model_p16,model_p2)
summary(model_p16)
confint(model_p16)
#p = 16 has lower AIC - significant. Substantive conclusions the same.
#notes: p = 16 has smaller standard errors for interaction terms.
model_p12q1 <- update(model_p2,correlation=corARMA(p=12, q = 1, form=~time|group))
anova(model_p12q1,model_p2)
summary(model_p12q1)
#non-significant. Substantive conclusions the same.
model_p16q1 <- update(model_p2,correlation=corARMA(p=16, q=1, form=~time|group))
anova(model_p16q1,model_p16)
summary(model_p16q1)
confint(model_p16q1)
#p = 16 has lower AIC - significant. Substantive conclusions the same.
# Put plotting back to one chart
par(mfrow=c(1,1))
model_final <- model_p2
summary(model_final)
# pseudo R-squared
R2 <- cor(data_ed$mean_total_time,predict(model_final))^2
R2
# pseudo R^2 = 0.67 for p = 2 and 0.66 for p = 16
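# (This is the squared correlation between observed and fitted values; gls() has no
#  true R-squared, so treat it only as a rough goodness-of-fit indicator.)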
# Residual plot
qqPlot(residuals(model_final))
influence(model_p2, do.coef = TRUE)
########################
# Plot regression results - average out seasonality
#########################
plot(data_ed$time[1:54],data_ed$mean_total_time[1:54],
ylim=c(0,300),
ylab="Total Time in Dept. (Mean Mins)",
xlab="Month",
pch=20,
col="lightblue",
xaxt="n")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
# Label the policy change
abline(v=34.5,lty=2)
# Add in the points for the control
points(data_ed$time[55:108],data_ed$mean_total_time[55:108],
col="pink",
pch=20)
# Plot the first line segment for the intervention group
#lines(data_ed$time[1:34], fitted(model_final)[1:34], col="blue",lwd=2)
# Add the second line segment for the intervention group
#lines(data_ed$time[35:54], fitted(model_final)[35:54], col="blue",lwd=2)
#segments(35, model_final$coef[1] + model_final$coef[2]*35 + model_final$coef[3]+model_final$coef[4]*35 +
# model_final$coef[5] + model_final$coef[6],
# 54, model_final$coef[1] + model_final$coef[2]*54 + model_final$coef[3]+model_final$coef[4]*54 +
# model_final$coef[5] + model_final$coef[6]*20,
# lty=2,col='blue',lwd=2)
# Plot the first line segment for the control group
#lines(data_ed$time[55:88], fitted(model_final)[55:88], col="red",lwd=2)
# Add the second line segment for the control
#lines(data_ed$time[89:108], fitted(model_final)[89:108], col="red",lwd=2)
# Calculate the offset to average out seasonality in the visualisation
offset <- mean(data_ed$ed_summer) * model_final$coef[9]
offset
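# The offset adds the seasonal effect for an "average" month (proportion of summer
# observations times the ed_summer coefficient), so the plotted lines are deseasonalised.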
#Plot the first line for the intervention group (averaging out seasonality)
segments(1,
model_final$coef[1] + model_final$coef[2] + model_final$coef[3] + offset,
34,
model_final$coef[1] + (model_final$coef[2]*34) +
+ model_final$coef[3] + (model_final$coef[4]*34) + offset,
lty=1, lwd=2, col='blue')
#intervention group post period line (averaging out seasonality) CORRECT.
segments(35,
model_final$coef[1] + model_final$coef[3] +
(model_final$coef[2] + model_final$coef[4])*35 +
model_final$coef[5] + model_final$coef[6] +
model_final$coef[7] + model_final$coef[8] + offset,
54,
model_final$coef[1] + model_final$coef[3] +
(model_final$coef[2] + model_final$coef[4])*35 +
model_final$coef[5] + model_final$coef[7] +
(model_final$coef[6] + model_final$coef[8])*20 + offset,
lty=1, lwd=2, col='blue')
# Plot the first line segment for the control group (averaging out seasonality)
segments(1,
model_final$coef[1] + model_final$coef[2] + offset,
34,
model_final$coef[1] + model_final$coef[2]*34 + offset,
lty=1, lwd=2, col='red')
#Plot the second line for the control group (averaging out seasonality)
#correct 23/01/18
segments(35,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + model_final$coef[6] + offset,
54,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + (model_final$coef[6]*54) + offset,
lty=1, lwd=2, col='red')
#counterfactual for intervention 23/01/2018
segments(35,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + model_final$coef[6] + offset +
model_final$coef[3] + model_final$coef[4]*35,
54,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + (model_final$coef[6]*20) + offset+
model_final$coef[3] + model_final$coef[4]*54,
lty=2, lwd=2, col='blue')
# Add in a legend
legend(x=2, y=100, legend=c("Night (2200-0600)","Day"), col=c("blue","red"),pch=20)
##############################################
# Sensitivity to wild points - Cook's distance > 4/n
##############################################
# significant - same substantive findings
model_p16 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer + wild,
data=data_ed,
correlation=corARMA(p=16,form=~time|group),
method="ML")
summary(model_p16)
confint(model_p16)
##############################################
# Sensitivity to interaction between group and seasonality
##############################################
model_p16 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer + summer_group,
data=data_ed,
correlation=corARMA(p=16, form=~time|group),
method="ML")
summary(model_p16)
confint(model_p16)
#conclusions = no substantive difference in interpretation. Interaction term ns.
##############################################
# Sensitivity to number of patients per month.
##############################################
model_p16 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer + patients_n,
data=data_ed,
correlation=corARMA(p=16, form=~time|group),
method="ML")
summary(model_p16)
confint(model_p16)
#conclusions = no substantive difference in interpretation. patients_n is ns.
##############################################
# Predict absolute and relative changes
##############################################
prediction <- function(model, time, time_after){
pred <- fitted(model)[time]
  cfac <- model$coef[1] + model$coef[2]*time +      # use the model passed in, not the global fit
    model$coef[3] + model$coef[4]*time +
    model$coef[5] + model$coef[6]*time_after
abs <- pred - cfac
rel <- (pred - cfac) / cfac * 100
return(list(absolute=abs, relative=rel))
}
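# 'absolute' is the gap (in minutes) between the fitted value and the counterfactual at
# the chosen month; 'relative' expresses that gap as a percentage of the counterfactual.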
# Predicted value at 3 months after introducing consultants at night (month 37)
prediction(model_final, 37, 3)
# Predicted value at 6 months after introducing consultants at night (month 40)
prediction(model_final, 40, 6)
# Predicted value at 12 months after introducing consultants at night (month 46)
prediction(model_final, 46, 12)
# END
|
/ITS_ED_Control.R
|
permissive
|
LisaSharwood/ed_consultant_ITS
|
R
| false | false | 17,094 |
r
|
#####################################
#
# UHS ED Consultants Working overnight
# Interrupted Time Series Modelling Script
# Modelling - Total Time in Department (monthly average)
# Intervention group: night time 10pm to 6am
# Control group: day time 6am to 10pm
# Interruption occurs at time point 35 (November 2015)
# Total 54 observations.Unit of analysis = month.
#
# Pre = 34; Post = 20
# T.Monks
#####################################
####################################
# Modelling
# 1. Seasonality with 11 monthly dummies
# 2 Seasonality with single dummy variable for difference between summer/rest of year
# 3. Adjusting for wild points in Winter 2016/17
# 4. Interaction between seasonality and group
# 5. Adjusting for patients numbers per month
#####################################
###################################
# Dependencies
###################################
library(nlme)
library(car)
library(tseries)
library(ggplot2)
########################
# Read in the dataset
########################
#read in manually.
########################
# Descriptives: Plot the series - sample size for each point
########################
# Plot the time series for the nights where consultants worked in ED.
plot(data_ed$time[1:54],data_ed$patients_n[1:54],
ylab="Sample size (patients)",
ylim=c(0,4500),
xlab="Year-Month",
type="l",
col="blue",
xaxt="n")
# Add in control group flow into Lake Huron
points(data_ed$time[55:108],data_ed$patients_n[55:108],
type='l',
col="red")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
# Add in the points for the figure
points(data_ed$time[1:54],data_ed$patients_n[1:54],
col="blue",
pch=20)
points(data_ed$time[55:108],data_ed$patients_n[55:108],
col="red",
pch=20)
# Label the weather change
abline(v=34.5,lty=2, lwd = 2)
# Add in a legend
legend(x=0.2, y=4500, legend=c("Night (Intervention)","Day (Control)"),
col=c("blue","red"),pch=20)
########################
# Plotting a smoothing line
########################
par(mfrow=c(1,1))
smoother = c(1/24,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/12,1/24)
trendpattern = filter (data_ed$mean_total_time[1:54], filter = smoother, sides=2)
trendpattern2 = filter (data_ed$mean_total_time[55:108], filter = smoother, sides=2)
##########
# 12 Month smoother to visualise difference.
#########
plot (data_ed$time[1:54], data_ed$mean_total_time[1:54],
type= "p",
ylab="Total Time in Department (mean mins)",
ylim=c(0,300),
xlab="Month",
col='lightblue',
pch=20,
xaxt="n")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
points(data_ed$time[1:54],data_ed$mean_total_time[55:108],
type='p',
col="pink",
pch=20)
lines (trendpattern, col='blue', lwd = 3)
lines (trendpattern2, col='red', lwd = 3)
abline(v=34.5,lty=2)
legend(x=1, y=100, legend=c("Night (intervention)","Day (control)"),
col=c("blue","red"),pch=20)
########################
# Model 1: Simple OLS with seasonal dummies
########################
#check for unit root - all okay ADF(4) = -4.6 p < 0.01 (sig indicates stationarity)
adf.test(data_ed$mean_total_time)
# A preliminary OLS regression
model_ols<-lm(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend +
M1 +
M2 +
M3 +
M4 +
                M5 +
M6 +
M7 +
M8 +
M9 +
M10 +
M11 , data=data_ed)
summary(model_ols)
confint(model_ols)
influence(model_ols)
cooks.distance(model_ols)
################################
# Assessing Autocorrelation
################################
# Durbin-watson test to 12 lags
dwt(model_ols,max.lag=12,alternative="two.sided")
# Graph the residuals from the OLS regression to check for serially correlated errors
plot(data_ed$time[1:54],
residuals(model_ols)[1:54],
type='o',
pch=16,
xlab='Time',
ylab='OLS Residuals',
col="red")
abline(h=0,lty=2)
# Plot ACF and PACF
# Set plotting to two records on one page
par(mfrow=c(1,2))
# Produce Plots
acf(residuals(model_ols), lag.max = 50)
acf(residuals(model_ols),type='partial')
# Note decay in ACF, significant spike at 10 in PACF, model p=10
########################
# Run the final model
########################
# Fit the GLS regression model
model_p10 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend +
M1 +
M2 +
M3 +
M4 +
                M5 +
M6 +
M7 +
M8 +
M9 +
M10 +
M11 ,
data=data_ed,
correlation=corARMA(p=10,form=~time|group),
method="ML")
summary(model_p10)
confint(model_p10)
########################
# Sensitivity analysis
########################
#ACF exp decay to zero indicating AR
#PACF spikes at 2, 10 and 14 (just).
# Likelihood-ratio tests
model_p2<- update(model_p10,correlation=corARMA(p=2,form=~time|group))
anova(model_p2,model_p10)
summary(model_p2)
#AR(10) has lower AIC. Conclusions are the same.
model_p14 <- update(model_p10,correlation=corARMA(p=14, form=~time|group))
anova(model_p10,model_p14)
summary(model_p14)
#AR(14) has lower AIC (marginally). Substantive conclusions are the same.
# Put plotting back to one chart
par(mfrow=c(1,1))
model_final <- model_p10
# Residual plot
qqPlot(residuals(model_final))
influence(model_final, do.coef = TRUE)
########################
# Plot results
#########################
# First plot the raw data points for the intervention group (night)
plot(data_ed$time[1:54],data_ed$mean_total_time[1:54],
ylim=c(0,450),
ylab="Total Time in Dept. (Mean Mins)",
xlab="Year/Mnth",
pch=20,
col="lightblue",
xaxt="n")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
# Label the policy change
abline(v=34.5,lty=2)
# Add in the points for the control
points(data_ed$time[55:108],data_ed$mean_total_time[55:108],
col="pink",
pch=20)
# Plot the first line segment for the intervention group
lines(data_ed$time[1:34], fitted(model_final)[1:34], col="blue",lwd=2)
# Add the second line segment for the intervention group
lines(data_ed$time[35:54], fitted(model_final)[35:54], col="blue",lwd=2)
segments(35, model_final$coef[1] + model_final$coef[2]*35 + model_final$coef[3]+model_final$coef[4]*35 +
model_final$coef[5] + model_final$coef[6],
54, model_final$coef[1] + model_final$coef[2]*54 + model_final$coef[3]+model_final$coef[4]*54 +
model_final$coef[5] + model_final$coef[6]*20,
lty=2,col='blue',lwd=2)
# Plot the first line segment for the control group
lines(data_ed$time[55:88], fitted(model_final)[55:88], col="red",lwd=2)
# Add the second line segment for the control
lines(data_ed$time[89:108], fitted(model_final)[89:108], col="red",lwd=2)
# Add the counterfactual for the control group
#segments(1, model_final$coef[1]+model_final$coef[2],
#60,model_final$coef[1]+model_final$coef[2]*54,
#lty=2,col='red',lwd=2)
# Add in a legend
legend(x=40, y=430, legend=c("Night Worked","On Call"), col=c("blue","red"),pch=20)
########################
# Model 2: Seasonality (summer) modelled as single variable
########################
# A preliminary OLS regression
model_ols<-lm(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer, data=data_ed)
summary(model_ols)
confint(model_ols)
influence(model_ols)
cooks.distance(model_ols)
################################
# Assessing Autocorrelation
################################
# Durbin-watson test to 12 lags
dwt(model_ols,max.lag=12,alternative="two.sided")
#pac suggestive of a problem at lag 12
# Graph the residuals from the OLS regression to check for serially correlated errors
plot(data_ed$time[1:54],
residuals(model_ols)[1:54],
type='o',
pch=16,
xlab='Time',
ylab='OLS Residuals',
col="red")
abline(h=0,lty=2)
# Plot ACF and PACF
# Set plotting to two records on one page
par(mfrow=c(1,2))
# Produce Plots
acf(residuals(model_ols), lag.max = 25)
acf(residuals(model_ols),type='partial', lag.max = 25)
# Note decay in ACF, significant spike at 2 in PACF, initial model p=2
# Also a significant spike at 16 in PACF. Test in sensitivity analysis.
########################
# Run the final model
########################
# Fit the GLS regression model
model_p2 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer,
data=data_ed,
correlation=corARMA(p=2,form=~time|group),
method="ML")
summary(model_p2)
confint(model_p2)
vif(model_p2)
acf(residuals(model_p2), lag.max = 25)
acf(residuals(model_p2),type='partial', lag.max = 25)
outlierTest(model_ols) # Bonferroni p-value for most extreme obs
cutoff <- 4/((nrow(data_ed)-length(model_p2$coefficients)-2))
plot(model_p2, which=4, cook.levels=cutoff)
influencePlot(model_ols, id.method="identify", main="Influence Plot", sub="Circle size is proportional to Cook's Distance" )
########################
# Sensitivity
########################
# Likelihood-ratio tests
model_p2q1 <- update(model_p2,correlation=corARMA(p=2,q=1,form=~time|group))
anova(model_p2q1,model_p2)
summary(model_p2q1)
#non-significant. Substantive conclusions the same.
model_p12 <- update(model_p2,correlation=corARMA(p=12, form=~time|group))
anova(model_p12,model_p2)
summary(model_p12)
#non-significant. Substantive conclusions the same.
model_p16 <- update(model_p2,correlation=corARMA(p=16, form=~time|group))
anova(model_p16,model_p2)
summary(model_p16)
confint(model_p16)
#p = 16 has lower AIC - significant. Substantive conclusions the same.
#notes: p = 16 has smaller standard errors for interaction terms.
model_p12q1 <- update(model_p2,correlation=corARMA(p=12, q = 1, form=~time|group))
anova(model_p12q1,model_p2)
summary(model_p12q1)
#non-significant. Substantive conclusions the same.
model_p16q1 <- update(model_p2,correlation=corARMA(p=16, q=1, form=~time|group))
anova(model_p16q1,model_p16)
summary(model_p16q1)
confint(model_p16q1)
#p = 16 has lower AIC - significant. Substantive conclusions the same.
# Put plotting back to one chart
par(mfrow=c(1,1))
model_final <- model_p2
summary(model_final)
#pseudo R squared
R2 <- cor(data_ed$mean_total_time,predict(model_final))^2
R2
#pseudo R^2 = 0.67 for p = 2 and 0.66 for p = 16
# Residual plot
qqPlot(residuals(model_final))
influence(model_p2, do.coef = TRUE)
########################
# Plot regression results - average out seasonality
#########################
plot(data_ed$time[1:54],data_ed$mean_total_time[1:54],
ylim=c(0,300),
ylab="Total Time in Dept. (Mean Mins)",
xlab="Month",
pch=20,
col="lightblue",
xaxt="n")
# Add x-axis year labels
axis(1, at=1:54, labels=data_ed$x_labels[1:54])
# Label the policy change
abline(v=34.5,lty=2)
# Add in the points for the control
points(data_ed$time[55:108],data_ed$mean_total_time[55:108],
col="pink",
pch=20)
# Plot the first line segment for the intervention group
#lines(data_ed$time[1:34], fitted(model_final)[1:34], col="blue",lwd=2)
# Add the second line segment for the intervention group
#lines(data_ed$time[35:54], fitted(model_final)[35:54], col="blue",lwd=2)
#segments(35, model_final$coef[1] + model_final$coef[2]*35 + model_final$coef[3]+model_final$coef[4]*35 +
# model_final$coef[5] + model_final$coef[6],
# 54, model_final$coef[1] + model_final$coef[2]*54 + model_final$coef[3]+model_final$coef[4]*54 +
# model_final$coef[5] + model_final$coef[6]*20,
# lty=2,col='blue',lwd=2)
# Plot the first line segment for the control group
#lines(data_ed$time[55:88], fitted(model_final)[55:88], col="red",lwd=2)
# Add the second line segment for the control
#lines(data_ed$time[89:108], fitted(model_final)[89:108], col="red",lwd=2)
# Calculate the offset to average out seasonality in the visualisation
offset <- mean(data_ed$ed_summer) * model_final$coef[9]
offset
#Plot the first line for the intervention group (averaging out seasonality)
segments(1,
model_final$coef[1] + model_final$coef[2] + model_final$coef[3] + offset,
34,
model_final$coef[1] + (model_final$coef[2]*34) +
+ model_final$coef[3] + (model_final$coef[4]*34) + offset,
lty=1, lwd=2, col='blue')
#intervention group post period line (averaging out seasonality) CORRECT.
segments(35,
model_final$coef[1] + model_final$coef[3] +
(model_final$coef[2] + model_final$coef[4])*35 +
model_final$coef[5] + model_final$coef[6] +
model_final$coef[7] + model_final$coef[8] + offset,
54,
model_final$coef[1] + model_final$coef[3] +
(model_final$coef[2] + model_final$coef[4])*35 +
model_final$coef[5] + model_final$coef[7] +
(model_final$coef[6] + model_final$coef[8])*20 + offset,
lty=1, lwd=2, col='blue')
# Plot the first line segment for the control group (averaging out seasonality)
segments(1,
model_final$coef[1] + model_final$coef[2] + offset,
34,
model_final$coef[1] + model_final$coef[2]*34 + offset,
lty=1, lwd=2, col='red')
#Plot the second line for the control group (averaging out seasonality)
#correct 23/01/18
segments(35,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + model_final$coef[6] + offset,
54,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + (model_final$coef[6]*54) + offset,
lty=1, lwd=2, col='red')
#counterfactual for intervention 23/01/2018
segments(35,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + model_final$coef[6] + offset +
model_final$coef[3] + model_final$coef[4]*35,
54,
model_final$coef[1] + (model_final$coef[2]*35) +
model_final$coef[5] + (model_final$coef[6]*20) + offset+
model_final$coef[3] + model_final$coef[4]*54,
lty=2, lwd=2, col='blue')
# Add in a legend
legend(x=2, y=100, legend=c("Night (2200-0600)","Day"), col=c("blue","red"),pch=20)
##############################################
# Sensitivity to wild points - Cook's distance > 4/n
##############################################
# significant - same substantive findings
model_p16 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer + wild,
data=data_ed,
correlation=corARMA(p=16,form=~time|group),
method="ML")
summary(model_p16)
confint(model_p16)
##############################################
# Sensitivity to interaction between group and seasonality
##############################################
model_p16 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer + summer_group,
data=data_ed,
correlation=corARMA(p=16, form=~time|group),
method="ML")
summary(model_p16)
confint(model_p16)
#conclusions = no substantive difference in interpretation. Interaction term ns.
##############################################
# Sensitivity to number of patients per month.
##############################################
model_p16 <- gls(mean_total_time ~ time + group + group_time + level + trend + group_level +
group_trend + ed_summer + patients_n,
data=data_ed,
correlation=corARMA(p=16, form=~time|group),
method="ML")
summary(model_p16)
confint(model_p16)
#conclusions = no substantive difference in interpretation. patients_n is ns.
##############################################
# Predict absolute and relative changes
##############################################
prediction <- function(model, time, time_after){
pred <- fitted(model)[time]
  cfac <- model$coef[1] + model$coef[2]*time +
    model$coef[3] + model$coef[4]*time +
    model$coef[5] + model$coef[6]*time_after
abs <- pred - cfac
rel <- (pred - cfac) / cfac * 100
return(list(absolute=abs, relative=rel))
}
# Predicted value at 3 months after introducing consultants at night (month 37)
prediction(model_final, 37, 3)
# Predicted value at 6 months after introducing consultants at night (month 40)
prediction(model_final, 40, 6)
# Predicted value at 12 months after introducing consultants at night (month 46)
prediction(model_final, 46, 12)
# END
|
library(cyjShiny)
library(later)
#----------------------------------------------------------------------------------------------------
styles <- c("",
"from Cytoscape desktop"="smallDemoStyle.json",
"generic style"="basicStyle.js")
#----------------------------------------------------------------------------------------------------
# read json text for the graph; the commented-out loopConditions demo below also assumes two simulated experimental variables (3 conditions) in a data.frame named tbl.lfc, which is not created in this file
#----------------------------------------------------------------------------------------------------
json.filename <- "smallDemo.cyjs"
graph.json <- paste(readLines(json.filename), collapse="")
#----------------------------------------------------------------------------------------------------
ui = shinyUI(fluidPage(
tags$head(
tags$style("#cyjShiny{height:95vh !important;}")),
sidebarLayout(
sidebarPanel(
selectInput("loadStyleFile", "Select Style: ", choices=styles),
selectInput("doLayout", "Select Layout:",
choices=c("",
"cose",
"cola",
"circle",
"concentric",
"breadthfirst",
"grid",
"random",
"dagre",
"cose-bilkent")),
actionButton("sfn", "Select First Neighbor"),
actionButton("fit", "Fit Graph"),
actionButton("fitSelected", "Fit Selected"),
actionButton("clearSelection", "Clear Selection"), HTML("<br>"),
#actionButton("loopConditions", "Loop Conditions"), HTML("<br>"),
actionButton("removeGraphButton", "Remove Graph"), HTML("<br>"),
actionButton("addRandomGraphFromDataFramesButton", "Add Random Graph"), HTML("<br>"),
actionButton("getSelectedNodes", "Get Selected Nodes"), HTML("<br><br>"),
htmlOutput("selectedNodesDisplay"),
width=3
),
mainPanel(cyjShinyOutput('cyjShiny'),
width=9
)
) # sidebarLayout
))
#----------------------------------------------------------------------------------------------------
server = function(input, output, session)
{
observeEvent(input$fit, ignoreInit=TRUE, {
fit(session, 80)
})
observeEvent(input$loadStyleFile, ignoreInit=TRUE, {
if(input$loadStyleFile != ""){
tryCatch({
loadStyleFile(input$loadStyleFile)
}, error=function(e) {
msg <- sprintf("ERROR in stylesheet file '%s': %s", input$loadStyleFile, e$message)
showNotification(msg, duration=NULL, type="error")
})
later(function() {updateSelectInput(session, "loadStyleFile", selected=character(0))}, 0.5)
}
})
observeEvent(input$doLayout, ignoreInit=TRUE,{
if(input$doLayout != ""){
strategy <- input$doLayout
doLayout(session, strategy)
later(function() {updateSelectInput(session, "doLayout", selected=character(0))}, 1)
}
})
observeEvent(input$selectName, ignoreInit=TRUE,{
selectNodes(session, input$selectName)
})
observeEvent(input$sfn, ignoreInit=TRUE,{
selectFirstNeighbors(session)
})
observeEvent(input$fitSelected, ignoreInit=TRUE,{
fitSelected(session, 100)
})
observeEvent(input$getSelectedNodes, ignoreInit=TRUE, {
output$selectedNodesDisplay <- renderText({" "})
getSelectedNodes(session)
})
observeEvent(input$clearSelection, ignoreInit=TRUE, {
clearSelection(session)
})
observeEvent(input$loopConditions, ignoreInit=TRUE, {
condition.names <- rownames(tbl.lfc)
for(condition.name in condition.names[-1]){
#browser()
lfc.vector <- as.numeric(tbl.lfc[condition.name,])
node.names <- rownames(tbl.lfc)
setNodeAttributes(session, attributeName="lfc", nodes=node.names, values=lfc.vector)
#updateSelectInput(session, "setNodeAttributes", selected=condition.name)
Sys.sleep(1)
} # for condition.name
updateSelectInput(session, "setNodeAttributes", selected="baseline")
})
observeEvent(input$removeGraphButton, ignoreInit=TRUE, {
removeGraph(session)
})
observeEvent(input$addRandomGraphFromDataFramesButton, ignoreInit=TRUE, {
source.nodes <- LETTERS[sample(1:5, 5)]
target.nodes <- LETTERS[sample(1:5, 5)]
tbl.edges <- data.frame(source=source.nodes,
target=target.nodes,
interaction=rep("generic", length(source.nodes)),
stringsAsFactors=FALSE)
all.nodes <- sort(unique(c(source.nodes, target.nodes, "orphan")))
tbl.nodes <- data.frame(id=all.nodes,
type=rep("unspecified", length(all.nodes)),
stringsAsFactors=FALSE)
addGraphFromDataFrame(session, tbl.edges, tbl.nodes)
})
observeEvent(input$selectedNodes, {
newNodes <- input$selectedNodes;
output$selectedNodesDisplay <- renderText({
paste(newNodes)
})
})
output$value <- renderPrint({ input$action })
output$cyjShiny <- renderCyjShiny({
cyjShiny(graph=graph.json, layoutName="cola")
})
} # server
#----------------------------------------------------------------------------------------------------
app <- shinyApp(ui = ui, server = server)
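# To launch the app interactively (illustrative usage note; it assumes smallDemo.cyjs
# and the style files referenced above are present in the working directory):
# shiny::runApp(app)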
|
/inst/examples/fromCytoscapeDesktop/simple/app.R
|
permissive
|
mw201608/cyjShiny
|
R
| false | false | 5,707 |
r
|
|
#' View a sequence of pitch chords
#'
#' Displays a chord sequence, where each chord is expressed as a
#' vector of MIDI note numbers.
#'
#' @param x
#' Chord sequence to display, expressed as a list of numeric vectors,
#' where each vector expresses a single chord as a list of MIDI
#' note numbers.
#'
#' @param annotate
#' If not \code{NULL}, a character vector of the same length as \code{x},
#' providing text to display under each chord. This text may not
#' contain whitespace.
#'
#' @param chords_per_line
#' (Integerish scalar)
#' Number of chords to display on each line.
#'
#' @inheritParams view_abc_string
#'
#' @export
view_pi_chord_seq <- function(
x,
annotate = NULL,
chords_per_line = 5,
staff_width = 75 * pmin(pmax(2, length(x)), 5),
play_midi = FALSE,
download_midi = FALSE
) {
html_from_pi_chord_seq(
x = x,
annotate = annotate,
chords_per_line = chords_per_line,
staff_width = staff_width,
play_midi = play_midi,
download_midi = download_midi
) %>%
view_html()
}
html_from_pi_chord_seq <- function(
x,
annotate = NULL,
chords_per_line = 5,
staff_width = 75 * pmin(pmax(2, length(x)), 5),
play_midi = FALSE,
download_midi = FALSE,
...
) {
checkmate::qassert(x, "l")
if (!is.null(annotate)) {
annotate <- as.character(annotate)
stopifnot(is.character(annotate),
length(annotate) == length(x))
}
if (any(grepl("\\s", annotate)))
stop("<annotate> strings may not contain whitespace")
y <- purrr::map(x, spell_pi_chord) %>% (dplyr::bind_rows)
if (!is.null(annotate)) y$annotate <- annotate
by_row <- split(y, floor(seq(from = 0, length.out = nrow(y)) / chords_per_line))
score <- purrr::map_chr(by_row, function(t) {
treble_str <- paste0("[V:1]", paste(t$treble, collapse = "|"), "|")
bass_str <- paste0("[V:2]", paste(t$bass, collapse = "|"), "|")
annotate_str <- if (!is.null(annotate))
paste(c("w:", t$annotate), collapse = " ")
paste(c(treble_str,
if (!is.null(treble_str)) annotate_str,
bass_str,
if (is.null(treble_str)) annotate_str
), collapse = "\n")
}) %>% paste(collapse = "\n%\n")
str <- sprintf("L:1\nV:1 treble\nV:2 bass\n%s", score)
html_from_abc_string(str,
play_midi = play_midi,
download_midi = download_midi,
staff_width = staff_width,
... = ...)
}
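# Illustrative usage sketch (an assumed example, not taken from the package docs):
# each chord is a numeric vector of MIDI note numbers and annotations must not
# contain whitespace. Wrapped in interactive() so nothing runs at build time.
if (interactive()) {
  view_pi_chord_seq(list(c(60, 64, 67), c(62, 65, 69)),  # C major, then D minor
                    annotate = c("I", "ii"))
}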
|
/R/pi-chord-seq.R
|
permissive
|
pmcharrison/abcR
|
R
| false | false | 2,449 |
r
|
|
# import the data set
dataset = read.delim("Restaurant_Reviews.tsv",quote = '',stringsAsFactors = FALSE)
# cleaning the text
library(tm)
library(SnowballC)
Corpus = VCorpus(VectorSource(dataset$Review))
Corpus = tm_map(Corpus,content_transformer(tolower))
Corpus = tm_map(Corpus,removeNumbers)
Corpus = tm_map(Corpus,removePunctuation)
Corpus = tm_map(Corpus,removeWords,stopwords())
Corpus = tm_map(Corpus,stemDocument)
Corpus = tm_map(Corpus, stripWhitespace)
|
/Reviews.R
|
no_license
|
lsrikar/textminig
|
R
| false | false | 475 |
r
|
|
rm(list=ls())
os <- .Platform$OS.type
if (os == "windows") {
j <- "J:/"
h <- "H:/"
} else {
j <- "FILEPATH"
h <- "USERNAME"
}
## QSUB
#slot_number <- 50
#fthreads <- 20
#m_mem_free <-
next_script <- paste0("FILEPATH")
##loop args
#cause_list <- c(614, 615, 616)
# cause_list <- c(616, 618)
cause_list <- c(618)
for (cause in cause_list){
job_name <- paste0("save_hemog_", cause)
system(paste( "qsub -P proj_custom_models -l m_mem_free=110G -l fthread=15 -q long.q -N ", job_name, "FILEPATH -s", next_script, cause))
}
|
/gbd_2019/cod_code/hemoglobinopathies/hemog_splits_cod/00_launch_savejobs_parallel.R
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false | false | 539 |
r
|
|
## name: overlap_fill.r
## date: 10/13/2017
## Here I fill NAs in the 1st file with the 2nd file
args <- commandArgs(trailingOnly = TRUE)
input1=as.matrix(read.delim(args[1],stringsAsFactors=F,check.names=F,row.names=1))
input2=as.matrix(read.delim(args[2],stringsAsFactors=F,check.names=F,row.names=1))
common_row=intersect(rownames(input1),rownames(input2))
common_col=intersect(colnames(input1),colnames(input2))
input1[common_row,common_col][is.na(input1[common_row,common_col])]=input2[common_row,common_col][is.na(input1[common_row,common_col])]
output=input1;output=cbind(rownames(output),output);colnames(output)[1]="proteinID"
write.table(output,file=paste0(args[1],".overlap_fill"),quote=F,sep="\t",row.names=F)
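## Example invocation (illustrative; the file names are placeholders for two
## tab-delimited matrices whose first column holds the protein IDs):
## Rscript overlap_fill.r matrix_primary.txt matrix_secondary.txt
## The result is written to matrix_primary.txt.overlap_fill, with NAs in the first
## matrix filled from the second wherever the row and column names overlap.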
|
/function/overlap_fill.r
|
no_license
|
GuanLab/phosphoproteome_prediction
|
R
| false | false | 730 |
r
|
|
\name{contr.earth.response}
\alias{contr.earth.response}
\title{Please ignore}
\description{
Contrasts function for factors in the \code{\link{earth}} response.
For internal use by earth.
}
\usage{
contr.earth.response(x, base, contrasts)
}
\arguments{
\item{x}{
a factor
}
\item{base}{
unused
}
\item{contrasts}{
unused
}
}
\value{
Returns a diagonal matrix.
An example for a 3 level factor with levels \code{A}, \code{B}, and \code{C}:
\preformatted{
A B C
A 1 0 0
B 0 1 0
C 0 0 1
}
}
\note{
Earth uses this function internally.
You shouldn't need it.
It is made publicly available only because it seems to be necessary
for \code{model.matrix}.
}
\seealso{
\code{\link{contrasts}}
}
\keyword{models}
|
/man/contr.earth.response.Rd
|
no_license
|
cran/earth
|
R
| false | false | 748 |
rd
|
|
\name{updatemustar}
\alias{updatemustar}
\title{Function to update mean latent values for missingness model}
\description{
This function takes the current parameters and returns the updated mean latent values for the missingness model.
}
\usage{
updatemustar(mu, c, n, T0, D)
}
\arguments{
\item{mu}{current estimated mean matrix for CAL}
\item{c}{current c for missingness model}
\item{n}{number of patients}
\item{T0}{number of teeth}
\item{D}{the D matrix in the paper}
}
\value{
\code{updatemustar(mu, c, n, T0, D)} returns the updated mean latent values for missingness model.
}
\seealso{
\link{update_RJ} for a complete example for all functions in this package.
}
\author{Yuliang Li}
|
/BAREB/man/updatemustar.Rd
|
no_license
|
akhikolla/ClusterTests
|
R
| false | false | 689 |
rd
|
|
context("glance_foot")
mtcars2 <- mtcars
mtcars2 <-
labelVector::set_label(
mtcars2,
mpg = "Gas Mileage",
qsec = "Quarter Mile Time",
am = "Transmission",
wt = "Weight",
gear = "Gears"
)
fit <- lm(mpg ~ qsec + factor(am) * wt + factor(gear), data = mtcars2)
test_that("glance_foot by column",
{
expect_silent(glance_foot(fit, col_pairs = 2, total_cols = 6))
})
test_that("glance_foot by row",
{
expect_silent(glance_foot(fit, col_pairs = 2, total_cols = 6, byrow = TRUE))
})
test_that("glance_foot with subset of stats",
{
expect_silent(glance_foot(fit, col_pairs = 2, total_cols = 6, byrow = TRUE,
glance_stats = c("r.squared", "adj.r.squared",
"df", "AIC")))
})
test_that("glance_foot with invalid stats requested",
{
expect_warning(glance_foot(fit, col_pairs = 2, total_cols = 6, byrow = TRUE,
glance_stats = c("r.squared", "adj.r.squared",
"df", "AIC-xy")))
})
test_that("glance_foot with no valid stats requested",
{
expect_error(
expect_warning(
glance_foot(
fit,
col_pairs = 2,
total_cols = 6,
byrow = TRUE,
glance_stats = c("r.squared-x", "adj.r.squared-x",
"df-x", "AIC-xy")
)
)
)
})
test_that("glance_foot with too few total_cols",
{
expect_error(
expect_warning(
glance_foot(
fit,
col_pairs = 2,
total_cols = 3,
byrow = TRUE,
glance_stats = c("r.squared", "adj.r.squared",
"df", "AIC-xy")
)
)
)
})
|
/tests/testthat/test-glance_foot.R
|
no_license
|
cran/pixiedust
|
R
| false | false | 1,744 |
r
|
context("glance_foot")
mtcars2 <- mtcars
mtcars2 <-
labelVector::set_label(
mtcars2,
mpg = "Gas Mileage",
qsec = "Quarter Mile Time",
am = "Transmission",
wt = "Weight",
gear = "Gears"
)
fit <- lm(mpg ~ qsec + factor(am) * wt + factor(gear), data = mtcars2)
test_that("glance_foot by column",
{
expect_silent(glance_foot(fit, col_pairs = 2, total_cols = 6))
})
test_that("glance_foot by row",
{
expect_silent(glance_foot(fit, col_pairs = 2, total_cols = 6, byrow = TRUE))
})
test_that("glance_foot with subset of stats",
{
expect_silent(glance_foot(fit, col_pairs = 2, total_cols = 6, byrow = TRUE,
glance_stats = c("r.squared", "adj.r.squared",
"df", "AIC")))
})
test_that("glance_foot with invalid stats requested",
{
expect_warning(glance_foot(fit, col_pairs = 2, total_cols = 6, byrow = TRUE,
glance_stats = c("r.squared", "adj.r.squared",
"df", "AIC-xy")))
})
test_that("glance_foot with no valid stats requested",
{
expect_error(
expect_warning(
glance_foot(
fit,
col_pairs = 2,
total_cols = 6,
byrow = TRUE,
glance_stats = c("r.squared-x", "adj.r.squared-x",
"df-x", "AIC-xy")
)
)
)
})
test_that("glance_foot with too few total_cols",
{
expect_error(
expect_warning(
glance_foot(
fit,
col_pairs = 2,
total_cols = 3,
byrow = TRUE,
glance_stats = c("r.squared", "adj.r.squared",
"df", "AIC-xy")
)
)
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/complete_trip.R
\name{add_trip_cost}
\alias{add_trip_cost}
\title{Add column for previous purse amount and actual trip cost for trips with subtypes 2/3}
\usage{
add_trip_cost(tr_df)
}
\arguments{
\item{tr_df}{dataframe of transactions}
}
\description{
Add column for previous purse amount and actual trip cost for trips with subtypes 2/3
}
|
/man/add_trip_cost.Rd
|
no_license
|
ytse17/clpr
|
R
| false | true | 418 |
rd
|
|
## Import libraries
library(dplyr)
library(ggplot2)
library(grid)
library(gridExtra)
##Read the data file
life <- read.csv("LifeExpectancy.csv",header=TRUE)
## Rearrange the data as Country|1990|2013 for slopegraph. Filter the life expectancy data for
## 1990 and 2013 into 2 temp variables and then extract the values into year1990 and year2013.
## All countries are extracted to variable group.
## Create a new data frame named 'a' with the 3 variables year1990,year2013, countries
## Set the variable years which will be used for x-axis as 23 (2013-1990)
tmp1990 <- life %>% filter(Year==1990)
tmp2013 <- life %>% filter(Year==2013)
year1990 <- tmp1990$Value
year2013 <- tmp2013$Value
group <- tmp2013$Country
years <- 23
a <- data.frame(year1990,year2013,group)
## Create the labels for the 2 ends of the slopegraph
## Left end (1990) will say Country,Value
## Right end (2013) will say Value,Country
## Round the values to get rid of the decimals
lab1990 <- paste(a$group, round(a$year1990),sep=",")
lab2013 <- paste(round(a$year2013), a$group, sep=",")
## Draw the initial plot
p <- ggplot(a) +
geom_segment(aes(x=0,xend=years,
y=year1990,yend=year2013),
size=0.5)+
ggtitle("Life Expectancy At Birth, 1990 & 2013")+theme(plot.title = element_text(face="bold",size=20,color="blue"))
## Set the theme background, grids, ticks, text and borders to blank
p<-p + theme(panel.background = element_blank())
p<-p + theme(panel.grid=element_blank())
p<-p + theme(axis.ticks=element_blank())
p<-p + theme(axis.text=element_blank())
p<-p + theme(panel.border=element_blank())
## Adding extra space around the graph to accommodate the labels
p <- p+ xlim(-8,(years+9))
p <- p+ylim(min(a$year2013,a$year1990),max(a$year2013,a$year1990)+7)
## Set the Y axis title
p<-p+xlab("")+ylab("Life Expectancy")+
theme(axis.title.y = element_text(size = 15, angle = 90))
## Set the labels on the slopegraph
## x variable is repeated with value as years (right end)
## hjust of 0, aligns text to left
## Size is the font size
p<-p+geom_text(label=lab2013,y=a$year2013,x=rep.int(years,length(a$year2013)),hjust=0, vjust=0.5,size=4)
p<-p+geom_text(label=lab1990,y=a$year1990,x=rep.int(0,length(a$year2013)),hjust=1, vjust=0.5,size=4)
## Set the label for the 2 ends of the slopegraph.
## Y value needs to be slightly greater than the max value of the data, to avoid overlap.
## This spacing needs to be adjusted in the ylim above
p<-p+geom_text(label="1990",x=0,y=(1.05*(max(a$year2013,a$year1990))),hjust=0.2,size=5)
p<-p+geom_text(label="2013",x=years,y=(1.05*(max(a$year2013,a$year1990))),hjust=0,size=5)
## Show the plot on screen
print(p)
## Add footnote
grid.newpage()
footnote <- "Data Source: UN Data, Life expectancy at birth, total (years)"
g <- arrangeGrob(p, bottom = textGrob(footnote, x = 0, hjust = -0.1, vjust=0.1, gp = gpar(fontface = "italic", fontsize = 12)))
grid.draw(g)
|
/Slopegraph.R
|
no_license
|
vidyakesavan/slopegraph-in-R
|
R
| false | false | 2,915 |
r
|
|
class = c(rep("a", 10), rep("b", 3))
ref = c(rep("b", 4), rep("a", 9))
map = relabel_class(class, ref)
attr(map, "df") = NULL
test_that("test relabel", {
expect_equal(map, c("a" = "b", "b" = "a"))
})
adjusted = relabel_class(class, ref, return_map = FALSE)
test_that("test relabel", {
expect_equal(adjusted, unname(map[class]))
})
class = c(rep("a", 9), rep("b", 3), "c")
ref = c(rep("b", 4), rep("a", 9))
map = relabel_class(class, ref)
attr(map, "df") = NULL
test_that("test relabel", {
expect_equal(map, c("a" = "b", "b" = "a", "c" = "c"))
})
class = c(rep("a", 9), rep("b", 4))
ref = c(rep("b", 4), rep("a", 8), "c")
map = relabel_class(class, ref)
attr(map, "df") = NULL
test_that("test relabel", {
expect_equal(map, c("a" = "b", "b" = "a", "c" = "c"))
})
class = c(rep("a", 10), rep("b", 3))
ref = c(rep("b", 4), rep("a", 9))
map = relabel_class(class, ref, full_set = c("a", "b", "c"))
attr(map, "df") = NULL
test_that("test relabel", {
expect_equal(map, c("a" = "b", "b" = "a", "c" = "c"))
})
class = c(rep(1, 10), rep(2, 3))
ref = c(rep(2, 4), rep(1, 9))
map = relabel_class(class, ref)
adjusted = relabel_class(class, ref, return_map = FALSE)
test_that("test relabel", {
expect_equal(adjusted, unname(as.numeric(map[class])))
})
|
/tests/testthat/test_relabel.R
|
permissive
|
jokergoo/cola
|
R
| false | false | 1,260 |
r
|
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cognostics.R
\name{cogRange}
\alias{cogRange}
\title{Compute Range Cognostic}
\usage{
cogRange(x, desc = "range (max - min)", group = "common",
defLabel = FALSE, defActive = TRUE, filterable = TRUE)
}
\arguments{
\item{x}{numeric vector from which to compute the range}
\item{desc,group,defLabel,defActive,filterable}{arguments passed to \code{\link{cog}}}
}
\description{
Compute range to be used as cognostics in a trelliscope display.
}
\examples{
cogRange(rnorm(100))
}
\author{
Ryan Hafen
}
\seealso{
\code{\link{cog}}
}
|
/man/cogRange.Rd
|
permissive
|
jfgilman/trelliscope
|
R
| false | false | 616 |
rd
|
|
##' @title Maximum likelihood optimization
##' @param data The angular data to be used for inference
##' @param model A list made of \describe{
##'\item{likelihood}{The likelihood function, see \code{\link{dpairbeta}}
##' for a template}.
##' \item{npar}{The length of the parameter vector}
##' }
##' @param init NULL or a real vector of size \code{model$npar} giving the starting values for the optimisation, on the linked (unconstrained) scale.
##' @param maxit maximum number of iterations to be performed by
##' function \code{optim}
##' @param method The method to be used by \code{optim}
##' @param hess logical: should a numerical approximation of the Hessian be computed?
##' @param link the link function from the natural marginal parameter spaces to the real line.
##' @param unlink the inverse link function. If \code{x} is any real number, then \code{unlink(x)} should be in the admissible range for the likelihood function and the prior function.
##' @return The list returned by \code{optim} and the AIC and BIC criteria
##' @export
maxLikelihood <-
function(data, model,## = list(likelihood, npar),
init = NULL, maxit = 500, method="L-BFGS-B", hess = T, link, unlink)
## @param prior The prior function (see \code{\link{prior.pb}} for a template) for generating initial parameters in case the initial value results in non finite log-likelihood.
## @param Hpar the prior hyper parameters
{
p = dim(data)[2]
ndat=dim(data)[1]
npar <- model$npar
lhood <- function(vec)
{
-model$likelihood(x=data, par=unlink(vec),
log=TRUE, vectorial=FALSE)
}
if(is.null(init))
init <- rep(0,model$npar)
count <- 0
converged <- FALSE
while(!converged & count<20 )
{
count <- count+1
opt.test <- tryCatch(optim(init, lhood, method = method,
control =list(maxit = maxit,trace=0 ),
hessian = hess),
error=function(e){
# print(e)
return(list(convergence=100))
})
if(as.integer(opt.test$convergence)>0){
init <- rnorm(model$npar,mean=0,sd=1)
}
else{
converged <- TRUE
opt <- opt.test
}
}
rtn <- list()
if(as.integer(opt.test$convergence)>0){
rtn$message <- "optimisation failed"
rtn$counts <- count
rtn$convergence <- 100
rtn$linkedpar <- rep(0,npar)
rtn$par <- unlink(rep(0,npar))
rtn$value <- -Inf
rtn$aic <- rtn$aicc <- rtn$bic <- Inf
rtn$linkedHessian <- 1
return(rtn)
}
rtn$message <- opt$message
rtn$counts <- opt$counts
rtn$convergence <- opt$convergence
rtn$linkedpar <- opt$par
rtn$par <- unlink(opt$par)
rtn$value <- opt$value
rtn$aic <- 2* (opt$value + npar)
rtn$aicc <- 2* (opt$value + npar) +2*npar*(npar+1)/(ndat-npar-1 )
rtn$bic <- 2*opt$value + npar*log(ndat)
if(!hess)
{
rtn$linkedHessian <- 1
}
else
{
rtn$linkedHessian <- opt$hessian
tryCatch(expr={
asympt.variance <- chol2inv(chol(opt$hessian))
rtn$asympt.variance <- asympt.variance
rtn$linked.esterr <- sqrt(diag(asympt.variance))},
error=function(e){## rtn$asympt.variance <- NULL
## rtn$linked.esterr <- NULL
return(rtn)} )
}
return(rtn)
}
##?optim
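## Illustrative usage sketch (not from the package): a toy two-parameter model list
## with a gamma log-likelihood, using log/exp as the link/unlink pair so that optim
## works on the unconstrained scale while the natural parameters stay positive.
## The objects toy.model and toy.data are assumptions made only for this example.
if (interactive()) {
  toy.model <- list(
    npar = 2,
    likelihood = function(x, par, log = TRUE, vectorial = FALSE) {
      ll <- sum(dgamma(as.vector(x), shape = par[1], rate = par[2], log = TRUE))
      if (log) ll else exp(ll)
    })
  toy.data <- matrix(rgamma(200, shape = 2, rate = 1), ncol = 2)
  fit <- maxLikelihood(data = toy.data, model = toy.model, link = log, unlink = exp)
  fit$par  # estimated shape and rate on the natural scale
  fit$aic
}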
|
/R/maxLikelihood.r
|
no_license
|
cran/BMAmevt
|
R
| false | false | 3,391 |
r
|
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function ---
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function( m = matrix() ) {
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
## Method to get the matrix
get <- function() {
## Return the matrix
m
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Write a short comment describing this function
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
## Just return the inverse if its already set
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
## Get the matrix from our object
data <- x$get()
    ## Calculate the inverse of the matrix
    m <- solve(data, ...)
## Set the inverse to the object
x$setInverse(m)
## Return the matrix
m
}
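## Illustrative usage (an assumed example, not part of the original file):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2, ncol = 2))
cacheSolve(cm)   # computes the inverse and stores it in the cache
cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse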
|
/cachematrix.R
|
no_license
|
aromej/ProgrammingAssignment2
|
R
| false | false | 1,737 |
r
|
|
# Lookup the runif() function. Create a 3x4 matrix with 12 random numbers generated using the runif() function; have the matrix be filled out row-by-row, instead of column-by-column.
# Name the columns of the matrix uno, dos, tres, cuatro, and the rows x, y, z.
# Scale the matrix by 10 and save the result.
# Extract a 2x4 matrix from it and save the result.
# Subtract the smaller matrix from the larger one. Can you do that? Why?
# Extract a 3x3 matrix from the original matrix and save the result. Try the subtraction again. Can you do that? Why?
# Extract the column called "uno" as a vector from the original matrix and save the result. Try the subtraction again. Can you do that? Why?
# Lookup the rnorm() function. Create a new 3x4 matrix with 12 random values generated using the rnorm() function.
# Perform matrix multiplication (using the * sign). Can you do that? How is the operation carried out?
# Perform inner matrix multiplication with the two matrices. Can you do that? Why? Can you think of something to do to make this possible? A possible worked sketch follows below.
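# A possible worked sketch for the exercise above (illustrative only; object names
# are arbitrary, and the commented-out lines are the operations that raise errors):
m <- matrix(runif(12), nrow = 3, ncol = 4, byrow = TRUE,
            dimnames = list(c("x", "y", "z"),
                            c("uno", "dos", "tres", "cuatro")))
m10 <- m * 10                 # scale the matrix by 10
m24 <- m10[1:2, ]             # a 2x4 sub-matrix
# m10 - m24                   # error: non-conformable arrays (3x4 vs 2x4)
m33 <- m[, 1:3]               # a 3x3 sub-matrix
# m10 - m33                   # still an error: 3x4 and 3x3 differ in shape
uno <- m[, "uno"]             # a plain length-3 vector
m10 - uno                     # works: the vector is recycled down each column
n <- matrix(rnorm(12), nrow = 3, ncol = 4)
m * n                         # element-wise product: same dimensions, so allowed
# m %*% n                     # error: inner dimensions (4 and 3) do not match
m %*% t(n)                    # transposing one matrix makes %*% possible (3x4 %*% 4x3)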
|
/5.6 Exercise.R
|
permissive
|
PacktPublishing/Programming-for-Statistics-and-Data-Science
|
R
| false | false | 1,081 |
r
|
# Lookup the runif() function. Create a 3x4 matrix with 12 random numbers generated using the runif() function; have the matrix be filled our row-by-row, instead of column-by-column.
# Name the columns of the matrix uno, dos, tres, cuatro, and the rows x, y, z.
# Scale the matrix by 10 and save the result.
# Extract a 2x4 matrix from it and save the result.
# Subtract the smaller matrix from the larger one. Can you do that? Why?
# Extract a 3x3 matrix from the original matrix and save the result. Try the subtraction again. Can you do that? Why?
# Extract the column called "uno" as a vector from the original matrix and save the result. Try the subtraction again. Can you do that? Why?
# Lookup the rnorm() function. Create a new 3x4 matrix with 12 random values generated using the rnorm() function.
# Perform matrix multiplication (using the * sign). Can you do that? How is the operation carried out?
# Perform inner matrix multiplication with the two matrices. Can you do that? Why? Can you think of something to do to make this possible?
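# A minimal sketch of the first few steps (illustrative, not the official
# solution; object names are arbitrary):
m1 <- matrix(runif(12), nrow = 3, ncol = 4, byrow = TRUE)
dimnames(m1) <- list(c("x", "y", "z"), c("uno", "dos", "tres", "cuatro"))
m2 <- m1 * 10     # scale the matrix by 10
m3 <- m2[1:2, ]   # extract a 2x4 sub-matrix
# m2 - m3 would fail here: a 3x4 and a 2x4 matrix are non-conformable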
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SellAction.R
\name{SellAction}
\alias{SellAction}
\title{SellAction}
\usage{
SellAction(id = NULL, target = NULL, startTime = NULL, result = NULL,
participant = NULL, object = NULL, location = NULL, instrument = NULL,
error = NULL, endTime = NULL, agent = NULL, actionStatus = NULL,
warrantyPromise = NULL, buyer = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL,
priceSpecification = NULL, price = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{target}{(EntryPoint type.) Indicates a target EntryPoint for an Action.}
\item{startTime}{(DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{result}{(Thing type.) The result produced in the action. e.g. John wrote *a book*.}
\item{participant}{(Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.}
\item{object}{(Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.}
\item{location}{(Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.}
\item{instrument}{(Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.}
\item{error}{(Thing type.) For failed actions, more information on the cause of the failure.}
\item{endTime}{(DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{agent}{(Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.}
\item{actionStatus}{(ActionStatusType type.) Indicates the current disposition of the Action.}
\item{warrantyPromise}{(WarrantyPromise or WarrantyPromise type.) The warranty promise(s) included in the offer.}
\item{buyer}{(Person type.) A sub property of participant. The participant/person/organization that bought the object.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
\item{priceSpecification}{(PriceSpecification or PriceSpecification or PriceSpecification type.) One or more detailed price specifications, indicating the unit price and delivery or payment charges.}
\item{price}{(Text or Number or Text or Number or Text or Number type.) The offer price of a product, or of a price component when attached to PriceSpecification and its subtypes.Usage guidelines:* Use the [[priceCurrency]] property (with standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR") instead of including [ambiguous symbols](http://en.wikipedia.org/wiki/Dollar_sign#Currencies_that_use_the_dollar_or_peso_sign) such as '$' in the value.* Use '.' (Unicode 'FULL STOP' (U+002E)) rather than ',' to indicate a decimal point. Avoid using these symbols as a readability separator.* Note that both [RDFa](http://www.w3.org/TR/xhtml-rdfa-primer/#using-the-content-attribute) and Microdata syntax allow the use of a "content=" attribute for publishing simple machine-readable values alongside more human-friendly formatting.* Use values from 0123456789 (Unicode 'DIGIT ZERO' (U+0030) to 'DIGIT NINE' (U+0039)) rather than superficially similiar Unicode symbols.}
}
\value{
a list object corresponding to a schema:SellAction
}
\description{
The act of taking money from a buyer in exchange for goods or services rendered. An agent sells an object, product, or service to a buyer for a price. Reciprocal of BuyAction.
}
|
/man/SellAction.Rd
|
no_license
|
cboettig/schemar
|
R
| false | true | 6,931 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SellAction.R
\name{SellAction}
\alias{SellAction}
\title{SellAction}
\usage{
SellAction(id = NULL, target = NULL, startTime = NULL, result = NULL,
participant = NULL, object = NULL, location = NULL, instrument = NULL,
error = NULL, endTime = NULL, agent = NULL, actionStatus = NULL,
warrantyPromise = NULL, buyer = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL,
priceSpecification = NULL, price = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{target}{(EntryPoint type.) Indicates a target EntryPoint for an Action.}
\item{startTime}{(DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{result}{(Thing type.) The result produced in the action. e.g. John wrote *a book*.}
\item{participant}{(Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.}
\item{object}{(Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.}
\item{location}{(Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.}
\item{instrument}{(Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.}
\item{error}{(Thing type.) For failed actions, more information on the cause of the failure.}
\item{endTime}{(DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{agent}{(Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.}
\item{actionStatus}{(ActionStatusType type.) Indicates the current disposition of the Action.}
\item{warrantyPromise}{(WarrantyPromise or WarrantyPromise type.) The warranty promise(s) included in the offer.}
\item{buyer}{(Person type.) A sub property of participant. The participant/person/organization that bought the object.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
\item{priceSpecification}{(PriceSpecification or PriceSpecification or PriceSpecification type.) One or more detailed price specifications, indicating the unit price and delivery or payment charges.}
\item{price}{(Text or Number or Text or Number or Text or Number type.) The offer price of a product, or of a price component when attached to PriceSpecification and its subtypes.Usage guidelines:* Use the [[priceCurrency]] property (with standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR") instead of including [ambiguous symbols](http://en.wikipedia.org/wiki/Dollar_sign#Currencies_that_use_the_dollar_or_peso_sign) such as '$' in the value.* Use '.' (Unicode 'FULL STOP' (U+002E)) rather than ',' to indicate a decimal point. Avoid using these symbols as a readability separator.* Note that both [RDFa](http://www.w3.org/TR/xhtml-rdfa-primer/#using-the-content-attribute) and Microdata syntax allow the use of a "content=" attribute for publishing simple machine-readable values alongside more human-friendly formatting.* Use values from 0123456789 (Unicode 'DIGIT ZERO' (U+0030) to 'DIGIT NINE' (U+0039)) rather than superficially similiar Unicode symbols.}
}
\value{
a list object corresponding to a schema:SellAction
}
\description{
The act of taking money from a buyer in exchange for goods or services rendered. An agent sells an object, product, or service to a buyer for a price. Reciprocal of BuyAction.
}
|
## Create a special Matrix and compute the inverse of the Matrix. If the inverse is already computed, fetch the cached inverse.
## Function to create a special Matrix, that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Computes the inverse of the special Matrix, assuming it is invertible. If already calculated (for the same Matrix) it fetches the cached inverse.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
/cachematrix.R
|
no_license
|
Varun-Dua/ProgrammingAssignment2
|
R
| false | false | 854 |
r
|
## Create a special Matrix and compute the inverse of the Matrix. If the inverse is already computed, fetch the cached inverse.
## Function to create a special Matrix, that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Computes the inverse of the special Matrix, assuming it is invertible. If already calculated (for the same Matrix) it fetches the cached inverse.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
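## Illustrative sketch of cache invalidation (added for clarity, not part of
## the original file): set() stores a new matrix and clears the cached inverse,
## so the next cacheSolve() recomputes instead of returning stale data.
cm <- makeCacheMatrix(diag(2))
cacheSolve(cm)                            # computes and caches the inverse
cm$set(matrix(c(4, 0, 0, 4), nrow = 2))   # new matrix, cached inverse reset to NULL
cacheSolve(cm)                            # recomputes for the new matrix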
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeling.R
\name{estimateSizeFactorsForSparseMatrix}
\alias{estimateSizeFactorsForSparseMatrix}
\title{Estimate size factors for each column, given a sparseMatrix from the Matrix package}
\usage{
estimateSizeFactorsForSparseMatrix(
counts,
locfunc = median,
round_exprs = TRUE,
method = "mean-geometric-mean-total"
)
}
\arguments{
\item{counts}{The matrix for the gene expression data, either read counts or FPKM values or transcript counts}
\item{locfunc}{The location function used to find the representative value}
\item{round_exprs}{A logical flag to determine whether or not the expression values should be rounded}
\item{method}{A character string specifying the size factor calculation approach. It can be one of "mean-geometric-mean-total" (default),
"weighted-median", "median-geometric-mean", "median", "mode", or "geometric-mean-total".}
}
\description{
Estimate size factors for each column, given a sparseMatrix from the Matrix package
}
|
/man/estimateSizeFactorsForSparseMatrix.Rd
|
no_license
|
zhanglhbioinfor/DIRECT-NET
|
R
| false | true | 1,027 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeling.R
\name{estimateSizeFactorsForSparseMatrix}
\alias{estimateSizeFactorsForSparseMatrix}
\title{Estimate size factors for each column, given a sparseMatrix from the Matrix package}
\usage{
estimateSizeFactorsForSparseMatrix(
counts,
locfunc = median,
round_exprs = TRUE,
method = "mean-geometric-mean-total"
)
}
\arguments{
\item{counts}{The matrix for the gene expression data, either read counts or FPKM values or transcript counts}
\item{locfunc}{The location function used to find the representative value}
\item{round_exprs}{A logical flag to determine whether or not the expression values should be rounded}
\item{method}{A character string specifying the size factor calculation approach. It can be one of "mean-geometric-mean-total" (default),
"weighted-median", "median-geometric-mean", "median", "mode", or "geometric-mean-total".}
}
\description{
Estimate size factors for each column, given a sparseMatrix from the Matrix package
}
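% Added illustrative example (not generated from the package sources); the
% sparse counts matrix below is a made-up stand-in for a genes-by-cells matrix.
\examples{
\dontrun{
counts <- Matrix::rsparsematrix(100, 20, density = 0.1,
                                rand.x = function(n) rpois(n, 5))
sf <- estimateSizeFactorsForSparseMatrix(counts, locfunc = median,
                                         round_exprs = TRUE,
                                         method = "mean-geometric-mean-total")
}
}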
|
#' Calculate trackloss and remove trials and subjects with excessive trackloss
#'
#' @param gaze fixation list from fixation report
#' @param screen_size size of the screen in pixels. Defaults to c(1024, 768)
#' @param missingthresh threshold for excluding trials and subjects
#' @export
#' @return data frame containing the gaze data with trackloss by subject and trial; trials and subjects with excessive trackloss are removed
#'
#'
get_trackloss <- function(gaze, screen_size=c(1024, 768),missingthresh=.2){
#measure calibration goodness by computing:
# proportion of off-screen fixations
g2 <- data.frame(gaze, screen_width=screen_size[1], screen_height=screen_size[2])
#how many out of bounds samples by sub
oob_prop_subj <- g2 %>% dplyr::group_by(subject) %>%
dplyr::summarise(oob_prop_sub = sum(time[(x > screen_width | x < 0 |
y > screen_height | y < 0)]) /
sum(time))
#how many out of bounds samples by items
oob_prop_trial <- g2 %>% dplyr::group_by(trial) %>%
dplyr::summarise(oob_prop_tri = sum(time[(x > screen_width | x < 0 |
y > screen_height | y < 0)]) /
sum(time))
greaterthan <- dplyr::filter(oob_prop_trial, oob_prop_trial$oob_prop_tri > missingthresh)
prop <- length(greaterthan$trial)/length(oob_prop_trial$trial)
# % trials excluded
message("% trials excluded:", round(prop, digits=3))
message("Participants taken out:" ,oob_prop_subj$subject[oob_prop_subj$oob_prop_sub > missingthresh])
oob_trial=merge(g2, oob_prop_trial, by=c("trial"))
oob_subject=merge(oob_trial, oob_prop_subj ,by=c("subject"))
combinetrial_above_threshold <- dplyr::filter(oob_subject, (oob_prop_sub < missingthresh), (oob_prop_tri < missingthresh)) %>%
dplyr::select(-screen_width, -screen_height)
return(combinetrial_above_threshold)
}
|
/R/get_trackloss.R
|
no_license
|
dmirman/gazer
|
R
| false | false | 1,926 |
r
|
#' Calculate trackloss and remove trials and subjects with excessive trackloss
#'
#' @param gaze fixation list from fixation report
#' @param screen_size size of the screen in pixels. Defaults to c(1024, 768)
#' @param missingthresh threshold for excluding trials and subjects
#' @export
#' @return data frame containing the gaze data with trackloss by subject and trial; trials and subjects with excessive trackloss are removed
#'
#'
get_trackloss <- function(gaze, screen_size=c(1024, 768),missingthresh=.2){
#measure calibration goodness by computing:
# proportion of off-screen fixations
g2 <- data.frame(gaze, screen_width=screen_size[1], screen_height=screen_size[2])
#how many out of bounds samples by sub
oob_prop_subj <- g2 %>% dplyr::group_by(subject) %>%
dplyr::summarise(oob_prop_sub = sum(time[(x > screen_width | x < 0 |
y > screen_height | y < 0)]) /
sum(time))
#how many out of bounds samples by items
oob_prop_trial <- g2 %>% dplyr::group_by(trial) %>%
dplyr::summarise(oob_prop_tri = sum(time[(x > screen_width | x < 0 |
y > screen_height | y < 0)]) /
sum(time))
greaterthan <- dplyr::filter(oob_prop_trial, oob_prop_trial$oob_prop_tri > missingthresh)
prop <- length(greaterthan$trial)/length(oob_prop_trial$trial)
# % trials excluded
message("% trials excluded:", round(prop, digits=3))
message("Participants taken out:" ,oob_prop_subj$subject[oob_prop_subj$oob_prop_sub > missingthresh])
oob_trial=merge(g2, oob_prop_trial, by=c("trial"))
oob_subject=merge(oob_trial, oob_prop_subj ,by=c("subject"))
combinetrial_above_threshold <- dplyr::filter(oob_subject, (oob_prop_sub < missingthresh), (oob_prop_tri < missingthresh)) %>%
dplyr::select(-screen_width, -screen_height)
return(combinetrial_above_threshold)
}
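# Illustrative usage sketch (added for clarity, not part of the original file):
# builds a tiny synthetic fixation report with the columns the function expects
# (subject, trial, time, x, y). Requires dplyr to be attached for the %>% pipe.
library(dplyr)
fake_fix <- data.frame(subject = rep(1:2, each = 10),
                       trial   = rep(1:2, times = 10),
                       time    = rep(10, 20),
                       x       = runif(20, -50, 1100),
                       y       = runif(20, -50, 800))
clean_fix <- get_trackloss(fake_fix, screen_size = c(1024, 768), missingthresh = 0.2)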
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_string_to_algebra}
\alias{umx_string_to_algebra}
\title{Convert a string to an OpenMx algebra}
\usage{
umx_string_to_algebra(algString, name = NA, dimnames = NA)
}
\arguments{
\item{algString}{a string to turn into an algebra}
\item{name}{of the returned algebra}
\item{dimnames}{of the returned algebra}
}
\value{
\itemize{
\item \code{\link[=mxAlgebra]{mxAlgebra()}}
}
}
\description{
This is useful to quickly and easily insert values from R variables into the string (using paste() and rep() etc.),
then parse the string as an mxAlgebra argument.
}
\details{
A use case is including a matrix exponent (that is, A \%*\% A \%*\% A \%*\% A ...) with a variable exponent.
}
\examples{
\dontrun{
alg = umx_string_to_algebra(paste(rep("A", nReps), collapse = " \%*\% "), name = "test_case")
}
}
\references{
\itemize{
\item \url{https://www.github.com/tbates/umx}
}
}
\seealso{
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umxTwinMaker}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx}},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
/man/umx_string_to_algebra.Rd
|
no_license
|
khusmann/umx
|
R
| false | true | 3,564 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_string_to_algebra}
\alias{umx_string_to_algebra}
\title{Convert a string to an OpenMx algebra}
\usage{
umx_string_to_algebra(algString, name = NA, dimnames = NA)
}
\arguments{
\item{algString}{a string to turn into an algebra}
\item{name}{of the returned algebra}
\item{dimnames}{of the returned algebra}
}
\value{
\itemize{
\item \code{\link[=mxAlgebra]{mxAlgebra()}}
}
}
\description{
This is useful to quickly and easily insert values from R variables into the string (using paste() and rep() etc.),
then parse the string as an mxAlgebra argument.
}
\details{
A use case is including a matrix exponent (that is, A \%*\% A \%*\% A \%*\% A ...) with a variable exponent.
}
\examples{
\dontrun{
alg = umx_string_to_algebra(paste(rep("A", nReps), collapse = " \%*\% "), name = "test_case")
}
}
\references{
\itemize{
\item \url{https://www.github.com/tbates/umx}
}
}
\seealso{
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umxTwinMaker}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx}},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
# @file CohortMethod.R
#
# Copyright 2014 Observational Health Data Sciences and Informatics
#
# This file is part of CohortMethod
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author Observational Health Data Sciences and Informatics
# @author Patrick Ryan
# @author Marc Suchard
# @author Martijn Schuemie
#' CohortMethod
#'
#' @docType package
#' @name CohortMethod
#' @importFrom Rcpp evalCpp
#' @importFrom SqlRender loadRenderTranslateSql translateSql
#' @importFrom RJDBC dbDisconnect
#' @importFrom survival survfit Surv
#' @import Cyclops
#' @import DatabaseConnector
#' @useDynLib CohortMethod
NULL
#' A simulation profile
#' @docType data
#' @keywords datasets
#' @name cohortMethodDataSimulationProfile
#' @usage
#' data(cohortMethodDataSimulationProfile)
NULL
#' Propensity scores for the vignette
#' @docType data
#' @keywords datasets
#' @name vignettePs
#' @usage
#' data(vignettePs)
NULL
#' Balance data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteBalance
#' @usage
#' data(vignetteBalance)
NULL
#' Outcome data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteOutcomeModel1
#' @usage
#' data(vignetteOutcomeModel1)
NULL
#' Outcome data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteOutcomeModel2
#' @usage
#' data(vignetteOutcomeModel2)
NULL
#' Outcome data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteOutcomeModel3
#' @usage
#' data(vignetteOutcomeModel3)
NULL
#' Analysis summary data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteAnalysisSummary
#' @usage
#' data(vignetteAnalysisSummary)
NULL
.onLoad <- function(libname, pkgname) {
missing(libname) # suppresses R CMD check note
missing(pkgname) # suppresses R CMD check note
# Copied this from the ff package:
if (is.null(getOption("ffmaxbytes"))) {
# memory.limit is windows specific
if (.Platform$OS.type == "windows") {
if (getRversion() >= "2.6.0")
options(ffmaxbytes = 0.5 * utils::memory.limit() * (1024^2)) else options(ffmaxbytes = 0.5 * utils::memory.limit())
} else {
# some magic constant
options(ffmaxbytes = 0.5 * 1024^3)
}
}
# Workaround for problem with ff on machines with lots of memory (see
# https://github.com/edwindj/ffbase/issues/37)
options(ffmaxbytes = min(getOption("ffmaxbytes"), .Machine$integer.max * 12))
}
#' @keywords internal
runCohortMethod <- function() {
# todo: implement function that will call all other functions needed to run a cohort method study
}
|
/R/CohortMethod.R
|
permissive
|
tdbennett/CohortMethod
|
R
| false | false | 3,078 |
r
|
# @file CohortMethod.R
#
# Copyright 2014 Observational Health Data Sciences and Informatics
#
# This file is part of CohortMethod
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author Observational Health Data Sciences and Informatics
# @author Patrick Ryan
# @author Marc Suchard
# @author Martijn Schuemie
#' CohortMethod
#'
#' @docType package
#' @name CohortMethod
#' @importFrom Rcpp evalCpp
#' @importFrom SqlRender loadRenderTranslateSql translateSql
#' @importFrom RJDBC dbDisconnect
#' @importFrom survival survfit Surv
#' @import Cyclops
#' @import DatabaseConnector
#' @useDynLib CohortMethod
NULL
#' A simulation profile
#' @docType data
#' @keywords datasets
#' @name cohortMethodDataSimulationProfile
#' @usage
#' data(cohortMethodDataSimulationProfile)
NULL
#' Propensity scores for the vignette
#' @docType data
#' @keywords datasets
#' @name vignettePs
#' @usage
#' data(vignettePs)
NULL
#' Balance data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteBalance
#' @usage
#' data(vignetteBalance)
NULL
#' Outcome data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteOutcomeModel1
#' @usage
#' data(vignetteOutcomeModel1)
NULL
#' Outcome data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteOutcomeModel2
#' @usage
#' data(vignetteOutcomeModel2)
NULL
#' Outcome data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteOutcomeModel3
#' @usage
#' data(vignetteOutcomeModel3)
NULL
#' Analysis summary data for the vignette
#' @docType data
#' @keywords datasets
#' @name vignetteAnalysisSummary
#' @usage
#' data(vignetteAnalysisSummary)
NULL
.onLoad <- function(libname, pkgname) {
missing(libname) # suppresses R CMD check note
missing(pkgname) # suppresses R CMD check note
# Copied this from the ff package:
if (is.null(getOption("ffmaxbytes"))) {
# memory.limit is windows specific
if (.Platform$OS.type == "windows") {
if (getRversion() >= "2.6.0")
options(ffmaxbytes = 0.5 * utils::memory.limit() * (1024^2)) else options(ffmaxbytes = 0.5 * utils::memory.limit())
} else {
# some magic constant
options(ffmaxbytes = 0.5 * 1024^3)
}
}
# Workaround for problem with ff on machines with lots of memory (see
# https://github.com/edwindj/ffbase/issues/37)
options(ffmaxbytes = min(getOption("ffmaxbytes"), .Machine$integer.max * 12))
}
#' @keywords internal
runCohortMethod <- function() {
# todo: implement function that will call all other functions needed to run a cohort method study
}
|
#' An output generator for the \file{NAMESPACE} file.
#'
#' @param tag function that processes a single tag. It should return a
#' character vector of lines to be included in the \file{NAMESPACE}.
#' Duplicates will be automatically removed.
#' @param name input tag name, usually set by \code{\link{roccer}}.
#' @dev
#' @export
namespace_out <- function(tag, name = NULL) {
rocout(tag, name, subclass = "namespace_out")
}
output_path.namespace_out <- function(writer, rocblock) {
"NAMESPACE"
}
#' @auto_imports
output_postproc.namespace_out <- function(output) {
lines <- unlist(str_split(unlist(output), "\n"))
with_collate("C", sort(unique(lines)))
}
output_write.namespace_out <- function(output, path) {
write_if_different(path, output)
}
# Useful output commands -----------------------------------------------------
ns_each <- function(directive) {
function(values) {
lines(directive, "(", quote_if_needed(values), ")")
}
}
ns_call <- function(directive) {
function(values) {
args <- paste(names(values), " = ", values, collapse = ", ", sep = "")
lines(directive, "(", args, ")")
}
}
ns_repeat1 <- function(directive) {
function(values) {
lines(directive, "(", quote_if_needed(values[1]), ",",
quote_if_needed(values[-1]), ")")
}
}
lines <- function(...) paste(..., sep = "", collapse = "\n")
|
/R/output-namespace.r
|
no_license
|
vspinu/roxygen3
|
R
| false | false | 1,359 |
r
|
#' An output generator for the \file{NAMESPACE} file.
#'
#' @param tag function that processes a single tag. It should return a
#' character vector of lines to be included in the \file{NAMESPACE}.
#' Duplicates will be automatically removed.
#' @param name input tag name, usually set by \code{\link{roccer}}.
#' @dev
#' @export
namespace_out <- function(tag, name = NULL) {
rocout(tag, name, subclass = "namespace_out")
}
output_path.namespace_out <- function(writer, rocblock) {
"NAMESPACE"
}
#' @auto_imports
output_postproc.namespace_out <- function(output) {
lines <- unlist(str_split(unlist(output), "\n"))
with_collate("C", sort(unique(lines)))
}
output_write.namespace_out <- function(output, path) {
write_if_different(path, output)
}
# Useful output commands -----------------------------------------------------
ns_each <- function(directive) {
function(values) {
lines(directive, "(", quote_if_needed(values), ")")
}
}
ns_call <- function(directive) {
function(values) {
args <- paste(names(values), " = ", values, collapse = ", ", sep = "")
lines(directive, "(", args, ")")
}
}
ns_repeat1 <- function(directive) {
function(values) {
lines(directive, "(", quote_if_needed(values[1]), ",",
quote_if_needed(values[-1]), ")")
}
}
lines <- function(...) paste(..., sep = "", collapse = "\n")
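# Illustrative note (added for clarity, not part of the original source):
# lines() pastes its pieces element-wise and joins the results with newlines,
# which is how ns_each() and ns_call() emit one NAMESPACE directive per value.
# For example:
# lines("export", "(", c("foo", "bar"), ")")
# #> [1] "export(foo)\nexport(bar)"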
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aditivo4.R
\name{aditivo4}
\alias{aditivo4}
\title{Multa por Atraso Aditivo 4}
\usage{
aditivo4(vec_meses_atrasados_aditivo4_phase1)
}
\arguments{
\item{vec_meses_atrasados_aditivo4_phase1}{A numeric vector giving the delay (in months) for each tranche. There are three tranches.}
}
\value{
total fine payment for the given number of months of delay per tranche
}
\description{
Computes the fine due to delay in construction, as specified in "aditivo 4"
}
\examples{
aditivo4(vec_meses_atrasados_aditivo4_phase1= c(10, 14, NA))
}
|
/man/aditivo4.Rd
|
no_license
|
mgaldino/line4PPPsim
|
R
| false | true | 617 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aditivo4.R
\name{aditivo4}
\alias{aditivo4}
\title{Multa por Atraso Aditivo 4}
\usage{
aditivo4(vec_meses_atrasados_aditivo4_phase1)
}
\arguments{
\item{vec_meses_atrasados_aditivo4_phase1}{A numeric vector giving the delay (in months) for each tranche. There are three tranches.}
}
\value{
total fine payment for the given number of months of delay per tranche
}
\description{
Computes the fine due to delay in construction, as specified in "aditivo 4"
}
\examples{
aditivo4(vec_meses_atrasados_aditivo4_phase1= c(10, 14, NA))
}
|
################################################################################
sim.freq<-function(nbal=4,nbloc=5,nbpop=3,N=1000,mig=0.001,mut=0.0001,f=0){
#allows for different N and f for each population
#modified param so that it reflects correctly population effective size
genofreq<-function(freq,fi){
if (fi==0) {geno.freq<-outer(freq,freq)}
else {geno.freq<-outer(freq,freq)*(1-fi)+diag(nbal)*diag(freq)*fi}
return(geno.freq)
}
if (nbal>99) stop ("Too many alleles, must be <100. Exiting")
cl<-match.call()
pl<-vector("list",nbloc)
freq<-gtools::rdirichlet(nbloc,rep(1,nbal)) #1=uniform freq has dim nbloc,nbal
if (length(N)!=nbpop) {
if (length(N)==1) N<-rep(N,nbpop)
else stop("N must be a vector of length nbpop. Exiting.")}
if (length(f)!=nbpop){
if (length(f)==1) f<-rep(f,nbpop)
else stop("f must be a vector of length nbpop. Exiting.")}
param<-outer(4*N/(1+f)*(mig+mut),freq,"*") #verify this [nbpop,nbloc,nbal]
for (il in 1:nbloc){
x<-matrix(numeric(nbal*nbpop),nrow=nbpop)
for (ip in 1:nbpop) x[ip,]<-gtools::rdirichlet(1,param[ip,il,])
pl[[il]]<-x
}
gf<-vector("list",nbloc)
for (il in 1:nbloc){
gf[[il]]<-matrix(numeric(nbal^2*nbpop),ncol=nbpop)
for (ip in 1:nbpop) gf[[il]][,ip]<-genofreq(pl[[il]][ip,],f[ip])}
if (nbal<10)
nfun<-function(x,y) x*10+y
else
nfun<-function(x,y) x*100+y
gn<-as.numeric(outer(1:nbal,1:nbal,nfun))
return(list(call=cl,fpl=pl,gf=gf,gn=gn))
}
##################
#'@export
###################
#########################################################################
sim.genot<-function(size=50,nbal=4,nbloc=5,nbpop=3,N=1000,mig=0.001,mut=0.0001,f=0){
a<-sim.freq(nbal,nbloc,nbpop,N,mig,mut,f)
dat<-data.frame(rep(1:nbpop,each=size),matrix(numeric(nbloc*nbpop*size),ncol=nbloc))
names(dat)<-c("Pop",paste("loc",1:nbloc,sep="."))
dumf<-function(x) sample(a$gn,size=size,replace=TRUE,prob=x)
for (il in 1:nbloc) dat[,il+1]<-as.numeric(apply(a$gf[[il]],2,dumf))
return(dat)
}
################################################################################
sim.freq.t<-function(nbal=4,nbloc=5,nbpop=3,N=1000,mig=0.001,mut=0.0001,f=0,t=100){
#allows for different N and f for each population
#modified N->Ne so that it reflects correctly population effective size
genofreq<-function(freq,fi){
if (fi==0) {geno.freq<-outer(freq,freq)}
else {geno.freq<-outer(freq,freq)*(1-fi)+diag(nbal)*diag(freq)*fi}
return(geno.freq)
}
if (nbal>99) stop ("Too many alleles, must be <100. Exiting")
cl<-match.call()
pl<-vector("list",nbloc)
freq<-gtools::rdirichlet(nbloc,rep(1,nbal))
if (length(N)!=nbpop) {
if (length(N)==1) N<-rep(N,nbpop)
else stop("N must be a vector of length nbpop. Exiting.")}
if (length(f)!=nbpop){
if (length(f)==1) f<-rep(f,nbpop)
else stop("f must be a vector of length nbpop. Exiting.")}
xmut<-matrix(rep(1/nbal,nbal*nbpop,nrow=nbpop),nrow=nbpop)
for (il in 1:nbloc){
xini<-matrix(rep(freq[il,],nbpop),nrow=nbpop,byrow=TRUE)
xn<-xini
Ne<-round(N/(1+f))
for (it in 1:t) {
xn<-(xn*(1-mig)+mig*xini)*(1-mut)+mut*xmut
for (ip in 1:nbpop) xn[ip,]<-stats::rmultinom(1,Ne[ip]*2,xn[ip,])/2/Ne[ip]
}
pl[[il]]<-xn
}
gf<-vector("list",nbloc)
for (il in 1:nbloc){
gf[[il]]<-matrix(numeric(nbal^2*nbpop),ncol=nbpop)
for (ip in 1:nbpop) gf[[il]][,ip]<-genofreq(pl[[il]][ip,],f[ip])}
if (nbal<10)
nfun<-function(x,y) x*10+y
else
nfun<-function(x,y) x*100+y
gn<-as.numeric(outer(1:nbal,1:nbal,nfun))
return(list(call=cl,fpl=pl,gf=gf,gn=gn))
}
###################################################
################################################################################
sim.freq.FIM.t<-function(nbal=4,nbloc=5,nbpop=3,N=1000,mig=0.001,mut=0.0001,f=0,t=100){
#allows for different N and f for each population
#modified N->Ne so that it reflects correctly population effective size
genofreq<-function(freq,fi){
if (fi==0) {geno.freq<-outer(freq,freq)}
else {geno.freq<-outer(freq,freq)*(1-fi)+diag(nbal)*diag(freq)*fi}
return(geno.freq)
}
if (nbal>99) stop ("Too many alleles, must be <100. Exiting")
cl<-match.call()
pl<-vector("list",nbloc)
freq<-gtools::rdirichlet(nbloc,rep(1,nbal))
if (length(N)!=nbpop) {
if (length(N)==1) N<-rep(N,nbpop)
else stop("N must be a vector of length nbpop. Exiting.")}
if (length(f)!=nbpop){
if (length(f)==1) f<-rep(f,nbpop)
else stop("f must be a vector of length nbpop. Exiting.")}
xmut<-matrix(rep(1/nbal,nbal*nbpop,nrow=nbpop),nrow=nbpop)
for (il in 1:nbloc){
xpool<-matrix(rep(freq[il,],nbpop),nrow=nbpop,byrow=TRUE)
xn<-xpool
Ne<-round(N/(1+f))
tot<-sum(Ne)
for (it in 1:t) {
xn<-(xn*(1-mig)+mig*xpool)*(1-mut)+mut*xmut
for (ip in 1:nbpop) xn[ip,]<-stats::rmultinom(1,Ne[ip]*2,xn[ip,])/2/Ne[ip]
#xpool<-matrix(rep((N/tot)%*%xn,nbpop),nrow=nbpop,byrow=TRUE) #weighted
xpool<-matrix(rep((rep(1,nbpop)/nbpop)%*%xn,nbpop),nrow=nbpop,byrow=TRUE) #unweighted
}
pl[[il]]<-xn
}
gf<-vector("list",nbloc)
for (il in 1:nbloc){
gf[[il]]<-matrix(numeric(nbal^2*nbpop),ncol=nbpop)
for (ip in 1:nbpop) gf[[il]][,ip]<-genofreq(pl[[il]][ip,],f[ip])}
if (nbal<10)
nfun<-function(x,y) x*10+y
else
nfun<-function(x,y) x*100+y
gn<-as.numeric(outer(1:nbal,1:nbal,nfun))
return(list(call=cl,fpl=pl,gf=gf,gn=gn))
}
###################################################
#'
#' Simulate data from a non equilibrium continent-island model
#'
#' This function allows to simulate genetic data from a non-equilibrium continent-island
#' model, where each island can have a different size and a different inbreeding
#' coefficient.
#'
#' This function simulates genetic data under the continent-islands model (IIM=TRUE)
#' or the finite island model (IIM=FALSE).
#' In the IIM, a continent of
#' infinite size sends migrants to islands of finite sizes \eqn{N_i} at a rate
#' \eqn{m}. Alleles can also mutate to a new state at a rate \eqn{\mu}. Under this model,
#' the expected \eqn{F_{STi}, \theta_i}, can be calculated and compared to empirical
#' estimates.
#'
#' In this model, \eqn{\theta_t} can be written as a function of population size
#' \eqn{N_i}, migration rate \eqn{m}, mutation rate \eqn{\mu} and \eqn{\theta_{(t-1)}}.
#'
#' The rational is as follows:
#'
#' With probability \eqn{\frac{1}{N}}, 2 alleles from 2 different individuals in
#' the current generation are sampled from the same individual of the previous
#' generation:
#'
#' -Half the time, the same allele is drawn from the parent;
#'
#' -The other half, two different alleles are drawn, but they are identical in
#' proportion \eqn{\theta_{(t-1)}}.
#'
#' -With probability \eqn{1-\frac{1}{N}}, the 2 alleles are drawn from different
#' individuals in the previous generation, in which case they are identical in
#' proportion \eqn{\theta_{(t-1)}}.
#'
#' This holds providing that neither alleles have mutated or migrated. This is
#' the case with probability \eqn{(1-m)^2 \times (1-\mu)^2}.
#' If an allele is a mutant or a migrant, then its coancestry with another allele
#' is 0 in the infinite continent-islands model (it is not the case in the finite island model).
#'
#' Note also that the mutation scheme assumed is the infinite allele (or site)
#' model. If the number of alleles is finite (as will be the case in what follows),
#' the corresponding mutation model is the K-allele model and the mutation rate
#' has to be adjusted to \eqn{\mu'=\frac{K-1}{K}\mu}.
#'
#' Lets substitute \eqn{\alpha} for \eqn{(1-m)^2 (1-\mu)^2} and \eqn{x} for
#' \eqn{\frac{1}{2N}}.
#'
#' The expectation of \eqn{F_{ST}}, \eqn{\theta} can be written as:
#'
#' \deqn{\theta_t=(\alpha (1-x))^t \theta_0 + \frac{x}{1-x}\sum_{i=1}^t (\alpha (1-x))^i}
#'
#' which reduces to \eqn{\theta_t=\frac{x}{1-x}\sum_{i=1}^t (\alpha (1-x))^i} if \eqn{\theta_0=0}.
#'
#' Transition equations for \eqn{\theta} in the migrant-pool island model (IIM=FALSE) are given in Rousset (1996).
#' Currently, the migrant pool is made of equal contribution from each island, irrespective of their size.
#'
#'
#' @usage sim.genot.t(size=50,nbal=4,nbloc=5,nbpop=3,N=1000,
#' mig=0.001,mut=0.0001,f=0,t=100,IIM=TRUE)
#'
#' @param size the number of sampled individuals per island
#' @param nbal the number of alleles per locus (maximum of 99)
#' @param nbloc the number of loci to simulate
#' @param nbpop the number of islands to simulate
#' @param N the effective population sizes of each island. If only one number, all
#' islands are assumed to be of the same size
#' @param mig the migration rate from the continent to the islands
#' @param mut the mutation rate of the loci
#' @param f the inbreeding coefficient for each island
#' @param t the number of generation since the islands were created
#' @param IIM whether to simulate a continent island Model (default) or a migrant pool island Model
#'
#' @return A data frame with size*nbpop rows and nbloc+1 columns. Each row is an
#' individual, the first column contains the island to which the individual belongs,
#' the following nbloc columns contain the genotype for each locus.
#'
#' @author Jerome Goudet \email{jerome.goudet@@unil.ch}
#'
#' @references
#' \href{https://pubmed.ncbi.nlm.nih.gov/8846911/}{Rousset, F. (1996)} Equilibrium values of measures of population subdivision for
#' stepwise mutation processes. Genetics 142:1357
#'
#' @examples
#'
#' psize<-c(100,1000,10000,100000,1000000)
#' dat<-sim.genot.t(nbal=4,nbloc=20,nbpop=5,N=psize,mig=0.001,mut=0.0001,t=100)
#' summary(wc(dat)) #Weir and cockerham overall estimators of FST & FIS
#' betas(dat) # Population specific estimator of FST
#'
#' @export
#'
#########################################################################
sim.genot.t<-function(size=50,nbal=4,nbloc=5,nbpop=3,N=1000,mig=0.001,mut=0.0001,f=0,t=100,IIM=TRUE){
if (IIM) a<-sim.freq.t(nbal,nbloc,nbpop,N,mig,mut,f,t)
else a<-sim.freq.FIM.t(nbal,nbloc,nbpop,N,mig,mut,f,t)
dat<-data.frame(rep(1:nbpop,each=size),matrix(numeric(nbloc*nbpop*size),ncol=nbloc))
names(dat)<-c("Pop",paste("loc",1:nbloc,sep="."))
dumf<-function(x) sample(a$gn,size=size,replace=TRUE,prob=x)
for (il in 1:nbloc) dat[,il+1]<-as.numeric(apply(a$gf[[il]],2,dumf))
return(dat)
}
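# Worked sketch of the expected-theta recursion documented above (an added
# illustration, not original package code; expected_theta is a hypothetical
# helper name). With x = 1/(2N) and alpha = (1-mig)^2 * (1-mut')^2, where
# mut' = (nbal-1)/nbal * mut is the K-allele adjusted mutation rate, one
# generation gives theta_t = alpha * (x + (1 - x) * theta_{t-1}).
expected_theta <- function(N, mig, mut, nbal, t) {
  x     <- 1 / (2 * N)
  alpha <- (1 - mig)^2 * (1 - mut * (nbal - 1) / nbal)^2
  theta <- 0
  for (i in 1:t) theta <- alpha * (x + (1 - x) * theta)
  theta
}
# e.g. expected_theta(N = 1000, mig = 0.001, mut = 0.0001, nbal = 4, t = 100)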
###############################################################################
###############################################################################
###############################################################################
################################################################################
sim.freq.metapop.t<-function(nbal,nbloc,nbpop,N,mig,mut,f,t,epsilon=1e-8){
#allows for different N and f for each population
#allows for different migration rates among populations, mig must be a matrix of dim nbpop*nbpop
#modified param so that it reflects correctly population effective size
genofreq<-function(freq,fi){
if (fi==0) {geno.freq<-outer(freq,freq)}
else {geno.freq<-outer(freq,freq)*(1-fi)+diag(nbal)*diag(freq)*fi}
return(geno.freq)
}
if (nbal>99) stop ("Too many alleles, must be <100. Exiting")
cl<-match.call()
pl<-vector("list",nbloc)
freq<-gtools::rdirichlet(nbloc,rep(1,nbal))
if (length(N)!=nbpop) {
if (length(N)==1) N<-rep(N,nbpop)
else stop("N must be a vector of length nbpop. Exiting.")}
if (length(f)!=nbpop){
if (length(f)==1) f<-rep(f,nbpop)
else stop("f must be a vector of length nbpop. Exiting.")}
# sanity check for migration matrix
mig.dim<-dim(mig)
if (is.null(mig.dim) || (mig.dim[1]!=mig.dim[2]) || (mig.dim[1]!=nbpop) || (abs(sum(rowSums(mig)-1))>epsilon)){
stop(paste("mig must be a ",nbpop,"X",nbpop," matrix with each row summing to 1. Exiting",sep=""))
}
xmut<-matrix(rep(1/nbal,nbal*nbpop,nrow=nbpop),nrow=nbpop)
for (il in 1:nbloc){
xini<-matrix(rep(freq[il,],nbpop),nrow=nbpop,byrow=TRUE)
xn<-xini
tot<-sum(N)
for (it in 1:t) {
xn<-(mig%*%xn)*(1-mut)+mut*xmut
for (ip in 1:nbpop) xn[ip,]<-stats::rmultinom(1,N[ip]*2,xn[ip,])/2/N[ip]
}
pl[[il]]<-xn
}
gf<-vector("list",nbloc)
for (il in 1:nbloc){
gf[[il]]<-matrix(numeric(nbal^2*nbpop),ncol=nbpop)
for (ip in 1:nbpop) gf[[il]][,ip]<-genofreq(pl[[il]][ip,],f[ip])}
if (nbal<10)
nfun<-function(x,y) x*10+y
else
nfun<-function(x,y) x*100+y
gn<-as.numeric(outer(1:nbal,1:nbal,nfun))
return(list(call=cl,fpl=pl,gf=gf,gn=gn))
}
#########################################################################
###################################################
#'
#' Simulate genetic data from a metapopulation model
#'
#' This function allows to simulate genetic data from a metapopulation
#' model, where each population can have a different size and a different inbreeding
#' coefficient, and migration between each population is given in a migration matrix.
#'
#' This function simulates genetic data under a migration matrix model.
#' Each population \eqn{i} sends a proportion of migrant alleles \eqn{m_{ij}} to population \eqn{j}
#' and receives a proportion of migrant alleles \eqn{m_{ji}} from population \eqn{j}.
#'
#' In this model, \eqn{\theta_t} can be written as a function of population size
#' \eqn{N_i}, migration rate \eqn{m_{ij}}, mutation rate \eqn{\mu} and \eqn{\theta_{(t-1)}}.
#'
#' The rational is as follows:
#'
#' With probability \eqn{\frac{1}{N_i}}, 2 alleles from 2 different individuals in
#' the current generation are sampled from the same individual of the previous
#' generation:
#'
#' -Half the time, the same allele is drawn from the parent;
#'
#' -The other half, two different alleles are drawn, but they are identical in
#' proportion \eqn{\theta_{(t-1)}}.
#'
#' -With probability \eqn{1-\frac{1}{N_i}}, the 2 alleles are drawn from different
#' individuals in the previous generation, in which case they are identical in
#' proportion \eqn{\theta_{(t-1)}}.
#'
#' This holds providing that neither alleles have mutated or migrated. This is
#' the case with probability \eqn{m_{ii}^2 \times (1-\mu)^2}.
#' If an allele is a mutant, then its coancestry with another allele
#' is 0.
#'
#' Note also that the mutation scheme assumed is the infinite allele (or site)
#' model. If the number of alleles is finite (as will be the case in what follows),
#' the corresponding mutation model is the K-allele model and the mutation rate
#' has to be adjusted to \eqn{\mu'=\frac{K-1}{K}\mu}.
#'
#' Continue derivation
#'
#' @usage sim.genot.metapop.t(size=50,nbal=4,nbloc=5,nbpop=3,N=1000,
#' mig=diag(3),mut=0.0001,f=0,t=100)
#'
#' @param size the number of sampled individuals per population
#' @param nbal the number of alleles per locus (maximum of 99)
#' @param nbloc the number of loci to simulate
#' @param nbpop the number of populations to simulate
#' @param N the effective population sizes of each population. If only one number, all
#' populations are assumed to be of the same size
#' @param mig a matrix with nbpop rows and columns giving the migration rate
#' from population i (in row) to population j (in column). Each row must sum to 1.
#' @param mut the mutation rate of the loci
#' @param f the inbreeding coefficient for each population
#' @param t the number of generation since the islands were created
#'
#' @return A data frame with size*nbpop rows and nbloc+1 columns. Each row is an
#' individual, the first column contains the identifier of the population to which the individual belongs,
#' the following nbloc columns contain the genotype for each locus.
#'
#'
#'
#' @details
#'
#'
#' @author Jerome Goudet \email{jerome.goudet@@unil.ch}
#'
#' @examples
#'
#' #2 populations
#' psize<-c(10,1000)
#' mig.mat<-matrix(c(0.99,0.01,0.1,0.9),nrow=2,byrow=TRUE)
#' dat<-sim.genot.metapop.t(nbal=10,nbloc=100,nbpop=2,N=psize,mig=mig.mat,mut=0.00001,t=100)
#' betas(dat)$betaiovl # Population specific estimator of FST
#'
#' #1D stepping stone
#' \dontrun{
#' np<-10
#' m<-0.2
#' mig.mat<-diag(np)*(1-m)
#' diag(mig.mat[-1,-np])<-m/2
#' diag(mig.mat[-np,-1])<-m/2
#' mig.mat[1,1:2]<-c(1-m/2,m/2)
#' mig.mat[np,(np-1):np]<-c(m/2,1-m/2)
#' dat<-sim.genot.metapop.t(nbal=10,nbloc=50,nbpop=np,mig=mig.mat,t=400)
#' pcoa(as.matrix(genet.dist(dat))) # principal coordinates plot
#' }
#'
#' @export
#'
##################################################################################################
sim.genot.metapop.t<-function(size=50,nbal=4,nbloc=5,nbpop=3,N=1000,mig=diag(3),mut=0.0001,f=0,t=100){
a<-sim.freq.metapop.t(nbal,nbloc,nbpop,N,mig,mut,f,t)
dat<-data.frame(rep(1:nbpop,each=size),matrix(numeric(nbloc*nbpop*size),ncol=nbloc))
names(dat)<-c("Pop",paste("loc",1:nbloc,sep="."))
dumf<-function(x) sample(a$gn,size=size,replace=TRUE,prob=x)
for (il in 1:nbloc) dat[,il+1]<-as.numeric(apply(a$gf[[il]],2,dumf))
return(dat)
}
|
/R/simgenot_new.R
|
no_license
|
jgx65/hierfstat
|
R
| false | false | 17,012 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracking-runs.R
\name{mlflow_delete_run}
\alias{mlflow_delete_run}
\title{Delete a Run}
\usage{
mlflow_delete_run(run_id, client = NULL)
}
\arguments{
\item{run_id}{Run ID.}
\item{client}{(Optional) An `mlflow_client` object.}
}
\description{
Delete a Run
}
\details{
When `client` is not specified, these functions attempt to infer the current active client.
}
\seealso{
Other tracking functions: \code{\link{mlflow_create_experiment}},
\code{\link{mlflow_delete_experiment}},
\code{\link{mlflow_download_artifacts}},
\code{\link{mlflow_end_run}},
\code{\link{mlflow_get_experiment}},
\code{\link{mlflow_get_metric_history}},
\code{\link{mlflow_get_run}},
\code{\link{mlflow_list_artifacts}},
\code{\link{mlflow_list_experiments}},
\code{\link{mlflow_list_run_infos}},
\code{\link{mlflow_log_artifact}},
\code{\link{mlflow_log_batch}},
\code{\link{mlflow_log_metric}},
\code{\link{mlflow_log_param}},
\code{\link{mlflow_rename_experiment}},
\code{\link{mlflow_restore_experiment}},
\code{\link{mlflow_restore_run}},
\code{\link{mlflow_search_runs}},
\code{\link{mlflow_set_tag}},
\code{\link{mlflow_start_run}}
}
\concept{tracking functions}
|
/mlflow/R/mlflow/man/mlflow_delete_run.Rd
|
permissive
|
Kublai-Jing/mlflow
|
R
| false | true | 1,260 |
rd
|
|
CrossEntropy=function(P,ClAtt){
#this function is for computing the cross entropy measure
#the idea is that we compare the partition we got by using the attribute information
#inputs:
#P: this is the propagation matrix we are learning through our task
#Net (not an argument of this function): our NxN adjacency matrix. Note that N will be smaller than the number of nodes
#ClAtt: This is the test set labels under our clustering
#first step is to make sure our P is normalized
# for(i in 1:nrow(P)){
# P[i,]=P[i,]/sum(P[i,])
# }
P=P+0.000001
##the first step is to create a null probability distribution##
AttProbDist=matrix(0,nrow=length(ClAtt),ncol=max(ClAtt))
for(i in 1:length(ClAtt)){
AttProbDist[i,ClAtt[i]]=1
}
cEnt=0
for(j in 1:length(P)){
cEnt=cEnt+(AttProbDist[j]*log(P[j]))
}
cEnt=cEnt*-1
cEnt
} ##end of the cross entropy function
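## Added note (sketch, not in the original file): because AttProbDist is a
## one-hot matrix with the same dimensions as P, the loop above can be
## written in vectorised form; P and ClAtt are the same inputs as above.
# CrossEntropyVec <- function(P, ClAtt){
#   P <- P + 0.000001
#   AttProbDist <- matrix(0, nrow=length(ClAtt), ncol=max(ClAtt))
#   AttProbDist[cbind(seq_along(ClAtt), ClAtt)] <- 1
#   -sum(AttProbDist * log(P))
# }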
|
/Helper/CrossEntropy.R
|
no_license
|
yingstat/MOAB
|
R
| false | false | 830 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FES0550.R
\name{FES0550}
\alias{FES0550}
\title{Transmission from Thorlabs shortpass filter number FES0550 given wavelength}
\usage{
FES0550(wavelength_nm)
}
\arguments{
\item{wavelength_nm}{incident wavelength in nanometers}
}
\value{
Returns the fractional transmission coefficient of Thorlabs shortpass filter number FES0550
}
\description{
Transmission from Thorlabs shortpass filter number FES0550 given wavelength
}
\examples{
FES0550(570)
}
|
/man/FES0550.Rd
|
no_license
|
tjconstant/thor
|
R
| false | true | 526 |
rd
|
|
#' Estimation of the Graded Response Model
#' @description Estimate the GRM using the joint or marginal
#' maximum likelihood estimation
#' @name estimate_grm
NULL
#' @rdname estimate_grm
#' @description \code{model_grm_eap} scores response vectors using the EAP method
#' @return \code{model_grm_eap} returns theta estimates and standard errors in a list
#' @examples
#' with(model_grm_gendata(10, 50, 3),
#' cbind(true=t, est=model_grm_eap(u, a, b)$t))
#' @importFrom stats dnorm
#' @export
model_grm_eap <- function(u, a, b, D=1.702, priors=c(0, 1), bounds_t=c(-4, 4)){
quad <- hermite_gauss('11')
quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, priors[1], priors[2])
n_p <- dim(u)[1]
n_i <- dim(u)[2]
n_q <- length(quad$t)
p <- model_grm_prob(quad$t, a, b, D)
ix <- model_polytomous_3dindex(u)
lh <- array(NA, c(n_p, n_i, n_q))
for(q in 1:n_q)
lh[,,q] <- array(p[q,,][ix[,-1]], c(n_p, n_i))
lh <- apply(lh, c(1, 3), prod, na.rm=T)
t <- ((lh / (lh %*% quad$w)[,1]) %*% (quad$w * quad$t))[,1]
t[t < bounds_t[1]] <- bounds_t[1]
t[t > bounds_t[2]] <- bounds_t[2]
t_sd <- ((lh / (lh %*% quad$w)[,1] * outer(t, quad$t, '-')^2) %*% quad$w)[,1]
t_sd <- sqrt(t_sd)
list(t=t, sd=t_sd)
}
#' @rdname estimate_grm
#' @description \code{model_grm_map} scores response vectors using the MAP method
#' @return \code{model_grm_map} returns theta estimates in a list
#' @examples
#' with(model_grm_gendata(10, 50, 3),
#' cbind(true=t, est=model_grm_map(u, a, b)$t))
#' @export
model_grm_map <- function(u, a, b, D=1.702, priors=c(0, 1), bounds_t=c(-4, 4), iter=30, conv=1e-3){
ix <- model_polytomous_3dindex(u)
t <- rnorm(dim(u)[1], 0, .01)
t_free <- rep(T, length(t))
for(m in 1:iter){
dv <- model_grm_dv_jmle(ix, model_grm_dv_Pt(t, a, b, D))
dv$dv1 <- rowSums(dv$dv1, na.rm=T)
dv$dv2 <- rowSums(dv$dv2, na.rm=T)
if(!is.null(priors)){
dv$dv1 <- dv$dv1 - (t - priors[1]) / priors[2]^2
dv$dv2 <- dv$dv2 - 1 / priors[2]^2
}
nr <- nr_iteration(t, t_free, dv, 1.0, 1.0, bounds_t)
t <- nr$param
if(max(abs(nr$h)) < conv) break
}
list(t=t)
}
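## Added comparison sketch (not part of the original source): EAP and MAP take
## the same u, a, b inputs and both return a list with theta estimates in $t,
## so their scores can be compared directly on simulated data.
# with(model_grm_gendata(10, 20, 3),
#      cor(model_grm_eap(u, a, b)$t, model_grm_map(u, a, b)$t))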
#' @rdname estimate_grm
#' @keywords internal
model_grm_dv_Pt <- function(t, a, b, D){
n_c <- ncol(b) + 1
p <- model_grm_prob(t, a, b, D, raw=T)
dv1 <- aperm(aperm(p*(1-p), c(2,1,3)) * D * a, c(2,1,3))
dv1 <- dv1[,,1:n_c] - dv1[,,-1]
dv2 <- aperm(aperm(p*(1-p)*(1-2*p), c(2,1,3)) * (D * a)^2, c(2,1,3))
dv2 <- dv2[,,1:n_c] - dv2[,,-1]
p <- p[,,1:n_c] - p[,,-1]
list(p=p, dv1=dv1, dv2=dv2)
}
#' @rdname estimate_grm
#' @keywords internal
model_grm_dv_Pa <- function(t, a, b, D){
n_c <- ncol(b) + 1
p <- model_grm_prob(t, a, b, D, raw=T)
term0 <- D * outer(t, cbind(0, b, 0), '-')
dv1 <- p * (1-p) * term0
dv1 <- dv1[,,1:n_c] - dv1[,,-1]
dv2 <- p * (1-p) * (1-2*p) * term0^2
dv2 <- dv2[,,1:n_c] - dv2[,,-1]
p <- p[,,1:n_c] - p[,,-1]
list(p=p, dv1=dv1, dv2=dv2)
}
#' @rdname estimate_grm
#' @keywords internal
model_grm_dv_Pb <- function(t, a, b, D){
n_p <- length(t)
n_i <- nrow(b)
n_c <- ncol(b) + 1
p <- model_grm_prob(t, a, b, D, raw=T)
dv1 <- dv2 <- array(0, c(n_p, n_i, n_c, n_c-1))
for(k in 1:(n_c-1)){
term0 <- t(t(p[,,k+1]*(1-p[,,k+1])) * (-D * a))
dv1[,,k,k] <- -1 * term0
dv1[,,k+1,k] <- term0
term1<- t(t(p[,,k+1]*(1-p[,,k+1])*(1-2*p[,,k+1])) * (D*a)^2)
dv2[,,k,k] <- -1 * term1
dv2[,,k+1,k] <- term1
}
p <- p[,,1:n_c] - p[,,-1]
list(p=p, dv1=dv1, dv2=dv2)
}
#' @rdname estimate_grm
#' @param u_ix the 3d indices
#' @param dvp the derivatives of P
#' @keywords internal
model_grm_dv_jmle <- function(u_ix, dvp){
n_p <- max(u_ix[,1])
n_i <- max(u_ix[,2])
dv1 <- array(with(dvp, dv1[u_ix]/p[u_ix]), c(n_p, n_i))
dv2 <- array(with(dvp, dv2[u_ix]/p[u_ix]), c(n_p, n_i)) - dv1^2
list(dv1=dv1, dv2=dv2)
}
#' @rdname estimate_grm
#' @description \code{model_grm_jmle} estimates the parameters using the
#' joint maximum likelihood estimation (JMLE) method
#' @param u the observed response matrix, 2d matrix
#' @param t ability parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param a discrimination parameters, 1d vector (fixed value) or NA (freely estimate)
#' @param b difficulty parameters, 2d matrix (fixed value) or NA (freely estimate)
#' @param D the scaling constant, 1.702 by default
#' @param iter the maximum iterations
#' @param conv the convergence criterion (maximum absolute change in the parameters)
#' @param nr_iter the maximum newton-raphson iterations, default=10
#' @param scale the scale of theta parameters
#' @param bounds_t bounds of ability parameters
#' @param bounds_a bounds of discrimination parameters
#' @param bounds_b bounds of location parameters
#' @param priors a list of prior distributions
#' @param decay decay rate
#' @param verbose TRUE to print debuggin information
#' @param true_params a list of true parameters for evaluating the estimation accuracy
#' @return \code{model_grm_jmle} returns estimated t, a, b parameters in a list
#' @examples
#' \donttest{
#' # generate data
#' x <- model_grm_gendata(1000, 40, 3)
#' # free calibration, 40 iterations
#' y <- model_grm_jmle(x$u, true_params=x, iter=40, verbose=TRUE)
#' }
#' @importFrom stats cor
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_grm_jmle <- function(u, t=NA, a=NA, b=NA, D=1.702, iter=100, nr_iter=10, conv=1e-3, scale=c(0, 1), bounds_t=c(-4, 4), bounds_a=c(.01, 2.5), bounds_b=c(-4, 4), priors=list(t=c(0, 1)), decay=1, verbose=FALSE, true_params=NULL){
# configuration
h_max <- 1.0
tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter))
# initial values
n_p <- dim(u)[1]
n_i <- dim(u)[2]
n_c <- max(u, na.rm=T) + 1
u_ix <- model_polytomous_3dindex(u)
if(length(t) == 1) t <- rep(t, n_p)
t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01)
if(length(a) == 1) a <- rep(a, n_i)
a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01)
if(length(b) == 1) b <- array(b, c(n_i, n_c-1))
b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .01)
b <- t(apply(b, 1, sort))
# estimate parameters
for (k in 1:iter){
# max change in parameters
max_absh <- 0
# t parameters
if(any(t_free)){
for(j in 1:nr_iter){
dv_t <- model_grm_dv_jmle(u_ix, model_grm_dv_Pt(t, a, b, D))
dv_t$dv1 <- rowSums(dv_t$dv1, na.rm=T)
dv_t$dv2 <- rowSums(dv_t$dv2, na.rm=T)
if(!is.null(priors$t)){
dv_t$dv1 <- dv_t$dv1 - (t - priors$t[1]) / priors$t[2]^2
dv_t$dv2 <- dv_t$dv2 - 1 / priors$t[2]^2
}
nr_t <- nr_iteration(t, t_free, dv_t, h_max, decay, bounds_t)
t <- nr_t$param
if(max(abs(nr_t$h[t_free])) < conv) break
}
max_absh <- max(max_absh, abs(nr_t$h[t_free]))
# rescale theta
if(!is.null(scale)) t <- (t - mean(t)) / sd(t) * scale[2] + scale[1]
}
# b parameters
if(any(b_free)){
for(j in 1:nr_iter){
dv_b <- model_grm_dv_Pb(t, a, b, D)
dv_bh <- array(0, c(n_i, n_c-1))
for(m in 1:(n_c-1)){
dv <- model_grm_dv_jmle(u_ix, with(dv_b, list(p=p, dv1=dv1[,,,m], dv2=dv2[,,,m])))
dv$dv1 <- colSums(dv$dv1, na.rm=T)
dv$dv2 <- colSums(dv$dv2, na.rm=T)
if(!is.null(priors$b)){
dv$dv1 <- dv$dv1 - (b[,m] - priors$b[1]) / priors$b[2]^2
dv$dv2 <- dv$dv2 - 1 / priors$b[2]^2
}
nr <- nr_iteration(b[,m], b_free[,m], dv, h_max, decay, bounds_b)
b[,m] <- nr$param
dv_bh[,m] <- nr$h
}
b <- t(apply(b, 1, sort))
if(max(abs(dv_bh[b_free])) < conv) break
}
max_absh <- max(max_absh, abs(dv_bh[b_free]))
}
# a parameters
if(any(a_free)){
for(j in 1:nr_iter){
dv_a <- model_grm_dv_jmle(u_ix, model_grm_dv_Pa(t, a, b, D))
dv_a$dv1 <- colSums(dv_a$dv1, na.rm=T)
dv_a$dv2 <- colSums(dv_a$dv2, na.rm=T)
if(!is.null(priors$a)){
dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2)
dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2))
}
nr_a <- nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a)
a <- nr_a$param
if(max(abs(nr_a$h[a_free])) < conv) break
}
max_absh <- max(max_absh, abs(nr_a$h[a_free]))
}
decay <- decay * decay
# model fit
if(verbose) {
loglh <- -2 * sum(model_grm_lh(u, t, a, b, D, log=T), na.rm=T)
cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), ', max_change = ', round(max_absh, 4), '\n', sep='')
tracking$fit[k] <- loglh
if(any(t_free)) tracking$t[k] <- mean(abs(nr_t$h[t_free]))
if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free]))
if(any(b_free)) tracking$b[k] <- mean(abs(dv_bh[b_free]))
}
if(max_absh < conv)
break
}
# debugging
if(verbose)
estimate_grm_debug(tracking, k)
# compare with true parameters
if(!is.null(true_params))
estimate_grm_eval(true_params, n_c, t, a, b, t_free, a_free, b_free)
list(t=t, a=a, b=b)
}
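## Added sketch (not in the original source): as documented above, t, a and b
## accept fixed values or NA, so a calibrated subset of items can be held fixed
## (anchored) while the remaining items are estimated freely. Illustrative only.
# x <- model_grm_gendata(500, 20, 3)
# a_anchor <- x$a; a_anchor[11:20] <- NA          # items 1-10 fixed, 11-20 free
# b_anchor <- x$b; b_anchor[11:20, ] <- NA
# y <- model_grm_jmle(x$u, a=a_anchor, b=b_anchor, iter=20)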
#' @rdname estimate_grm
#' @keywords internal
model_grm_dv_mmle <- function(u_ix, quad, pdv){
n_p <- max(u_ix[,1])
n_i <- max(u_ix[,2])
n_q <- length(quad$t)
p0 <- array(NA, c(n_p, n_i, n_q))
for(q in 1:n_q)
p0[,,q] <- array(pdv$p[q,,][u_ix[,-1]], c(n_p, n_i))
p1 <- apply(p0, c(1, 3), prod, na.rm=T)
p2 <- (p1 %*% quad$w)[,1]
dv1 <- dv2 <- array(0, c(n_p, n_i))
dv_common <- t(t(p1 / p2) * quad$w)
for(q in 1:n_q) {
dv1 <- dv1 + dv_common[,q] / p0[,,q] * array(pdv$dv1[q,,][u_ix[,-1]], c(n_p, n_i))
dv2 <- dv2 + dv_common[,q] / p0[,,q] * array(pdv$dv2[q,,][u_ix[,-1]], c(n_p, n_i))
}
dv2 <- dv2 - dv1^2
list(dv1=dv1, dv2=dv2)
}
#' @rdname estimate_grm
#' @description \code{model_grm_mmle} estimates the parameters using the
#' marginal maximum likelihood estimation (MMLE) method
#' @param quad_degree the number of quadrature points
#' @param score_fn the scoring method: 'eap' or 'map'
#' @return \code{model_grm_mmle} returns estimated t, a, b parameters in a list
#' @examples
#' \donttest{
#' # generate data
#' x <- model_grm_gendata(1000, 40, 3)
#' # free estimation, 40 iterations
#' y <- model_grm_mmle(x$u, true_params=x, iter=40, verbose=TRUE)
#' }
#' @importFrom stats cor
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_grm_mmle <- function(u, t=NA, a=NA, b=NA, d=NA, D=1.702, iter=100, nr_iter=10, conv=1e-3, bounds_t=c(-4, 4), bounds_a=c(.01, 2.5), bounds_b=c(-4, 4), priors=list(t=c(0, 1)), decay=1, quad_degree='11', score_fn=c('eap', 'map'), verbose=FALSE, true_params=NULL){
# configuration
h_max <- 1.0
score_fn <- switch(match.arg(score_fn, score_fn), 'eap'=model_grm_eap, 'map'=model_grm_map)
if(is.null(priors$t)) priors$t <- c(0, 1)
quad <- hermite_gauss(quad_degree)
quad$w <- quad$w * exp(quad$t^2) * dnorm(quad$t, priors$t[1], priors$t[2])
tracking <- list(fit=rep(NA, iter), t=rep(NA, iter), a=rep(NA, iter), b=rep(NA, iter), d=rep(NA, iter))
# initial values
n_p <- dim(u)[1]
n_i <- dim(u)[2]
n_c <- max(u, na.rm=T) + 1
u_ix <- model_polytomous_3dindex(u)
if(length(t) == 1) t <- rep(t, n_p)
t[t_free <- is.na(t)] <- rnorm(sum(is.na(t)), 0, .01)
if(length(a) == 1) a <- rep(a, n_i)
a[a_free <- is.na(a)] <- rlnorm(sum(is.na(a)), -.1, .01)
if(length(b) == 1) b <- array(b, c(n_i, n_c-1))
b[b_free <- is.na(b)] <- rnorm(sum(is.na(b)), 0, .1)
b <- t(apply(b, 1, sort))
# estimate parameters
for (k in 1:iter){
# max change in parameters
max_absh <- 0
# b parameters
if(any(b_free)){
for(j in 1:nr_iter){
dv_b <- model_grm_dv_Pb(quad$t, a, b, D)
dv_bh <- array(0, c(n_i, n_c-1))
for(m in 1:(n_c-1)){
dv <- model_grm_dv_mmle(u_ix, quad, with(dv_b, list(p=p, dv1=dv1[,,,m], dv2=dv2[,,,m])))
dv$dv1 <- colSums(dv$dv1, na.rm=T)
dv$dv2 <- colSums(dv$dv2, na.rm=T)
if(!is.null(priors$b)){
dv$dv1 <- dv$dv1 - (b[,m] - priors$b[1]) / priors$b[2]^2
dv$dv2 <- dv$dv2 - 1 / priors$b[2]^2
}
nr <- nr_iteration(b[,m], b_free[,m], dv, h_max, decay, bounds_b)
b[,m] <- nr$param
dv_bh[,m] <- nr$h
}
b <- t(apply(b, 1, sort))
if(max(abs(dv_bh)) < conv) break
}
max_absh <- max(max_absh, abs(dv_bh[b_free]))
}
# a parameters
if(any(a_free)){
for(j in 1:nr_iter){
dv_a <- model_grm_dv_mmle(u_ix, quad, model_grm_dv_Pa(quad$t, a, b, D))
dv_a$dv1 <- colSums(dv_a$dv1, na.rm=T)
dv_a$dv2 <- colSums(dv_a$dv2, na.rm=T)
if(!is.null(priors$a)){
dv_a$dv1 <- dv_a$dv1 - 1/a * (1 + (log(a)-priors$a[1])/priors$a[2]^2)
dv_a$dv2 <- dv_a$dv2 - 1/a^2 * (1/priors$a[2]^2 - (1 + (log(a)-priors$a[1])/priors$a[2]^2))
}
nr_a <- nr_iteration(a, a_free, dv_a, h_max, decay, bounds_a)
a <- nr_a$param
if(max(abs(nr_a$h[a_free])) < conv) break
}
max_absh <- max(max_absh, abs(nr_a$h[a_free]))
}
# scoring
if(any(t_free))
t[t_free] <- score_fn(u, a, b, D, priors=priors$t, bounds_t=bounds_t)$t[t_free]
decay <- decay * decay
# model fit
if(verbose) {
loglh <- -2 * sum(model_grm_lh(u, t, a, b, D, log=T), na.rm=T)
cat('iter #', k, ': -2 log-likelihood = ', round(loglh, 2), ', max_change = ', round(max_absh, 4), '\n', sep='')
tracking$fit[k] <- loglh
if(any(a_free)) tracking$a[k] <- mean(abs(nr_a$h[a_free]))
if(any(b_free)) tracking$d[k] <- mean(abs(dv_bh[b_free]))
}
if(max_absh < conv)
break
}
# debugging
if(verbose)
estimate_grm_debug(tracking, k)
# compare with true parameters
if(!is.null(true_params))
estimate_grm_eval(true_params, n_c, t, a, b, t_free, a_free, b_free)
list(t=t, a=a, b=b)
}
#' @rdname estimate_grm
#' @param index the indices of items being plotted
#' @param intervals intervals on the x-axis
#' @return \code{model_grm_fitplot} returns a \code{ggplot} object
#' @examples
#' with(model_grm_gendata(1000, 20, 3),
#' model_grm_fitplot(u, t, a, b, index=c(1, 3, 5)))
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
model_grm_fitplot <- function(u, t, a, b, D=1.702, index=NULL, intervals=seq(-3, 3, .5)){
if(is.null(index)) index <- seq(b)
groups <- cut(t, intervals, labels=(intervals[-length(intervals)] + intervals[-1]) / 2)
obs <- aggregate(u, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)]
obs <- melt(obs, id.vars='intervals', variable.name='items')
obs[, 'type'] <- 'Observed'
p <- model_grm_prob(t, a, b, D)
p <- apply(p, 1:2, function(x) sum(x * (seq(x)-1), na.rm=T))
exp <- aggregate(p, by=list(intervals=groups), mean, na.rm=TRUE)[, c(1, index+1)]
exp <- melt(exp, id.vars='intervals', variable.name='items')
exp[, 'type'] <- 'Expected'
data <- rbind(obs, exp)
data$intervals <- as.numeric(levels(data$intervals)[data$intervals])
levels(data$items) <- gsub('V', 'Item ', levels(data$items))
ggplot(data, aes_string('intervals', 'value', color='type', group='type')) +
geom_line() + facet_wrap(~items) + xlab(expression(theta)) + ylab('Probability') +
scale_color_discrete(guide=guide_legend("")) + theme_bw()
}
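## Added end-to-end sketch (not part of the original source): simulate responses,
## calibrate with MMLE, then check item fit for a few items using the functions
## defined above.
# x <- model_grm_gendata(1000, 20, 3)
# fit <- model_grm_mmle(x$u, iter=30)
# model_grm_fitplot(x$u, fit$t, fit$a, fit$b, index=c(1, 2, 3))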
|
/R/estimate_grm.R
|
no_license
|
xluo11/Rirt
|
R
| false | false | 15,411 |
r
|
|
# Load TMB, which provides compile(), MakeADFun() and dynlib() used below
library(TMB)
Version = "linear_mixed_model"
# Simulate data for a linear mixed model with random intercepts:
set.seed(1)
Factor = rep(1:10, each=10)
Z = rnorm(length(unique(Factor)), mean=0, sd=1)
X0 = 0
Y = Z[Factor] + X0 + rnorm( length(Factor), mean=0, sd=1)
# Download CPP file:
setwd(tempdir())
download.file(url="https://raw.githubusercontent.com/James-Thorson/mixed-effects/master/linear_mixed_model/linear_mixed_model.cpp", destfile="linear_mixed_model.cpp", method="auto")
compile(paste0(Version,".cpp"))
# Generate inputs for TMB:
Data = list("n_data"=length(Y), "n_factors"=length(unique(Factor)), "Factor"=Factor-1, "Y"=Y)
Parameters = list("X0"=-10, "log_SD0"=2, "log_SDZ"=2, "Z"=rep(0,Data$n_factors))
Random = c("Z")
# Build TMB object:
dyn.load(dynlib(Version))
Obj = MakeADFun(data=Data, parameters=Parameters, random=Random) #
# Check that TMB is working properly:
Obj$fn(Obj$par)
# This should return 313.4137.
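# Added illustration (not in the original script): the usual next step is to
# optimize the objective and summarize the fixed effects; nlminb() and
# sdreport() are the standard tools for this.
Opt = nlminb(start=Obj$par, objective=Obj$fn, gradient=Obj$gr)
Report = sdreport(Obj)
summary(Report, select="fixed")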
|
/VAST_exploration/TMB_assist.R
|
no_license
|
jlmorano/Reference-R-scripts
|
R
| false | false | 922 |
r
|
|
## Prepare plots and tables for report
## Before:
## After:
library(icesTAF)
mkdir("report")
sourceTAF("report_plots.R")
sourceTAF("report_tables.R")
#sourceTAF("report_doc.R")
|
/Misc/WKTAF/example-stock-assessment-start/report.R
|
no_license
|
ices-eg/wg_WGEEL
|
R
| false | false | 182 |
r
|
|
options(warn=-1)
suppressPackageStartupMessages(library(affy))
opt = read.delim(file="options.mi", sep="\t", header=F)
opt=as.vector(as.matrix(opt))
setwd("Cel Files")
rawdata = ReadAffy()
norm = suppressMessages(expresso(rawdata, bgcorrect.method=opt[1],normalize.method=opt[2],pmcorrect.method=opt[3],summary.method=opt[4]))
e = exprs(norm)
e2 = cbind(rownames(e),e)
e3 = rbind(c("Probeset ID",colnames(e)),e2)
setwd("../")
write(file="norm.txt", paste("### Date: ", Sys.time(),sep=""),append=F)
write(file="norm.txt", paste("### Background Correction Method:",opt[1]),append=T)
write(file="norm.txt", paste("### Normalization Method:",opt[2]),append=T)
write(file="norm.txt", paste("### PM Correction Method:",opt[3]),append=T)
write(file="norm.txt", paste("### Summarization Method:",opt[4]),append=T)
write(file="norm.txt", "",append=T)
write(file="norm.txt", t(e3), ncol=ncol(e3), sep="\t",append=T)
options(warn=0)
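# Added note (assumption, not part of the original script): options.mi is read
# as a single tab-separated row whose four entries feed expresso() in the order
# background correction, normalization, PM correction, summarization. A file
# holding standard RMA settings could be written like this:
# writeLines(paste("rma", "quantiles", "pmonly", "medianpolish", sep="\t"),
#            "options.mi")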
|
/RMA_Norm.r
|
no_license
|
muratisbilen/NormalizationTool
|
R
| false | false | 922 |
r
|
|
#' Map names from ped file (modified to have phylip-compliant names)
#' to the original ped file with full names
#'
#' @param from.names leaf names as they appear in the modified (phylip-compliant) ped
#' @param to.names the corresponding full names from the original ped
#' @return formatting function
map.names <- function(from.names, to.names) {
return(function(x) {
m <- match(x, from.names)
if (is.na(m)) {
x
}
else {
to.names[m]
}
})
}
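## Added usage sketch (not part of the original source): map.names() returns a
## per-element formatter suitable for the label.formatter argument of
## radial.phylog() below; the names and the 'tree' object are illustrative.
# short.names <- c("s1", "s2", "s3")
# full.names <- c("Sample_001", "Sample_002", "Sample_003")
# fmt <- map.names(short.names, full.names)
# fmt("s2")  # returns "Sample_002"
# radial.phylog(tree, label.formatter=fmt)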
#' Draw a radial tree for an ade4 phylog object.
#'
#' Parameter names use the following terminology:
#' * branch: the lines/edges in the tree
#' * leaf: the terminal branches of the tree
#' * node: a circle drawn at the point where a branch splits, or
#' at the end of a leaf ('leaf node')
#' * pointer: a line connecting the end of a leaf branch to its
#' label
#' * center: the middle of the circle
#'
#' @param phylog an `ade4 phylog` object, or the name of a file containing
#' a tree in Newick format
#' @param circle multiplier for the size of the tree (see details)
#' @param show.leaves logical; turn plotting of leaf labels and
#' decorations on/off
#' @param labels.leaves leaf names in the correct order; defaults to
#' the names provided in the tree object
#' @param label.formatter function to format labels
#' @param cex.leaves, fg.text.leaves size and color of leaf labels
#' @param ragged.leaves logical; whether to draw all labels at the
#' same distance from the center (FALSE) or draw each label at the
#' end of its branch (TRUE)
#' @param pad.leaves padding between the end of the branch/pointer
#' and the start of the label text
#' @param show.leaf.nodes logical; whether to draw leaf node circles
#' @param bg.leaf.nodes, pch.leaf.nodes, cex.leaf.nodes color, shape
#' and size of leaf nodes
#' @param number.leaves whether to add a sequential number to each
#' label, e.g. so you can refer to individual nodes in a figure
#' legend
#' @param cex.leaf.numbers size of the leaf numbers; defaults to
#' `cex.leaves`
#' @param labelptr.show whether to show pointers
#' @param labelptr.col, labelptr.pad color and padding space for pointers
#' @param show.nodes whether to draw circles for internal nodes
#' @param labels.nodes node labels; defaults to labels provided in
#' tree object; set to NULL to not draw labels
#' @param pch.nodes, cex.nodes node circle shape and size
#' @param fg.nodes, bg.nodes node colors
#' @param bg.colramp.nodes instead of specifying `bg.nodes`, use this to
#' specify a color ramp function to derive the node color from the node
#' value (typically a bootstrap value)
#' @param lwd.branch, lwd.branch.default width of branch lines; can be
#' a list to specify different values for each branch (use `lwd.branch.default`
#' if you only want to give different values to a few nodes). Each branch
#' can be given as a two element vector specifying the widths of the left
#' and right branches independently
#' @param col.branch, col.branch.default similar to lwd, but for colors
#' @param col.branch.colramp, weights.branch provide a color ramp function
#' to determine branch colors from weights. Weights default to branch lengths.
#' @param col.center, cex.center color and size of the center node
#' @param legend.nodes, legend.title.nodes whether to show a legend for node
#' colors, and a custom title
#' @param legend.branch, legend.title.branch whether to show a legend for
#' branch weights, and a custom title
#' @param main figure title
#' @param pdf.file, pdf.width, pdf.height save the tree to a PDF file with
#' the given dimensions
#'
#' @details
#' By default, the tree has a radius of 1 on a 4x4 plot. Changing
#' the `circle` parameter changes the tree radius but does not
#' change the plot size, so it takes a bit of trial and error to
#' find the correct device size, `cex.leaves` and other parameters
#' to make a nice looking tree.
radial.phylog <- function (phylog, circle=1,
# leaves
show.leaves=TRUE, labels.leaves=names(phylog$leaves), label.formatter=NULL,
cex.leaves=1, fg.text.leaves='black', ragged.leaves=FALSE, pad.leaves=0,
# leaf nodes
show.leaf.nodes=TRUE, bg.leaf.nodes='black', pch.leaf.nodes=21,
cex.leaf.nodes=1,
# leaf numbering
number.leaves=FALSE, cex.leaf.numbers=cex.leaves,
# pointers
labelptr.show=TRUE, labelptr.col=grey(0.7), labelptr.pad=NULL,
# internal nodes
show.nodes=FALSE, labels.nodes=phylog$nodes, pch.nodes=21, cex.nodes=1,
fg.nodes='black', bg.nodes='white', bg.colramp.nodes=NULL, #adj.nodes=0,
# branches
lwd.branch=1, lwd.branch.default=1, col.branch='black',
col.branch.default='black', col.branch.colramp=NULL, weights.branch=phylog$droot,
# center
col.center='red', cex.center=2,
# node legend
legend.nodes=FALSE, legend.title.nodes="Nodes",
# branch legend
legend.branch=FALSE, legend.title.branch="Branches",
# title
main=NULL,
# device
pdf.file=NULL, pdf.width=7, pdf.height=7) {
if (is.character(phylog)) {
phylog <- newick2phylog(readLines(phylog))
}
if (!inherits(phylog, "phylog"))
stop("Non convenient data")
if (circle < 0)
stop("'circle': non convenient value")
retval <- list(phylog=phylog)
if (!is.null(pdf.file)) {
pdf(pdf.file, pdf.width, pdf.height)
}
leaves.number <- length(phylog$leaves)
leaves.names <- names(phylog$leaves)
if (length(labels.leaves) == 1) {
name.map <- read.table(labels.leaves, sep='=', header=FALSE, colClasses='character', row.names=1)
labels.leaves <- name.map[substr(leaves.names, 2, nchar(leaves.names)), 1]
}
if (length(labels.leaves) != leaves.number) {
labels.leaves <- leaves.names
}
if (!is.null(label.formatter)) {
labels.leaves <- sapply(labels.leaves, label.formatter)
}
nodes.number <- length(phylog$nodes)
nodes.names <- names(phylog$nodes)
if (length(labels.nodes) != nodes.number) {
labels.nodes <- names(phylog$nodes)
}
dis <- phylog$droot
dis <- (dis / max(dis)) * circle
dist.leaves <- dis[leaves.names]
dist.nodes <- dis[nodes.names]
if (nodes.number == 1) {
d.rayon <- circle
}
else {
d.rayon <- circle / (nodes.number - 1)
}
theta <- (2 * pi) / leaves.number
alpha <- theta * (1:leaves.number)
names(alpha) <- leaves.names
x <- dist.leaves * cos(alpha)
y <- dist.leaves * sin(alpha)
ang <- rep(0, length(dist.nodes))
names(ang) <- names(dist.nodes)
ang <- c(alpha, ang)
opar <- par(mar=c(0.1, 0.1, 0.1, 0.1),
oma=c(1 + ifelse(legend.nodes || legend.branch, 1, 0), 1, 1, 1))
on.exit(par(opar))
plot.default(0, 0, type="n", asp=1, xlab="", ylab="", xaxt="n", yaxt="n", xlim=c(-2, 2),
ylim=c(-2, 2), xaxs="i", yaxs="i", frame.plot=FALSE)
if (!is.null(main)) {
title(main, line=-1)
}
if (!is.null(bg.colramp.nodes)) {
if (length(bg.colramp.nodes) == 1) {
bg.colramp.nodes <- brewer.pal(9, bg.colramp.nodes)
}
nodelabs <- as.numeric(labels.nodes)
node.ramp.start <- as.integer(floor(min(nodelabs)))
node.ramp.end <- as.integer(ceiling(max(nodelabs)))
node.ramp <- make.ramp(bg.colramp.nodes, node.ramp.start, node.ramp.end)
bg.nodes <- sapply(nodelabs, node.ramp)
}
if (!is.null(col.branch.colramp)) {
if (length(col.branch.colramp) == 1) {
col.branch.colramp <- brewer.pal(9, col.branch.colramp)
}
branch.ramp.start <- as.integer(floor(min(unlist(weights.branch))))
branch.ramp.end <- as.integer(ceiling(max(unlist(weights.branch))))
branch.ramp <- make.ramp(col.branch.colramp, branch.ramp.start, branch.ramp.end)
col.branch <- lapply(names(phylog$paths), function(x) {
val <- weights.branch[[x]]
if (is.null(val)) {
val <- branch.ramp.start
}
branch.ramp(val)
})
names(col.branch) <- names(phylog$paths)
}
# Draw branches
for (i in 1:length(phylog$parts)) {
w <- phylog$parts[[i]]
but <- names(phylog$parts)[i]
ang[but] <- mean(ang[w])
if (length(col.branch) == 1) {
col <- c(col.branch, col.branch)
}
else {
col <- unlist(col.branch[w])
if (length(col) == 1) {
col <- c(col, col)
}
}
col[is.na(col)] <- col.branch.default
if (length(lwd.branch) == 1) {
lwd <- c(lwd.branch, lwd.branch)
}
else {
lwd <- unlist(lwd.branch[w])
if (length(lwd) == 1) {
lwd <- c(lwd, lwd)
}
}
lwd[is.na(lwd)] <- lwd.branch.default
# perpendicular to radius
b <- range(ang[w])
m <- ((b[2] - b[1]) / 2)
a1.seq <- c(seq(b[1], b[2]-m, by = pi/180), b[2]-m)
a2.seq <- c(seq(b[2]-m, b[2], by = pi/180), b[2])
lines(dis[but] * cos(a1.seq), dis[but] * sin(a1.seq), lwd=lwd[1], col=col[1])
lines(dis[but] * cos(a2.seq), dis[but] * sin(a2.seq), lwd=lwd[2], col=col[2])
# parallel to radius
x1 <- dis[w] * cos(ang[w])
y1 <- dis[w] * sin(ang[w])
x2 <- dis[but] * cos(ang[w])
y2 <- dis[but] * sin(ang[w])
segments(x1, y1, x2, y2, col=col, lwd=lwd)
}
if (is.null(labelptr.pad)) {
labelptr.pad <- d.rayon
}
r.ptr <- ifelse(ragged.leaves, dist.leaves, circle) + labelptr.pad
xptr <- r.ptr * cos(alpha)
yptr <- r.ptr * sin(alpha)
r.leaf <- r.ptr + pad.leaves
xleaf <- r.leaf * cos(alpha)
yleaf <- r.leaf * sin(alpha)
# Draw leaves and leaf labels
if (show.leaves) {
retval$leaves <- list()
for (i in 1:leaves.number) {
drawn <- TRUE
if (is.list(labels.leaves[i])) {
if (is.integer(labels.leaves[[i]][[1]])) {
# This is wrong, but I'm not sure how to get the true point of a plotting symbol
pt.size <- max(convert.size(par("cin"))) * cex.leaf.nodes / 2.75
cols <- unlist(sapply(1:length(labels.leaves[[i]]), function(l)
rep(names(labels.leaves[[i]])[[l]], labels.leaves[[i]][[l]])))
npts <- length(cols)
pt.x <- NULL
pt.y <- NULL
row <- 1
while (npts > 0) {
row.r <- r.leaf + (pt.size * row)
if (length(row.r) > 1) {
row.r <- row.r[i]
}
max.pts <- floor((row.r * theta) / pt.size)
if (max.pts %% 2 == 0) {
max.pts <- max.pts - 1
}
t <- (theta * 0.9) / max.pts
n <- max(min(max.pts, npts), 1)
m <- .alternating(n)
pt.x <- c(pt.x, row.r * cos(alpha[i] + (t * m)))
pt.y <- c(pt.y, row.r * sin(alpha[i] + (t * m)))
npts <- npts - n
row <- row + 1
}
points(pt.x, pt.y, col=cols, pch=20, cex=cex.leaf.nodes)
}
else {
par(srt=alpha[i] * 360/2/pi)
for (col in names(labels.leaves[[i]])) {
lab <- labels.leaves[[i]][[col]]
text(xleaf[i], yleaf[i], lab, adj=0, col=col, cex=par("cex") * cex.leaves)
}
}
}
else if (cex.leaves > 0 && nchar(labels.leaves[i]) > 0) {
par(srt=alpha[i] * 360/2/pi)
col.txt <- ifelse(length(fg.text.leaves) == 1, fg.text.leaves, fg.text.leaves[i])
text(xleaf[i], yleaf[i], labels.leaves[i], adj=0, col=col.txt, cex=par("cex") * cex.leaves)
retval$leaves[[i]] <- c(xleaf[i], yleaf[i])
}
else {
drawn <- FALSE
}
if (drawn && labelptr.show) {
segments(xptr[i], yptr[i], x[i], y[i], col=labelptr.col)
}
if (show.leaf.nodes) {
pch <- ifelse(length(pch.leaf.nodes)==1, pch.leaf.nodes, pch.leaf.nodes[i])
col <- ifelse(length(bg.leaf.nodes)==1, bg.leaf.nodes, bg.leaf.nodes[i])
points(x[i], y[i], pch=pch, bg=col, cex=par("cex") * show.leaves)
}
if (number.leaves) {
par(srt=alpha[i] * 360/2/pi)
text(xptr[i], yptr[i], as.character(i), cex=cex.leaf.numbers)
}
}
}
# Draw center node
points(0, 0, pch=21, cex=par("cex") * cex.center, bg=col.center)
# Draw internal nodes and labels
if (show.nodes) {
delta <- strwidth(as.character(length(dist.nodes)), cex=par("cex") * ifelse(cex.nodes == 0, 0.3, cex.nodes))
for (j in 1:(length(dist.nodes)-1)) {
i <- names(dist.nodes)[j]
#x1 <- (dis[i] - (adj.nodes * circle)) * cos(ang[i])
#y1 <- (dis[i] - (adj.nodes * circle)) * sin(ang[i])
x1 <- dis[i] * cos(ang[i])
y1 <- dis[i] * sin(ang[i])
bg <- ifelse(length(bg.nodes) == 1, bg.nodes, bg.nodes[j])
pch <- ifelse(length(pch.nodes)==1, pch.nodes, pch.nodes[i])
points(x1, y1, pch=pch, bg=bg, cex=par("cex") * show.nodes)
if (cex.nodes > 0) {
par(srt = (ang[i] * 360/2/pi + 90))
fg <- ifelse(length(fg.nodes) == 1, fg.nodes, fg.nodes[j])
text(x1, y1, labels.nodes[j], adj=0.5, cex=par("cex") * cex.nodes, col=fg)
}
}
}
# Draw a legend
draw.node.leg <- !is.null(bg.colramp.nodes) && legend.nodes
draw.branch.leg <- !is.null(col.branch.colramp) && legend.branch
if (draw.node.leg && draw.branch.leg) {
.radial.phylog.legend(bg.colramp.nodes, node.ramp.start, node.ramp.end, legend.title.nodes, 0.1, 0.4)
.radial.phylog.legend(col.branch.colramp, branch.ramp.start, branch.ramp.end, legend.title.branch, 0.6, 0.9)
}
else if (draw.node.leg) {
.radial.phylog.legend(bg.colramp.nodes, node.ramp.start, node.ramp.end, legend.title.nodes)
}
else if (draw.branch.leg) {
.radial.phylog.legend(col.branch.colramp, branch.ramp.start, branch.ramp.end, legend.title.branch)
}
if (!is.null(pdf.file)) {
dev.off()
}
return(invisible(retval))
}
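# Internal helper: .alternating(n) returns n symmetric offsets centred on zero
# (e.g. .alternating(4) gives -1.5 -0.5 0.5 1.5); used above to fan out the
# coloured dots drawn in place of a leaf label.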
.alternating <- function(n) {
if (n %% 2 == 0) {
x <- (n / 2) - 0.5
}
else {
x <- floor(n / 2)
}
seq(-x, x)
}
.radial.phylog.legend <- function(grad, ramp.start, ramp.end, legend.title, x1=0.3, x2=0.7) {
par(srt=0)
ncol <- length(grad)
y2 <- grconvertY(0, 'nfc', 'user')
par(xpd=NA)
x1 <- grconvertX(x1, 'ndc', 'user')
x2 <- grconvertX(x2, 'ndc', 'user')
x <- seq(x1, x2, (x2 - x1) / ncol)
y1 <- grconvertY(0, 'ndc', 'user')
y2 <- y1 + (0.6 * (y2 - y1))
rect(x[1:(length(x)-1)], y1, x[2:length(x)], y2, col=grad, border=NA)
ymid <- y1 + ((y2 - y1) / 2)
text(x1, ymid, as.character(ramp.start), pos=2, cex=0.8)
text(x2, ymid, as.character(ramp.end), pos=4, cex=0.8)
text(x1 + ((x2 - x1) / 2), y2, legend.title, pos=3, cex=0.8)
}
|
/R/ade4_phylo.R
|
no_license
|
jdidion/fancyplots
|
R
| false | false | 15,574 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/faoswsFlagTable.R
\docType{data}
\name{faoswsFlagTable}
\alias{faoswsFlagTable}
\title{Flag table for the new FAO statistical working system}
\description{
The table maps the relationship between the observational status
flag and its corresponding information weight
}
\keyword{datasets}
|
/man/faoswsFlagTable.Rd
|
no_license
|
mkao006/sws_flag
|
R
| false | true | 368 |
rd
|
### Simulating ironX and DISTANCE sampling
## rm(list = ls())
## library (spatstat)
### Plotting commands below
## Random basic
## Set up equations for the random sine curve walk
f=3 #frequency of random walk sine curve
A=0.25 #amplitude of random walk in km
w=.001 #half-width of sampling walk
yp10<-function(x) A*sin(x*f*pi/10) + w #lines(x,ym10(x),type="l")
ym10<-function(x) A*sin(x*f*pi/10) - w #lines(x,yp10(x),type="l")
yp8<-function(x) A*sin(x*f*pi/3.826834) + w #lines(x,ym8(x),type="l") 7.653669/2
ym8<-function(x) A*sin(x*f*pi/3.82683) - w #lines(x,yp8(x),type="l")
inputs<-list()
inputs[[1]] <- sampledf<-data.frame(random_basic(n=20),
fecal_prev=0.1) #df of samples
inputs[[2]] <- ironX0
inputs[[3]] <- nironXs <- 1 # number of ironXs
inputs[[4]] <- w
simIXRstokes<-function(inputs)
{
nsegs = 8
xvals<-sample(seq(10,55,by=0.01),inputs[[3]]) #x center points of IronXs
yvals<-sample(seq(10,55,by=0.01),inputs[[3]]) #y center points of ironXs
angles<-c(-pi/4,-pi/2,-3*pi/4,0,25*pi/180,-65*pi/180,25*pi/180,-65*pi/180)
##angles of line segments to horizontal
patterndf<-data.frame(move_ironXs(inputs[[2]],xvals,yvals,nsegs,angles))
##dataframe of x1y1 and x2y2 line segments
distdf<-data.frame(pointdistanceALL(patterndf,inputs[[1]],nsegs=8)) #measure distances d
alldf<-merge(distdf,patterndf,by=c("fignumber","segnumber"))
origindf<-rotmove(alldf)
insidepts10<-origindf[origindf$segnumber<5 &
abs(origindf$x.prime)<10.001,]
#select rows that have values
#that fall between the curves
#x.prime needs to be less than
#10.001
final10<-subset(insidepts10,insidepts10[,18]>ym10(insidepts10[,17])
& insidepts10[,18]<yp10(insidepts10[,17])) #select x y points
#between the 2 functions
insidepts8<-origindf[origindf$segnumber>4 &
abs(origindf$x.prime)<3.828334 ,]
#x.prime needs to be less than
#7.653669/2 +0.001
final8<-subset(insidepts8,insidepts8[,18]>ym8(insidepts8[,17]) &
insidepts8[,18]<yp8(insidepts8[,17]))
#select xy points between the 2 functions
out<-list(xvals,yvals,patterndf,distdf,alldf,origindf,insidepts10,final10,insidepts8,final8)
#out<-list(patterndf,final10,final8)
out
}
#out[[1]]=xvals
#out[[2]]=yvals
#out[[3]]=patterndf - location of iron cross(es)
#out[[4]]=distdf - distance from each sample point (px,py) to nearest
#iron cross line segment, by fig number and segment number (1-8)
#out[[5]]=alldf - merge of
#patterndf and distdf by fig
#number and segment number
#(nx1,ny1) and (nx2,ny2) and
#(ncxs,ncys) are the vertices
#and center points of
#distributed iron crosses
#out[[6]]=origindf, x.prime
#and y.prime, sample points
#referenced to the origin (0,0), merged into alldf
#out[[7]]=insidepts10 all px and py points converted to x.prime and y.prime that
#associate with segments 10 km in length
#out[[8]]=just the insidepts10 that fall within the sine curves
#out[[9]]=insidepts8 all px and py points converted to x.prime and
#y.prime that associate with segments 7.6km in length
#out[[10]]=just the insidepts8 that fall within the sine curves
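## Illustrative usage sketch: assumes the helper functions random_basic(),
## move_ironXs(), pointdistanceALL(), rotmove() and the ironX0 template used
## above are already loaded in the session.
sim <- simIXRstokes(inputs)
str(sim[[3]])                      # placement of the iron cross(es)
nrow(sim[[8]]) + nrow(sim[[10]])   # sample points falling inside the sine-curve walks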
|
/recce1/func-simIXRstokes.r
|
no_license
|
soride/Rprojects
|
R
| false | false | 3,117 |
r
|
matchInventoryRings <- function(trees,rings,extractor="TreeCode",nyears=30,coredOnly=TRUE){
## build tree codes
id.build = function(x){do.call(paste0("to.",extractor),x)}
names(trees) = toupper(names(trees))
tree.ID = id.build(list(SITE=trees$SITE,PLOT=trees$PLOT,SUB=trees$SUB,TAG=trees$TAG))
## build tree ring codes
if(is.list(rings)){
ring.file <- rep(names(rings),times=sapply(rings,ncol))
rings <- combine.rwl(rings)
}
ring.ID <- names(rings)
id.extract = function(x){do.call(paste0("from.",extractor),list(x=x))}
ring.info <- id.extract(ring.ID)
## matching up data sets by tree
mch = match(tree.ID,ring.ID)
cored = apply(!is.na(trees[,grep("DATE_CORE_COLLECT",names(trees))]),1,any)
unmatched = which(cored & is.na(mch))
write.table(tree.ID[unmatched],file="unmatched.txt")
  mch[duplicated(mch)] <- NA ## if there are multiple stems, match the first
## combine data into one table
combined = cbind(trees,t(as.matrix(rings))[mch,-(nyears-1):0 + nrow(rings)])
if(coredOnly==TRUE){
combined = combined[!is.na(combined$"2000"),]
}
return(combined)
}
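## Illustrative call (hypothetical file names; assumes the inventory table has the
## SITE/PLOT/SUB/TAG and DATE_CORE_COLLECT columns used above, and that the ring
## widths are dplR-style series):
## trees <- read.csv("inventory.csv")
## rings <- dplR::read.rwl("cores.rwl")
## combined <- matchInventoryRings(trees, rings, extractor="TreeCode", nyears=30)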
|
/modules/data.land/R/matchInventoryRings.R
|
permissive
|
davidjpmoore/pecan
|
R
| false | false | 1,109 |
r
|
#source: http://tidytextmining.com/tidytext.html#contrasting-tidy-text-with-other-data-structures
library(data.table)
library(tidytext)
library(dplyr)
library(ggplot2)
library(stringr) # for str_extract() below
train <- fread("train.csv")
train_sample <- sample_frac(train, size = 0.1)
dim(train_sample)
names(train_sample)
words <- train_sample %>%
unnest_tokens(word, desc) %>%
anti_join(stop_words) %>%
mutate(word = str_extract(word, "[a-z']+"))
#most frequent words:
words %>%
filter(is.na(word) == FALSE) %>%
count(word, sort = TRUE) %>%
filter(n > 400) %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n)) +
geom_col() +
xlab(NULL) +
coord_flip()
words
|
/analise textual.R
|
no_license
|
marcoschoma/machine-learning
|
R
| false | false | 652 |
r
|
#### BASICS ####
# What's new:
x <- c(1,7,4,10,2,14,15,4,9,6,12.5,122,1675,165,0.1,1)
y <- cut(x, c(0,5,10,15,100,2000))
plot(stack.loss ~ Water.Temp, data=stackloss) # Quanti/Quanti
plot(weight ~ feed, data=chickwts) # Quanti/Quali
# 2 separate plots
plot(stack.loss ~ Water.Temp + Air.Flow, data=stackloss)
# a single 3D plot
require(scatterplot3d)
par(mfrow=c(1,1))
with(stackloss,scatterplot3d(stack.loss ~ Water.Temp + Air.Flow))
# Simple linear model
lm(stack.loss ~ Water.Temp, data=stackloss)
# Multiple linear model
lm(stack.loss ~ Water.Temp + Air.Flow, data=stackloss)
# Simple linear model without intercept (constant term)
lm(stack.loss ~ Water.Temp - 1, data=stackloss)
lm(weight ~ feed, data=chickwts)
lm(weight ~ feed - 1, data=chickwts) # No intercept, but the reference level of the previous model now appears explicitly
# Reading Data :
chordata <- read.table("chordatafile.txt", header=TRUE)
chordata <- read.csv("chordatafile.csv")
# ppp object
# window = rectangle
chordata <- read.table("chordatafile.txt", header=TRUE)
east <- chordata$Easting
north <- chordata$Northing
X <- ppp(east, north, c(174,178), c(29,33))
# or, more elegantly
X <- with(chordata, ppp(Easting, Northing, c(174,178), c(29,33)))
# For other formats (not a rectangle, see page 58)
# Missing/aberrant value problems
x <- c(1.5, 2.1, 4.0, -999, 2.2, 3.8, 0.9, -999, 1.9)
stem(x)
x[x == -999] <- NA
x
# Function converted to a pixel image
f <- function(x,y){15*(cos(2*pi*sqrt((x-3)^2+(y-3)^2)))^2}
A <- as.im(f, W=square(6))
plot(A)
# Factor valued pixel image
vec <- rep(1:3, each = 400)
mat <- matrix(vec, nrow=40, ncol=30)
f <- factor(mat)
is.factor(f)
is.matrix(f)
factorim <- im(f,xcol=seq(0,1,length=30),yrow=seq(0,1,length=40))
factorim
plot(factorim)
dim(f) <- c(40,30)
factorim <- im(f)
factorim
plot(factorim)
# Rectangular window
owin(c(0,3), c(1,2))
# Square window
square(5)
square(c(1,3))
# Polygonal window
Z <- owin(poly=list(x=c(0,10,0), y=c(0,0,10)))
plot(Z)
# Polygon with a hole
ZH <- owin(poly=list(list(x=c(0,8,0),y=c(0,0,8)),list(x=c(2,2,3,3), y=c(2,3,3,2))))
plot(ZH)
# Circular and elliptical windows
par(mfrow=c(2,2))
W <- disc(radius=2, centre=c(0,0))
plot(W)
# Plotting a point pattern
# 3 data types: unmarked point pattern, multitype point pattern, point pattern with numeric marks
# Graphics options: pch
(plot(amacrine))
(plot(longleaf))
# Works for markformat=vector
A <- colourmap(heat.colors(128), range=range(longleaf$marks))
plot(longleaf, pch=21, bg=A, cex=1)
# Pour Bei :
# Idée : plot(bei$x,bei$y,pch=21, bg=(heat.colors(128), range=range(bei.extra$grad$v)),cex=1)
juveniles <- subset(longleaf, marks <= 30)
a <- plot(longleaf)
plot(juveniles, symap=a)
# Symbol maps can be created
g1 <- symbolmap(inputs=letters[1:10], pch=11:20)
plot(g1)
g2 <- symbolmap(range=c(0,100), size=function(x) {x/50})
plot(g2)
g3 <- update(g2, col="red")
plot(g3)
g4 <- update(g3, col=function(x) ifelse(x < 30, "red", "black"))
plot(g4)
juveniles <- subset(longleaf, marks <= 30)
a <- plot(longleaf)
plot(juveniles, symap=g3)
# Add a legend:
g2 <- symbolmap(range=c(-1,1),shape=function(x) ifelse(x > 0, "circles", "squares"),
size=function(x) sqrt(ifelse(x > 0, x/pi, -x)),
bg = function(x) ifelse(abs(x) < 1, "red", "black"))
plot(g2, vertical=TRUE, side="left", col.axis="blue", cex.axis=2)
# Plotting a window
plot(murchison$greenstone, main = "")
plot(square(c(-1,1)), main = "")
plot(ellipse(1,.5), col = rgb(0,0,0,.2), add = TRUE)
plot(ellipse(.5,1), col = rgb(0,0,0,.2), add = TRUE)
# Plotting an image
g <- colourmap(rainbow(128), range=c(0,100))
plot(g)
h <- colourmap(c("green", "yellow", "red"), inputs=c("Low", "Medium", "High"))
plot(h)
# Perspective image
par(mfrow=c(2,2))
persp(bei.extra$elev, expand=6,theta=-30, phi=20,
colmap=terrain.colors(128),shade=0.2,apron=TRUE, main="", box=FALSE)
M <- persp(bei.extra$elev,theta=-45, phi=18, expand=6,
colmap=terrain.colors(128),border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE)
perspPoints(bei, Z=bei.extra$elev, M=M, pch=16,cex=0.2)
M <- persp(bei.extra$grad,theta=-45, phi=18, expand=17,
colmap=terrain.colors(128),border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE)
perspPoints(bei, Z=bei.extra$grad, M=M, pch=16,cex=0.2)
# Transect: diagonal line (bottom left to top right)
with(bei.extra,plot(transect.im(elev)))
with(bei.extra,plot(transect.im(grad)))
# Distribution of the pixel values
with(bei.extra, hist(elev, freq=TRUE))
with(bei.extra, plot(spatialcdf(elev, normalise=TRUE)))
with(bei.extra, hist(grad, freq=TRUE))
with(bei.extra, plot(spatialcdf(grad, normalise=TRUE)))
# Interactive Plotting
plot(amacrine)
identify(amacrine)
# Plotting several objects
X <- layered(density(cells), cells)
layerplotargs(X)[[2]] <- list(pch=16)
plot(X, main="")
# Bei
# Layered Plots
X <- layered(density(bei), bei)
layerplotargs(X)[[2]] <- list(pch=16)
plot(X, main="",cex=0.35)
# "Listof" Objects
X <- swedishpines
QC <- quadratcount(X)
QCI <- as.im(X,dimyx=5)
DI <- density(X)
L <- listof(X, QC, QCI, DI)
names(L) <- c("Swedish pines pattern", "Quadrat count image",
"Quadrat counts", "Estimated intensity")
plot(L)
# Plotting several images
pairs(density(split(lansing)[c(2,3,5)]))
# Correlation between the estimated densities
L <- density(split(lansing)[c(2,3,5)])
df <- pairs(L, plot=FALSE)
co <- cor(df)
round(co, 2)
# Nearest-neighbour distance
nndist(redwood)
# Window types
W <- Window(clmfires)
U <- simplify.owin(W,10) # Fewer boundary vertices (~200), less precise outline
par(mfrow=c(1,2))
plot(W)
plot(U)
# Extracting subsets of a point pattern
# Subset defined by an index
bei
bei[1:10]
bei[-(1:10)]
swedishpines[nndist(swedishpines) > 10]
longleaf[marks(longleaf) >= 42]
# Subset defined by a window
W <- owin(c(100,800), c(100,400))
W
bei
bei[W]
# Subset defined by an expression
subset(cells, x > 0.5 & y < 0.4)
subset(longleaf, marks >= 42)
subset(finpines, diameter > 2 & height < 4)
subset(finpines, diameter > 2, select=height)
subset(nbfires, year == 1999, select=cause:fnl.size)
subset(finpines, select = -height)
# Manipulating Marks
par(mfrow=c(2,1))
plot(anemones)
plot(unmark(anemones))
radii <- rexp(npoints(redwood), rate=10)
plot(redwood)
str(redwood)
str(redwood %mark% radii)
plot(redwood %mark% radii)
# Bei, elev
par(mfrow=c(2,2))
image(bei.extra$elev);points(bei,pch=3,cex=.4)
elev <- bei.extra$elev
Yelev <- bei %mark% elev[bei]
A <- colourmap(heat.colors(16), range=range(Yelev$marks))
plot(Yelev, pch=21, bg=A, cex=1)
# Bei, grad
image(bei.extra$grad);points(bei,pch=3,cex=.4)
grad <- bei.extra$grad
Ygrad <- bei %mark% grad[bei]
B <- colourmap(heat.colors(16), range=range(Ygrad$marks))
plot(Ygrad, pch=21, bg=B, cex=1)
# Colour :
pie(rep(1, 12), col = rainbow(12))
# Palette
demo.pal <-
function(n, border = if (n < 32) "light gray" else NA,
main = paste("color palettes; n=", n),
ch.col = c("rainbow(n, start=.7, end=.1)", "heat.colors(n)",
"terrain.colors(n)", "topo.colors(n)",
"cm.colors(n)"))
{
nt <- length(ch.col)
i <- 1:n; j <- n / nt; d <- j/6; dy <- 2*d
plot(i, i+d, type = "n", yaxt = "n", ylab = "", main = main)
for (k in 1:nt) {
rect(i-.5, (k-1)*j+ dy, i+.4, k*j,
col = eval(parse(text = ch.col[k])), border = border)
text(2*j, k * j + dy/4, ch.col[k])
}
}
n <- if(.Device == "postscript") 64 else 16
# Since for screen, larger n may give color allocation problem
demo.pal(n)
X <- amacrine
plot(X)
str(amacrine)
marks(X) <- data.frame(type=marks(X), nn=nndist(amacrine))
str(marks(X))
plot(marks(X))
Y <- finpines
plot(Y)
str(Y)
vol <- with(marks(Y), (100 * pi/12) * height * diameter^2)
marks(Y) <- cbind(marks(Y), volume=vol)
str(marks(Y))
plot(marks(Y))
par(mfrow=c(2,2))
plot(longleaf)
Y <- cut(longleaf, breaks=c(0,5, 20, Inf))
plot(Y)
Y <- cut(longleaf, breaks=3)
plot(Y)
# Converting to another unit of length
lansing
rescale(lansing)
# metres -> kilometres
murchison
murch2 <- lapply(murchison, rescale, s=1000, unitname="km")
murch2 <- as.listof(murch2)
# Geometrical transformations
chorley
plot(chorley)
rotate(chorley, pi/2, centre="centroid")
plot(rotate(chorley, pi/2, centre="centroid"))
# Random perturbations of a point pattern
# rjitter : displaces each point of the pattern by a small random distance
par(mfrow=c(1,2))
X <- rsyst(owin(), 10, 10)
Y <- rjitter(X, 0.02)
plot(X)
plot(Y)
# rshift : applies the same random shift
par(mfrow=c(3,2))
plot(amacrine)
# random toroidal shift
# shift "on" and "off" points separately
X <- rshift(amacrine)
plot(X)
# shift "on" points and leave "off" points fixed
X <- rshift(amacrine, which="on")
plot(X)
# shift all points simultaneously
X <- rshift(amacrine, group=NULL)
plot(X)
# maximum displacement distance 0.1 units
X <- rshift(amacrine, radius=0.1)
plot(X)
# shift with erosion
X <- rshift(amacrine, radius=0.1, edge="erode")
plot(X)
# rlabel : randomly assigns new mark values to the points in a pattern
par(mfrow=c(2,2))
plot(amacrine)
# Randomly permute the marks "on" and "off"
# Result always has 142 "off" and 152 "on"
Y <- rlabel(amacrine)
plot(Y)
# randomly allocate marks "on" and "off"
# with probabilities p(off) = 0.48, p(on) = 0.52
Y <- rlabel(amacrine, permute=FALSE)
plot(Y)
par(mfrow=c(1,2))
# randomly allocate marks "A" and "B" with equal probability
data(cells)
plot(cells)
Y <- rlabel(cells, labels=factor(c("A", "B")), permute=FALSE)
plot(Y)
# quadratresample : performs a block resampling procedure
par(mfrow=c(1,2))
data(bei)
plot(bei)
X<-quadratresample(bei, 6, 3)
plot(X)
# rthin : randomly deletes some of the points
par(mfrow=c(2,2))
data(redwood)
plot(redwood, main="thinning")
# delete 20% of points
Y <- rthin(redwood, 0.8)
points(Y, col="green", cex=1.4)
plot(Y)
# function
f <- function(x,y) { ifelse(x < 0.4, 1, 0.5) }
Y <- rthin(redwood, f)
plot(redwood, main="thinning")
points(Y, col="green", cex=1.4)
plot(Y)
# pixel image
Z <- as.im(f, redwood$window)
Y <- rthin(redwood, Z)
plot(redwood, main="thinning")
points(Y, col="green", cex=1.4)
plot(Y)
# Splitting a point pattern into sub-patterns
lansing
plot(lansing)
V <- split(lansing)
plot(V)
A <- lapply(V, adaptive.density)
plot(as.listof(A))
plot(amacrine)
plot(split(amacrine))
plot(as.listof(lapply(split(amacrine), adaptive.density)))
A <- by(lansing, FUN=adaptive.density)
plot(A)
# Un-splitting
# points with type Messor have been slightly displaced.
par(mfrow=c(2,1))
X <- ants
u <- split(X)
plot(u)
u$Messor <- rjitter(u$Messor,20)
split(X) <- u
plot(split(X))
# Combining point patterns
par(mfrow=c(2,2))
plot(ants)
plot(split(ants))
X <- superimpose(u$Messor, u$Cataglyphis)
plot(X)
X <- superimpose(Cataglyphis=u$Cataglyphis, Messor=u$Messor)
plot(X)
par(mfrow=c(2,2))
X <- runifpoint(50, square(c(0,2)))
Y <- runifpoint(50, square(c(1,3)))
plot(X)
plot(Y)
A=superimpose(X,Y)
plot(A)
B=superimpose(X, Y, W=square(3))
plot(B)
# Basic summaries of point patterns and windows
summary(chorley)
plot(chorley)
plot(split(chorley))
intensity(chorley) # mean number of points per unit area
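# Quick check (sketch): the overall intensity (both mark types combined) is just
# the number of points divided by the window area
npoints(chorley) / area.owin(Window(chorley))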
D=density(chorley)
plot(D)
pairdist(chorley) # matrix of distances between all pairs of points
nndist(chorley) # nearest-neighbour distances
nnwhich(chorley) # index of the nearest neighbour of each point
# marks(X): factor
table(marks(chorley))
barplot(table(marks(chorley)))
# If marks(X) is numeric
hist(marks(X))
plot(ecdf(marks(X)))
plot(density(marks(X)))
smooth(X)
markmean(X)
markvar(X)
# See page 108 for all the useful commands
# Example dataset: nztrees (/bei)
nztrees # [0;153]x[0;95]
plot(nztrees)
contour(density(nztrees, 10), axes=FALSE)
plot(density(nztrees))
# contour(density(bei, 25), axes=FALSE)
# plot(bei, add=TRUE)
hist(coords(nztrees)$x)
# To remove the top-right corner (where the intensity is too high)
chopped <- owin(c(0,148),c(0,95))
# or
win <- Window(nztrees)
chopped <- trim.rectangle(win, xmargin=c(0,5), ymargin=0)
nzchop <- nztrees[chopped]
summary(nzchop)
plot(density(nzchop,10))
plot(nzchop, add=TRUE)
contour(density(nzchop, 10), axes=FALSE)
plot(nzchop, add=TRUE)
# Exploring images
elev[list(x=142,y=356)] # value of the pixel images
# Interactively
plot(elev)
elev[locator(1)]
# Choose a sub-region
S <- owin(c(200,300), c(100,200))
plot(elev[S])
# Interactively
plot(elev)
S <- clickpoly(add=TRUE)
plot(elev[S, drop=FALSE, tight=TRUE])
# Pixel arithmetic
Y <- eval.im(Z + 10)
C <- eval.im(A + B)
eval.im(sqrt(Z))
eval.im(sin(pi * Z))
eval.im(Z > 3)
eval.im(log(X) + Y - 3)
eval.im(if(Z < 3) 3 else 1)
eval.im(if(X < Y) 3 else 1) ## Throws an error (if() is not vectorised).
eval.im(ifelse(X < Y, 3, 1)) ## Works
ecdf(Z)
# ...
# Manipulating images
elev <- bei.extra$elev
W <- levelset(elev, 145, ">")
plot(W)
points(bei,cex=.1)
grad <- bei.extra$grad
V <- solutionset(elev <= 140 & grad > 0.1)
plot(V)
points(bei,cex=.1)
# Tessellations (division of the window into non-overlapping tiles)
# Rectangular
T=tess(xgrid=c(0,.5,1), ygrid=c(0,.5,1))
plot(T)
# or
T1=quadrats(bei$w, 5, 10)
plot(T1)
# Tile list (mosaic format)
S <- owin(c(200,300), c(100,200))
S1 <- owin(c(200,211), c(80,525))
T2=tess(tiles=list(S,S1))
plot(T2)
# Tessellation pixel image
T3=tess(image=D)
plot(T3)
# Computed tessellations
par(mfrow=c(2,2))
Z=dirichlet(bei)
plot(bei)
plot(Z)
Z=delaunay(bei)
plot(Z)
# Operations involving a tessellation
par(mfrow=c(2,2))
X <- runifpoint(10)
plot(X)
V <- dirichlet(X)
plot(V)
U <- tiles(V)
unlist(lapply(U, area)) # area of each tile
# plot(U) decomposition
cut(X,V)
plot(cut(X,V))
split(X,V)
plot(split(X,V))
# Application Bei, Dirichlet/Voronoi (Thiessen)
# Does not work (decomposition too large)
plot(bei)
V<-dirichlet(bei)
plot(V)
U <- tiles(V)
unlist(lapply(U, area)) # area of each tile
# plot(U) decomposition
cut(bei,V)
plot(cut(bei,V))
split(bei,V)
plot(split(bei,V))
# Application to bei, Delaunay (does not work, decomposition too large)
plot(bei)
V<-delaunay(bei)
plot(V)
U <- tiles(V)
unlist(lapply(U, area)) # area of each tile
# plot(U) decomposition
cut(bei,V)
plot(cut(bei,V))
split(bei,V)
plot(split(bei,V))
# Overlay of 2 tessellations
T=tess(xgrid=c(0,.5,1), ygrid=c(0,.5,1))
plot(T)
X <- runifpoint(10)
plot(X)
V <- dirichlet(X)
plot(V)
I=intersect.tess(T,V)
plot(I)
# Reduce the white space around point pattern plots
plot(bei)
par(mar=rep(0.5, 4))
plot(bei)
# Package Rpanel
plot(amacrine)
identify(amacrine)
iplot(amacrine)
#### Exploratory Data Analysis ####
# Estimating homogeneous intensity
X <- rescale(swedishpines)
W <- as.owin(X)
lam <- intensity(X)
sdX <- sqrt(lam/area.owin(W))
sdX
unitname(amacrine)
X <- rescale(amacrine, 1000/662)
unitname(X) <- "mm"
intensity(amacrine)
intensity(X)
sum(intensity(X))
intensity(unmark(X))
finpines
height <- marks(finpines)$height
diameter <- marks(finpines)$diameter
volume <- (pi/12) * height * (diameter/100)^2
# ou
volume <- with(marks(finpines),(pi/12) * height * (diameter/100)^2)
intensity(finpines)
unitname(finpines)
intensity(finpines, weights=volume)
# Quadrat counts
par(mfrow=c(1,2))
Q3 <- quadratcount(swedishpines, nx=3, ny=3)
Q3
plot(Q3)
points(swedishpines)
L3<-intensity(Q3,image=TRUE)
plot(L3)
H <- hextess(swedishpines, 10)
hQ<-quadratcount(swedishpines, tess=H)
plot(hQ)
L3<-intensity(hQ,image=TRUE)
plot(L3)
# Quadrat counting test of homogeneity
tS <- quadrat.test(swedishpines, 3,3)
tS
tS$p.value
plot(swedishpines)
plot(tS,add=TRUE)
quadrat.test(swedishpines, 5, alternative="regular",method="MonteCarlo")
# Smoothing estimation of intensity function
# Gaussian kernel
par(mfrow=c(1,2))
den <- density(swedishpines, sigma=10)
plot(den)
# Autre méthodes
# Pour trouver le meilleur sigma, CV
b <- bw.ppl(swedishpines)
plot(b)
plot(b, xlim=c(30,60))
den1 <- density(swedishpines, sigma=b)
plot(den1)
D <- density(swedishpines, sigma=bw.diggle(swedishpines))
plot(D)
# Estimation of intensity at the data points
dX <- density(swedishpines, sigma=10, at="points")
plot(dX)
dX[1:5]
# Computation
den <- density(swedishpines, sigma=10)
denXpixel <- den[swedishpines]
denXpixel[1:5]
denXexact <- density(swedishpines, sigma=10, at="points",leaveoneout=FALSE)
denXexact[1:5]
# Weighted kernel estimators
vols <- with(marks(finpines),(pi/12) * height * (diameter/100)^2)
Dvol <- density(finpines, weights=vols, sigma=bw.ppl)
plot(density(finpines))
plot(Dvol)
# Spatially adaptive smoothing
par(mfrow=c(2,2))
plot(density(swedishpines))
aden <- adaptive.density(swedishpines, f=0.1, nrep=30)
nden <- nndensity(swedishpines, k=10)
plot(aden)
plot(nden)
# Projections, transformations, change of coordinates
grad <- bei.extra$grad
dens.map <- density(bei, W=grad)
dens.ter <- eval.im(dens.map * sqrt(1+grad^2))
persp(bei.extra$grad, colin=dens.ter,expand=6,theta=-30, phi=20,shade=0.2,apron=TRUE)
dens.ter2 <- density(bei, weights=sqrt(1+grad[bei]^2))
persp(bei.extra$grad, colin=dens.ter2)
# Investigating dependence of intensity on a covariate
# Spatial covariates
# Quadrats determined by a covariate
elev <- bei.extra$elev
b <- quantile(elev, probs=(0:4)/4)
Zcut <- cut(elev, breaks=b, labels=1:4)
V <- tess(image=Zcut)
V
plot(V)
qb <- quadratcount(bei, tess=V)
qb
plot(qb)
lam <- intensity(qb)
L3<-intensity(qb,image=TRUE)
plot(L3)
b5 <- seq(0, 5 * ceiling(max(elev)/5), by=5)
Zcut5 <- cut(elev, breaks=b5, include.lowest=TRUE)
Q5 <- quadratcount(bei, tess=tess(image=Zcut5))
plot(Q5)
lam5 <- intensity(Q5)
L3<-intensity(Q5,image=TRUE)
plot(L3)
barplot(lam5)
# Estimation of ρ
# Bei.Elev
rh <- rhohat(bei, elev)
plot(rh)
rhp<-predict(rh)
plot(rhp)
image(bei.extra)
rhf <- as.function(rh)
rhf(130) # Predicted intensity at an elevation of 130 m
# Bei.Grad
rh <- rhohat(bei, grad)
plot(rh)
rhp<-predict(rh)
plot(rhp)
image(bei.extra)
# bei combined (grad + elev)
rh <- rhohat(bei,grad+elev)
plot(rh)
rhp<-predict(rh)
plot(rhp)
points(bei)
# Comparison of estimated intensities
#1
elev <- bei.extra$elev
b <- quantile(elev, probs=(0:4)/4)
Zcut <- cut(elev, breaks=b, labels=1:4)
V <- tess(image=Zcut)
V
plot(V)
qb <- quadratcount(bei, tess=V)
qb
plot(qb)
lam <- intensity(qb)
L4<-intensity(qb,image=TRUE)
plot(L4)
#2
b5 <- seq(0, 5 * ceiling(max(elev)/5), by=5)
Zcut5 <- cut(elev, breaks=b5, include.lowest=TRUE)
Q5 <- quadratcount(bei, tess=tess(image=Zcut5))
plot(Q5)
lam5 <- intensity(Q5)
L5<-intensity(Q5,image=TRUE)
plot(L5)
#3
grad=bei.extra$grad
elev=bei.extra$elev
rh <- rhohat(bei,elev)
plot(rh)
rhp<-predict(rh)
plot(rhp)
points(bei)
par(mfrow=c(2,2))
plot(L4)
plot(L5)
plot(rhp)
image(bei.extra$elev)
M <- persp(bei.extra$elev,theta=-45, phi=18, expand=6,
colmap=terrain.colors(128),border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE)
perspPoints(bei, Z=bei.extra$elev, M=M, pch=16,cex=.5)
Yelev <- bei %mark% elev[bei]
A <- colourmap(heat.colors(16), range=range(Yelev$marks))
plot(Yelev, pch=21, bg=A, cex=1)
plot(bei)
# Differences between:
plot(density(bei))
plot(predict(rhohat(bei,elev+grad)))
# Compare two estimated intensities
# elev with bei
rh <- rhohat(bei, elev)
pred <- predict(rh)
kden <- density(bei, 50)
pairs(pred,kden)
plot(pred)
plot(kden)
P1<-eval.im(kden-pred)
plot(P1)
# grad with bei
rh <- rhohat(bei, grad)
pred <- predict(rh)
kden <- density(bei, 50)
pairs(pred,kden)
plot(pred)
plot(kden)
P2<-eval.im(kden-pred)
plot(P2)
# With 2 covariates
rh <- rhohat(bei,grad+elev)
plot(rh)
rhp<-predict(rh)
plot(rhp)
A<-with(bei.extra, rho2hat(bei, grad, elev))
plot(A)
# Rotation, Distance Map
X <- rotate(copper$SouthPoints, pi/2)
L <- rotate(copper$SouthLines, pi/2)
plot(X)
plot(L,add=TRUE)
Z<-distmap(L)
contour(Z)
plot(Z)
# Formal tests of (non-)dependence on a covariate
Z <- bei.extra$elev
b <- quantile(Z, probs=(0:4)/4)
Zcut <- cut(Z, breaks=b, labels=1:4)
V <- tess(image=Zcut)
quadrat.test(bei, tess=V)->q;plot(q,lwd=2,col='red');points(bei,cex=.1);q
quadrat.test(bei,nx=3)->q;plot(q,lwd=2,col='red');points(bei,cex=.1);q
Z <- bei.extra$grad
b <- quantile(Z, probs=(0:4)/4)
Zcut <- cut(Z, breaks=b, labels=1:4)
V <- tess(image=Zcut)
quadrat.test(bei, tess=V)->q;plot(q,lwd=2,col='red');points(bei,cex=.1);q
quadrat.test(bei,nx=3)->q;plot(q,lwd=2,col='red');points(bei,cex=.1);q
# More Powerful : Kolmogorov-Smirnov test of CSR
elev <- bei.extra$elev
cdf.test(bei, elev)
plot(cdf.test(bei, elev))
cdf.test(swedishpines, "x")
plot(cdf.test(swedishpines, "x"))
# Berman’s tests
elev <- bei.extra$elev
B <- berman.test(bei, elev)
B
plot(B)
# Hot spots, clusters, and local features
# Clusters
denRed <- density(redwood, bw.ppl, ns=16) # bw.ppl= Likelihood CV
# bw.ppl when the pattern consists predominantly of tight clusters
# bw.diggle : detect a single tight cluster in the midst of random noise
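# Side-by-side comparison of the two bandwidth selectors (illustrative sketch)
plot(density(redwood, sigma=bw.ppl(redwood)), main="bw.ppl")
plot(density(redwood, sigma=bw.diggle(redwood)), main="bw.diggle")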
plot(redwood)
plot(denRed)
LR <- scanLRTS(redwood, r = 2 * bw.ppl(redwood))
plot(LR)
pvals <- eval.im(pchisq(LR, df=1, lower.tail=FALSE))
plot(pvals)
clusterset(redwood, what="domain")
plot(redwood)
plot(clusterset(redwood, what="domain",fast=TRUE))
plot(bei)
plot(clusterset(bei, what="domain",fast=TRUE))
# k-th nearest neighbour cleaning (here k = 5)
Z <- nnclean(redwood, k=5, plothist=TRUE)
plot(Z)
# Example with a strong concentration of intensity
require(datasets)
require(mapdata)
require(maps)
qk <- ppp(quakes$long, quakes$lat, c(164, 190), c(-39,-10))
plot(qk)
dq.5 <- density(qk, 0.5)
plot(dq.5)
ht.5 <- hextess(as.owin(qk), 1.09)
hq.5 <- intensity(quadratcount(qk, tess=ht.5), image=TRUE)
plot(hq.5)
clusterset(qk, what="domain")
plot(clusterset(qk, what="domain"))
nnclean(qk, k=5, plothist=TRUE)
par(mfrow=c(1,1))
plot(nnclean(qk, k=5, plothist=TRUE))
par(mfrow=c(2,1))
plot(unmark(shapley))
# Attraction des points
Y <- sharpen(unmark(shapley), sigma=0.5, edgecorrect=TRUE)
plot(Y)
# Kernel smoothing of marks
plot(longleaf)
plot(density(longleaf))
plot(Smooth(longleaf,bw.smoothppp))
# Variance / standard deviation
mvar <- markvar(longleaf, bw.smoothppp)
msd <- eval.im(sqrt(mvar))
plot(msd) # highest values at the edges, where the trees are young
mfit <- Smooth(longleaf, bw.smoothppp, at="points")
res <- marks(longleaf) - mfit
marks(longleaf)<-res
plot(longleaf)
plot(nnmark(longleaf), k=5, plothist=TRUE)
# Multitype intensity and relative risk
plot(lansing)
plot(split(lansing))
b <- bw.relrisk(lansing)
plot(b)
rr <- relrisk(lansing, sigma=b)
# Estimates of spatially-varying proportions of each species
plot(rr)
dominant <- im.apply(rr, which.max)
species <- levels(marks(lansing))
dominant <- eval.im(factor(dominant, levels=1:6,labels=species))
textureplot(dominant)
#### Correlation ####
# 3 broad types: regularity (repulsion), independence and clustering
fryplot(anemones)
plot(frypoints(anemones))
# Peu de points au centre (certaine régularité des points)
# Fonction K de Ripley, transformation en L
# Using the K-function implicitly assumes that the point process has homogeneous intensity
K <- Kest(cells)
Ki <- Kest(cells, correction="isotropic")
Lc <- Lest(cells)
# Warning: absence of correlation != independence
# Cell process !!
X <- rcell(nx=15)
plot(X)
plot(Kest(X))
# Swedishpines
Ks <- Kest(swedishpines)
plot(Ks, iso ~ r)
plot(Ks, cbind(iso, trans, theo) ~ r)
lambda <- intensity(swedishpines)
plot(Ks, lambda * . ~ r)
Ko <- subset(Ks, r < 0.1, select= -border)
plot(Ko)
Ks <- Kest(swedishpines)
K <- as.function(Ks)
K(9)
Kr <- Kest(redwood)
y <- with(Kr, iso - theo)
x <- with(Kr, r)
# Computation
K1 <- Kest(redwood)
K2 <- Kest(cells)
DK <- eval.fv(K1-K2)
plot(DK)
# Estimating the pair correlation function
g <- pcf(cells)
fryplot(cells)
plot(g)
## Standard errors and confidence intervals
X <- copper$SouthPoints
Kvb <- varblock(X, Kest, nx=3, ny=3)
plot(Kvb)
# Loh’s bootstrap
Kloh <- lohboot(X, Kest)
plot(Kloh)
Lg <- lohboot(X, Lest, global=TRUE)
Kg <- eval.fv(pi * Lg^2)
plot(Lg)
plot(Kg)
# Testing statistical significance
# Pointwise envelopes
plot(Kest(runifpoint(npoints(cells), Window(cells))))
E <- envelope(cells, Kest, nsim=39, fix.n=TRUE)
plot(E)
# Global envelopes
Ek<-envelope(cells, Kest, nsim=19, rank=1, global=TRUE)
plot(Ek)
El<-envelope(cells, Lest, nsim=39, fix.n=TRUE)
plot(El)
# Detecting anisotropy
# A point process is ‘isotropic’ if all its statistical properties are unchanged when it is rotated
X <- rSSI(0.05, win=owin(c(0,1), c(0, 3)))
Y <- affine(X, mat=diag(c(1, 1/3)))
plot(frypoints(Y))
Khoriz <- Ksector(Y, begin = -15, end = 15, units="degrees")
Kvert <- Ksector(Y, begin = 90-15, end = 90+15, units="degrees")
plot(Khoriz, trans/theo ~ r, lty=2)
plot(Kvert, trans/theo ~ r, add=TRUE)
dK <- function(X, ...) {
K1 <- Ksector(X, ..., begin = -15, end = 15, units="degrees")
K2 <- Ksector(X, ..., begin = 90-15, end = 90+15, units="degrees")
eval.fv(K1-K2)
}
CIdK <- varblock(Y, dK, nx=5)
plot(CIdK)
# Adjusting for inhomogeneity (K, g)
# Inhomogeneous K function
numata <- residualspaper$Fig1
plot(numata)
lambda <- density(numata, bw.ppl)
numataK <- Kinhom(numata, lambda)
plot(numataK)
numataK <- Kinhom(numata, sigma=bw.ppl)
plot(numataK)
# Inhomogeneous pair correlation function (pcf)
plot(pcf(bei))
g <- pcfinhom(bei)
plot(g)
### Shortest distances and empty spaces ###
M <- pairdist(redwood)
M
v <- nndist(redwood)
v
Z <- distmap(redwood)
plot(Z)
points(redwood)
# Tests of CSR based on shortest distances
# Complete Spatial Randomness (CSR)
clarkevans(redwood)
clarkevans.test(redwood, correction="donnelly",alternative="clustered")
clarkevans.test(cells, correction="donnelly",alternative="clustered")
hopskel(redwood)
hopskel.test(redwood, alternative="clustered")
hopskel.test(cells, alternative="clustered")
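# Interpretation (sketch): the Clark-Evans index R is the ratio of the observed mean
# nearest-neighbour distance to its expected value under CSR; R < 1 suggests clustering,
# R > 1 suggests regularity. The Hopkins-Skellam statistic instead compares empty-space
# distances with nearest-neighbour distances.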
# Exploratory graphics
plot(redwoodfull)
plot(redwoodfull %mark% nndist(redwoodfull), markscale=1)
stienen(redwoodfull) # Stienen Diagram
plot(dirichlet(redwoodfull)) # Dirichlet tessellation
# Nearest-neighbour function G, empty-space function F
Fs <- Fest(swedishpines)
Gs <- Gest(swedishpines)
plot(Fs)
plot(Gs)
Swedish <- rescale(swedishpines)
plot(Fest(Swedish))
plot(Gest(Swedish))
# Formal inference and diagnostic plots
Fci <- varblock(Swedish, Fest, nx=5, correction="best")
Gci <- varblock(Swedish, Gest, nx=5, correction="best")
Fenv <- envelope(Swedish, Fest, nsim=39, fix.n=TRUE)
Genv <- envelope(Swedish, Gest, nsim=39, fix.n=TRUE)
plot(Fci);plot(Gci);plot(Fenv);plot(Genv)
# Empty space hazard
# Here is a need for alternative summary functions (derived from F and G) which contain only contributions from distances equal to r
plot(Fest(cells), cbind(hazard, theohaz)~ r)
hazenv <- envelope(Swedish, Fhazard, nsim=39, fix.n=T,transform=expression(./(2*pi*r)))
plot(hazenv)
# J-function
# Values J(r) > 1 are consistent with a regular pattern, and J(r) < 1 is consistent with clustering, at scales less than or equal to r.
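# Recall the definition: J(r) = (1 - G(r)) / (1 - F(r)); for a homogeneous Poisson process J(r) = 1.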
plot(allstats(cells))
plot(allstats(cells)$J)
# Inhomogeneous F, G and J functions
Finhom(cells)
Ginhom(cells)
plot(Jinhom(cells))
#### STATISTICAL INFERENCE ####
### POISSON ###
# The key property of a Poisson process is that the random points are independent of each other
# The ppm() function
# Bei data
# Simplest example
fit <- ppm(bei ~ 1)
fit
# Models with a single numerical covariate (p. 278)
bei.extra
fit <- ppm(bei ~ grad, data=bei.extra)
fit
plot(effectfun(fit,"grad", se.fit=TRUE))
# These results tell us that the estimated intensity of Beilschmiedia trees on a flat
# surface (slope s = 0) is about exp(−5.391) = 0.004559 trees per square metre, or
# 45.59 trees per hectare, and would increase by a factor of exp(5.022) = 151.7 if the
# slope increased to 1.0. The largest slope value in the data is about 0.3, at which
# stage the predicted intensity has risen by a factor of exp(0.3×5.022) = 4.511 from
# its value on a flat surface.
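# Numerical check of the interpretation above (a sketch; the standard ppm coefficient
# names "(Intercept)" and "grad" are assumed):
co <- coef(fit)
exp(co[["(Intercept)"]])   # intensity (trees per square metre) on flat ground
exp(0.3 * co[["grad"]])    # multiplicative increase for a slope of 0.3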
ppm(bei ~ atan(grad), data=bei.extra)
ppm(bei ~ I(atan(grad) * 180/pi),data=bei.extra)
degrees <- function(x) { x * 180/pi }
ppm(bei ~ degrees(atan(grad)), data=bei.extra)
# Quadratic function
fit<-ppm(bei ~ grad + I(grad^2), data=bei.extra)
plot(effectfun(fit,"grad", se.fit=TRUE))
# Murchison data
mur <- lapply(murchison, rescale, s=1000, unitname="km")
plot(mur$gold)
plot(mur$faults)
# dfault = distance to the nearest "faults" line segment
dfault <- with(mur,distfun(faults))
plot(dfault)
fit <- ppm(gold ~ dfault,data=mur)
fit
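# A quick look at the fitted effect of distance to the nearest fault (sketch):
plot(effectfun(fit, "dfault", se.fit=TRUE))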
# Models with a logical covariate
# greenstone: polygonal boundary, spatial window
# intensity inside/outside the greenstone boundary
ppm(gold ~ greenstone, data=mur)
ppm(gold ~ greenstone-1,data=mur)
# Same results/interpretations (p. 284)
# Models with a factor covariate
# Gorilla nest data
gor <- rescale(gorillas, 1000, unitname="km")
gor <- unmark(gor)
plot(gor)
gex <- lapply(gorillas.extra, rescale,s=1000, unitname="km")
plot(gex$vegetation)
names(gex)
shorten <- function(x) substr(x, 1, 4)
names(gex) <- shorten(names(gex))
names(gex)
isfactor <- !unlist(lapply(lapply(gex, levels), is.null))
for(i in which(isfactor))
levels(gex[[i]]) <- shorten(levels(gex[[i]]))
levels(gex$vege)
vt <- tess(image=gex$vege)
plot(vt)
I<-intensity(quadratcount(gor, tess=vt))
plot(I)
# Additive models (p. 289)
fitadd <- ppm(bei ~ elev + grad,data=bei.extra)
fitadd
# Modelling spatial trend using Cartesian coordinates (p. 290)
jpines <- residualspaper[["Fig1"]]
ppm(jpines ~ x + y)
ppm(jpines ~ polynom(x,y,2))
# Fit a model with constant but unequal intensities on each side of the vertical line x = 0.5
ppm(jpines ~ (x < 0.5))
# Models with interaction between covariates
# Interaction between two numerical covariates
fit <- ppm(bei ~ elev + grad + I(elev*grad), data=bei.extra)
fit
# Interaction between two factors
ppm(gor ~ vege * heat, data=gex)
# Interaction between factor and numerical covariate
ppm(gold ~ dfault * greenstone,data=mur)
# Nested interaction
ppm(gold ~ greenstone/dfault, data=mur)
ppm(gold ~ greenstone/dfault-1, data=mur)
# Formulas involving many variables
ppm(gor~ . , data=gex)
## Statistical inference for Poisson models
# Fitted models
beikm <- rescale(bei, 1000, unitname="km")
bei.extrakm <- lapply(bei.extra, rescale, s=1000, unitname="km")
fitkm <- ppm(beikm ~ x + y)
fitkm
coef(fitkm)
plot(fitkm, how="image", se=FALSE)
summary(fitkm)
coef(summary(fitkm))
# Variance-covariance matrix
vcov(fitkm)
# Standard errors on the diagonal
sqrt(diag(vcov(fitkm)))
# Confidence intervals for the coefficients
confint(fitkm, level=0.95)
# Correlation matrix between coefficients (variables)
co <- vcov(fitkm, what="corr")
round(co, 2)
# Note: the correlations change with the location of the origin of the covariates
# Here, the origin is moved to the centre of the study region
fitch <- update(fitkm, . ~ I(x-0.5) + I(y-0.25))
co <- vcov(fitch, what="corr")
round(co, 2)
# Prediction
fit <- ppm(bei ~ polynom(grad, elev, 2), data=bei.extra)
lamhat <- predict(fit)
contour(lamhat)
plot(lamhat)
M <- persp(bei.extra$elev, colin=lamhat,
colmap=topo.colors, shade=0.4,
theta=-55, phi=25, expand=6,
box=FALSE, apron=TRUE, visible=TRUE)
perspPoints(bei, Z=bei.extra$elev, M=M,
pch=".", col="red", cex=1.25)
# Confidence interval for the prediction
contour(predict(fit, interval="confidence"))
plot(predict(fit, interval="confidence"))
# To find the expected number of trees at elevations below 130 metres
B <- levelset(bei.extra$elev, 130)
predict(fit, total=B)
predict(fit, total=B, type="se")
predict(fit, total=B, interval="confidence")
# Prediction interval
predict(fit, total=B, interval="prediction")
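# Note (sketch): the confidence interval concerns the expected number of trees in B,
# while the prediction interval also accounts for the Poisson variability of the count
# itself, so it is wider.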
# Updating a model
X <- rpoispp(42)
m <- ppm(X ~ 1)
fitcsr <- ppm(bei ~ 1, data=bei.extra)
update(fitcsr, bei ~ grad)
# or
fitgrad <- update(fitcsr, .~ grad)
fitall <- update(fitgrad, . ~ . + elev)
fitall
# Model selection
fit1 <- ppm(bei ~ grad,data=bei.extra)
fitnull <- ppm(bei ~ 1)
anova(fitnull, fit1, test="Chi")
# p < 0.05: prefer the model with more parameters
# Akaike Information Criterion : AIC for model selection
AIC(fit1)
AIC(fitnull)
# Automated (stepwise) AIC selection
fit <- ppm(bei ~ elev+grad,data=bei.extra)
step(fit,trace=1)
# Simulating the fitted model
fit=ppm(bei ~ polynom(grad, elev, 2),data=bei.extra)
X <- simulate(fit, data=bei.extra)
plot(X[[1]])
# Quadrature schemes
# Gorilla nests example
ppm(gor ~ vege, data=gex)
vt <- tess(image=gex$vege)
plot(vt)
intensity(quadratcount(gor, tess=vt))
fitveg2 <- ppm(gor~ vege-1, data=gex, nd=256)
exp(coef(fitveg2))
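# With the ~ vege - 1 parameterisation, exp(coef) gives the fitted intensity within each
# vegetation class directly; these should roughly match the quadrat-count intensities
# computed just above.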
## Hypothesis Tests and Simulation Envelopes (p. 331)
# Testing for a covariate effect in a parametric model (p. 333)
mur <- lapply(murchison, rescale, s=1000, unitname="km")
mur$dfault <- with(mur, distfun(faults))
mfit0 <- ppm(gold ~ greenstone, data=mur)
mfit1 <- ppm(gold ~ greenstone + dfault, data=mur)
# or
mfit1 <- update(mfit0, . ~ . + dfault)
copper$dist <- with(copper, distfun(SouthLines))
cfit0 <- ppm(SouthPoints ~ 1, data=copper)
cfit1 <- ppm(SouthPoints ~ dist, data=copper)
# Likelihood ratio test (p. 334)
# Test: H0: no effect of the covariate
anova(mfit0, mfit1, test="Chi") # p < 0.05: "dfault" has an effect
anova(cfit0, cfit1, test="Chi") # p > 0.05: no effect of "dist"
# Wald test for single parameter
coef(summary(mfit1)) # Z-test: "dfault" has an effect
coef(summary(cfit1)) # no effect of "dist"
V <- coef(summary(mfit1))["dfault", "Zval"]
pnorm(V, lower.tail=TRUE)
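# pnorm(V, lower.tail=TRUE) is the one-sided p-value for a negative dfault coefficient
# (intensity decreasing with distance from the faults); a two-sided p-value would be
# 2 * pnorm(-abs(V)).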
# Score test (less powerful) (p. 336)
# Caveats (p. 337): H0 being false does not necessarily mean H1 is true
# Model selection using AIC (p. 338)
fitxy <- ppm(swedishpines ~ x + y)
step(fitxy)
drop1(fitxy)
# The output indicates that the lowest AIC (i.e. 842) would be achieved by deleting the x term.
fitcsr <- ppm(swedishpines~1)
add1(fitcsr, ~x+y)
# Conversely
# Best model: CSR (lowest AIC, without x and y)
# Another example
bigfit <- ppm(swedishpines ~ polynom(x,y,3))
formula(bigfit)
formula(step(bigfit, trace=1))
# Goodness-of-fit tests for an intensity model (p. 343)
# H0: the model is adequate (under a Poisson (CSR) assumption)
X <- copper$SouthPoints
D <- distfun(copper$SouthLines)
cdf.test(X, D, test="ad")
cdf.test(mfit0, mur$dfault, test="ad")
# Goodness-of-fit tests of independence between points (p. 343)
# H0: independence between points (Poisson)
quadrat.test(bei)
clarkevans.test(bei)
# Monte Carlo tests
# See pages 344-357
### Envelopes in spatstat ###
plot(envelope(redwood, Lest, nsim=39))
plot(envelope(redwood, Lest, nsim=39, global=TRUE))
# Envelopes for any fitted model
numata <- residualspaper$Fig1
fit <- ppm(numata ~ polynom(x,y,3))
E <- envelope(fit, Lest, nsim=19, global=TRUE, correction="border")
plot(E)
# Envelopes based on any simulation procedure
e <- expression(rpoispp(100))
eval(e)
e <- expression(rlabel(amacrine))
E <- envelope(amacrine, Lcross, nsim=19, global=TRUE, simulate=e)
plot(E)
# Envelopes based on a set of point patterns
Xlist <- list()
for(i in 1:99) Xlist[[i]] <- runifpoint(42)
envelope(cells, Kest, nsim=99, simulate=Xlist)
plot(envelope(cells, Kest, nsim=99, simulate=Xlist))
EK <- envelope(cells, Kest, nsim=99, savepatterns=TRUE) # Kest
Ep <- envelope(cells, pcf, nsim=99, simulate=EK) # PCF
plot(Ep)
EK <- envelope(cells, Kest, nsim=99, savepatterns=TRUE)
Ep <- envelope(EK, pcf)
plot(Ep)
# Pointwise envelopes
envelope(redwood, Lest)
# Simultaneous envelopes
envelope(redwood, Lest, global=TRUE)
# Envelopes based on sample mean & variance
E<-envelope(cells, Kest, nsim=100, VARIANCE=TRUE)
plot(E)
# One-sided envelopes
E<-envelope(cells, Kest, nsim=100, alternative="greater")
E<-envelope(cells, Kest, nsim=100, alternative="less")
plot(E)
# Re-using envelope data (p. 363)
E1 <- envelope(redwood, Kest, savepatterns=TRUE)
# This fails as written ("does not work"), most likely because fisher() has not been
# defined; one standard choice is Fisher's variance-stabilising transform:
fisher <- function(x) asin(sqrt(x))
E2 <- envelope(E1, Gest, global=TRUE, transform=expression(fisher(.)))
plot(E2)
A1 <- envelope(redwood, Kest, nsim=39, savefuns=TRUE)
A2 <- envelope(A1, global=TRUE, nsim=19,
transform=expression(sqrt(./pi)))
plot(A1)
plot(A2)
# Pooling several envelopes
E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE)
E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE)
plot(E1)
plot(E2)
E <- pool(E1, E2)
plot(E)
#### Validation of Poisson models ####
# Goodness-of-fit tests of a fitted model
fit2e <- ppm(bei ~ polynom(elev,2), data=bei.extra)
fit2e
M <- quadrat.test(fit2e, nx=4, ny=2)
M
# df = number of tiles minus the number of fitted parameters: 4*2 - 3 (intercept + elev + I(elev^2)) = 5
plot(M)
elev <- bei.extra$elev
grad <- bei.extra$grad
b <- quantile(elev, probs=(0:4)/4)
Zcut <- cut(elev, breaks=b, labels=1:4)
V <- tess(image=Zcut)
quadrat.test(fit2e, tess=V)
plot(V)
# Variants
cdf.test(fit2e, grad, test="ks")
berman.test(fit2e, grad)
fit2e1g <- update(fit2e, ~ . + grad)
anova(fit2e, fit2e1g, test="Chi")
# The model including grad is better
AIC(fit2e1g)
AIC(fit2e)
# Relative intensity
# Inverse-lambda weighting
lam0 <- fitted(fit2e, dataonly=TRUE)
rel2e <- density(bei, weights=1/lam0)
range(rel2e)
plot(rel2e)
# Relative intensity as function of covariate
lambda0 <- predict(fit2e)
rh1 <- rhohat(bei, grad, baseline=lambda0)
plot(rh1);plot(predict(rh1))
rh2 <- rhohat(fit2e, grad)
plot(rh2);plot(predict(rh2))
# Residuals for Poisson processes
res2e <- residuals(fit2e)
plot(res2e)
# Smoothed residual field
plot(Smooth(res2e))
plot(contour(Smooth(res2e)))
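# Interpretation (sketch): positive values of the smoothed residual field flag regions with
# more points than the fitted model predicts; negative values flag regions with fewer.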
# Scaled or weighted residuals
pres2e <- residuals(fit2e, type="pearson")
plot(pres2e)
plot(Smooth(residuals(fit2e, type="pearson")))
plot(contour(Smooth(residuals(fit2e, type="pearson"))))
# Four-panel residual plot
diagnose.ppm(fit2e)
# Partial residuals plot (p. 384)
fit1g <- ppm(bei ~ grad)
coef(fit1g)
parres(fit1g, "grad")
plot(parres(fit1g, "grad"))
# Added variable plots
fit2g <- update(fit1g, ~ polynom(grad,2))
add<-addvar(fit2g, elev)
plot(add)
## Leverage and influence
lev <- leverage(fit2g)
inf <- influence(fit2g)
dfb <- dfbetas(fit2g)
plot(lev);plot(inf);plot(dfb)
# Murchison example
mur <- lapply(murchison, rescale, s=1000, unitname="km")
attach(mur)
green <- greenstone
dfault <- distfun(faults)
murfit1x<-ppm(gold ~ green * dfault)
murlev1x<-leverage(murfit1x)
murinf1x<-influence(murfit1x)
murdfb1x<-dfbetas(murfit1x)
persp(as.im(murlev1x))
plot(murinf1x)
marks(as.ppp(murinf1x))
# Residual summary functions
# K-Function
cellKr <- Kres(cells, correction="best")
cellGr <- Gres(cells, correction="best")
plot(cellKr);plot(cellGr)
# Not a homogeneous Poisson process
jfit <- ppm(residualspaper$Fig1 ~ polynom(x,y,3))
jKr <- Kres(jfit, correction="best")
fvnames(jKr, ".s") <- c("ihi", "ilo")
plot(jKr)
# Homogeneous Poisson process
|
/Stage-BabyLab/Codes/BookStatSpat.R
|
no_license
|
Twan76/R
|
R
| false | false | 38,379 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_2304}
\alias{data_2304}
\title{IMF World Economic Outlook data, current}
\format{
A tibble.
\describe{
\item{unit}{refer to meta unit}
\item{concept}{refer to meta concept}
\item{ref_area}{refer to meta area}
\item{freq}{"A" for annual}
\item{lastactualdate}{year like 2019}
\item{scale}{refer to meta scale}
\item{notes}{some notes}
\item{year}{year like 1980}
\item{value}{observed value}
}
}
\source{
https://www.imf.org/en/Publications/WEO/weo-database/2023/April/download-entire-database
}
\usage{
data_2304
}
\description{
Economic data published in April 2023.
}
\keyword{datasets}
|
/man/data_2304.Rd
|
permissive
|
mitsuoxv/imf-weo
|
R
| false | true | 700 |
rd
|
## Matrix inversion is usually a costly computation and there may be some
# benefit to caching the inverse of a matrix rather than computing it
#repeatedly.
## The makeCacheMatrix function creates a special "matrix" object
## 1: set the value of the Matrix
## 2: get the value of the Matrix
## 3: set the value of the Inverse of matrix
## 4: get the value of the Inverse of matrix
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) { ## set the value of the Matrix
x <<- y
i <<- NULL
}
get <- function() x ## get the value of the Matrix
setinverse <- function(inverse) i <<- inverse ## set the value of the inverse of matrix
getinverse <- function() i ## get the value of the inverse of matrix
    # Create a list containing set, get, setinverse, getinverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The cacheSolve function computes the inverse of the special
# "matrix" returned by `makeCacheMatrix` above. If the inverse has
# already been calculated (and the matrix has not changed), then
# `cacheSolve` should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i) # Return the inverse of a matrix cached previously if
# same matrix is run
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i) # Cache the inverse value
i ## Return a matrix that is the inverse of 'x'
}
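## Example usage (a sketch, not part of the original file):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)   # computes the inverse and caches it
## cacheSolve(m)   # prints "getting cached data" and returns the cached inverse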
|
/cachematrix.R
|
no_license
|
SFBay-Chris/ProgrammingAssignment2
|
R
| false | false | 1,704 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GmeasureFunctions.R
\name{UpdateGprocessCenter}
\alias{UpdateGprocessCenter}
\title{Split: Sample one random center which is proposed for a split.
A split is proposed by adding and subtracting the sampled vector to the original center; the angle of the
proposed vector is sampled from a Von Mises distribution with mean equal to an observed center of a cluster of the children.
The mark of each proposed split is sampled by randomly portioning out the mark of the original center,
where one proportion is beta-distributed with parameters corresponding to the number of children in each cluster.
The observed points previously assigned to the center are then divided randomly among the new centers according to the isotropic Gaussian dispersion density.
If the latent point currently has fewer than 3 children, a simpler procedure is used.}
\usage{
UpdateGprocessCenter(D, P, u, r, accept, conf)
}
\arguments{
\item{D}{The data}
\item{P}{The current state of the MCMC}
\item{u}{Random uniform vector of numbers used in the MCMC for selecting split/merge/move}
\item{r}{Random uniform vector of numbers used for acceptance}
\item{accept}{Information regarding acceptance}
\item{lBext}{Boundary}
\item{kappaVonMises}{Parameter used in the Von Mises distribution for sampling angles for a split}
}
\description{
Performs one update step (split, merge, or move) for the latent centers of the G process within the MCMC sampler.
}
|
/man/UpdateGprocessCenter.Rd
|
no_license
|
PointProcess/SNCPbayes
|
R
| false | true | 1,388 |
rd
|