| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0-6.46M | large_string, lengths 3-331 | large_string, 2 classes | large_string, lengths 5-125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4-6.46M | large_string, 75 classes | string, lengths 0-6.46M |
#' Sales Taxes Project
#' This code estimates demand using the proposed method. First, we run a basic
#' DiD model by initial price level and estimate the "long run" models,
#' splitting the sample by quantiles and increasing the number of groups.
#' Here "initial level" means the previous period, and we divide into groups within the "common" support.
#' In this case, we run a fully saturated model (instead of splitting the sample).
#' We get the implied IV and recover the implied demand function, varying the estimated degree (no. of quantiles).
library(data.table)
library(futile.logger)
library(lfe)
library(multcomp)
setwd("/project2/igaarder")
## input filepaths -----------------------------------------------
#' This data is the same as all_goods_pi_path, except it has 2015-2016 data as well.
data.semester <- "Data/Nielsen/semester_nielsen_data_county.csv"
#data.year <- "Data/Nielsen/yearly_nielsen_data.csv"
## output filepaths ----------------------------------------------
iv.output.results.file <- "Data/Demand_iv_sat_initial_price_semester_county.csv"
theta.output.results.file <- "Data/Demand_theta_sat_initial_price_semester_county.csv"
### Set up Semester Data ---------------------------------
all_pi <- fread(data.semester)
# Create a categorical variable for county_by_module
all_pi[, county_by_module := .GRP, by = .(fips_state, fips_county, product_module_code)]
all_pi[, w.ln_sales_tax := ln_sales_tax - mean(ln_sales_tax), by = .(county_by_module)]
all_pi[, w.ln_cpricei2 := ln_cpricei2 - mean(ln_cpricei2), by = .(county_by_module)]
all_pi[, w.ln_quantity3 := ln_quantity3 - mean(ln_quantity3), by = .(county_by_module)]
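# (the w.* variables are the within county-by-module demeaned series, i.e. the
# within transformation used by the saturated specifications below)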
# Demean prices and quantities within module-by-time (used to define the common support and the price bins below)
all_pi[, module_by_time := .GRP, by = .(product_module_code, semester, year)]
all_pi[, L.ln_cpricei2 := ln_cpricei2 - D.ln_cpricei2]
all_pi[, dm.L.ln_cpricei2 := L.ln_cpricei2 - mean(L.ln_cpricei2, na.rm = T), by = module_by_time]
all_pi[, dm.ln_cpricei2 := ln_cpricei2 - mean(ln_cpricei2, na.rm = T), by = module_by_time]
all_pi[, dm.ln_quantity3 := ln_quantity3 - mean(ln_quantity3, na.rm = T), by = module_by_time]
## Defining common support
control <- all_pi[D.ln_sales_tax == 0,]
treated <- all_pi[D.ln_sales_tax != 0,]
## Price
#pct1.control <- quantile(control$dm.L.ln_cpricei2, probs = 0.01, na.rm = T, weight=control$base.sales)
#pct1.treated <- quantile(treated$dm.L.ln_cpricei2, probs = 0.01, na.rm = T, weight=treated$base.sales)
#pct99.control <- quantile(control$dm.L.ln_cpricei2, probs = 0.99, na.rm = T, weight=control$base.sales)
#pct99treated <- quantile(treated$dm.L.ln_cpricei2, probs = 0.99, na.rm = T, weight=treated$base.sales)
pct5.control <- quantile(control$dm.ln_cpricei2, probs = 0.05, na.rm = T, weight=control$base.sales)
pct5.treated <- quantile(treated$dm.ln_cpricei2, probs = 0.05, na.rm = T, weight=treated$base.sales)
pct95.control <- quantile(control$dm.ln_cpricei2, probs = 0.95, na.rm = T, weight=control$base.sales)
pct95.treated <- quantile(treated$dm.ln_cpricei2, probs = 0.95, na.rm = T, weight=treated$base.sales)
pct5.control
pct5.treated
pct95.control
pct95.treated
max(control$dm.ln_cpricei2, na.rm = T)
min(control$dm.ln_cpricei2, na.rm = T)
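## Note: stats::quantile() has no `weight` argument; the values passed above are
## silently swallowed by `...`, so these support cutoffs are unweighted. A hedged
## sketch of a truly sales-weighted cutoff (assumes the Hmisc package is installed):
# pct5.control <- Hmisc::wtd.quantile(control$dm.ln_cpricei2,
#                                     weights = control$base.sales,
#                                     probs = 0.05, na.rm = TRUE)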
#all_pi[, cs_price := ifelse(dm.L.ln_cpricei2 > max(pct1.treated, pct1.control) &
# dm.L.ln_cpricei2 < min(pct99treated, pct99.control), 1, 0)]
# Make sure missings are 0s
#all_pi[, cs_price := ifelse(is.na(dm.L.ln_cpricei2), 0, cs_price)]
## Keep within the common support
#all_pi <- all_pi[cs_price == 1,]
all_pi[, cs_price := ifelse(dm.ln_cpricei2 > max(pct5.treated, pct5.control) &
dm.ln_cpricei2 < min(pct95.treated, pct95.control), 1, 0)]
# Make sure missings are 0s
all_pi[, cs_price := ifelse(is.na(dm.ln_cpricei2), 0, cs_price)]
## Keep within the common support
all_pi <- all_pi[cs_price == 1,]
outcomes <- c("w.ln_cpricei2", "w.ln_quantity3")
FE_opts <- c("group_region_by_module_by_time", "group_division_by_module_by_time")
LRdiff_res <- data.table(NULL)
target_res <- data.table(NULL)
## Run the saturated within-model, increasing the number of groups
for (n.g in 2:7) {
  # Create groups by initial (lagged, demeaned) price level using the full
  # distribution (as noted above, stats::quantile() ignores `weight`)
  all_pi[, quantile := cut(dm.L.ln_cpricei2,
                           breaks = quantile(dm.L.ln_cpricei2, probs = seq(0, 1, by = 1/n.g),
                                             na.rm = T, weight = base.sales),
                           labels = 1:n.g, right = FALSE)]
quantlab <- round(quantile(all_pi$dm.L.ln_cpricei2,
probs = seq(0, 1, by = 1/n.g), na.rm = T,
weight = all_pi$base.sales), digits = 4)
# Saturate fixed effects
all_pi[, group_region_by_module_by_time := .GRP, by = .(region_by_module_by_time, quantile)]
all_pi[, group_division_by_module_by_time := .GRP, by = .(division_by_module_by_time, quantile)]
## Estimate RF and FS
for (FE in FE_opts) {
for (Y in outcomes) {
formula1 <- as.formula(paste0(
Y, " ~ w.ln_sales_tax:quantile | ", FE, "+ quantile | 0 | module_by_state"
))
res1 <- felm(formula = formula1, data = all_pi,
weights = all_pi$base.sales)
## attach results
res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
res1.dt[, outcome := Y]
res1.dt[, controls := FE]
res1.dt[, n.groups := n.g]
res1.dt[, lev := quantlab[-1]]
LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
fwrite(LRdiff_res, iv.output.results.file)
}
  ## Estimate the implied IVs (reduced form over first stage) and retrieve them in a vector
  IV <- LRdiff_res[outcome == "w.ln_quantity3" & n.groups == n.g & controls == FE, ][["Estimate"]] /
    LRdiff_res[outcome == "w.ln_cpricei2" & n.groups == n.g & controls == FE, ][["Estimate"]]
## Estimate the matrix of the implied system of equations
# Get the empirical distribution of prices by quantile
all_pi[, base.sales.q := base.sales/sum(base.sales), by = .(quantile)]
all_pi[, p_group := floor((dm.ln_cpricei2 - min(dm.ln_cpricei2, na.rm = T))/((max(dm.ln_cpricei2, na.rm = T)-min(dm.ln_cpricei2, na.rm = T))/100)), by = .(quantile)]
all_pi[, p_ll := p_group*((max(dm.ln_cpricei2, na.rm = T)-min(dm.ln_cpricei2, na.rm = T))/100), by = .(quantile)]
all_pi[, p_ll := p_ll + min(dm.ln_cpricei2, na.rm = T), by = .(quantile)]
all_pi[, p_ul := p_ll + ((max(dm.ln_cpricei2, na.rm = T)-min(dm.ln_cpricei2, na.rm = T))/100), by = .(quantile)]
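  # p_group indexes each observation into one of 100 equal-width bins of the price
  # support within its quantile; p_ll/p_ul are the bin's lower/upper bounds
  # (p_m below is its midpoint)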
ed.price.quantile <- all_pi[, .(w1 = (sum(base.sales.q))), by = .(p_ul, p_ll, quantile)]
ed.price.quantile[, p_m := (p_ul+p_ll)/2]
  # Build the derivative of the price polynomial and multiply by the bin weights:
  # b_n = n * w1 * p_m^(n-1), each bin's contribution to E[d(p^n)/dp]
  for (n in 1:n.g) {
    ed.price.quantile[, paste0("b", n) := n * w1 * (p_m^(n - 1))]
  }
# Calculate integral
gamma <- ed.price.quantile[ , lapply(.SD, sum), by = .(quantile), .SDcols = paste0("b",1:n.g)]
gamma <- gamma[!is.na(quantile),][order(quantile)][, -c("quantile")]
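  ## The IV estimates satisfy (approximately) gamma %*% beta = IV: row q of gamma
  ## holds the quantile-q weighted means of the derivatives (1, 2*p, ..., n.g*p^(n.g-1)),
  ## so inverting gamma recovers the coefficients of the implied demand polynomial.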
## Retrieve target parameters
beta_hat <- as.vector(solve(as.matrix(gamma))%*%(as.matrix(IV)))
  # Estimate intercept (note: mean() has no `weights` argument, so use weighted.mean())
  mean.q <- all_pi[, weighted.mean(dm.ln_quantity3, w = base.sales, na.rm = TRUE)]
  mean.p <- all_pi[, weighted.mean(dm.ln_cpricei2, w = base.sales, na.rm = TRUE)]
  beta_0_hat <- mean.q - sum(beta_hat * (mean.p^(1:n.g)))
beta_hat <- c(beta_0_hat, beta_hat)
## Export estimated target parameters
estimated.target <- data.table(beta_hat)
estimated.target[, beta_n := .I-1]
estimated.target[, n.groups := n.g]
estimated.target[, controls := FE]
target_res <- rbind(target_res, estimated.target)
fwrite(target_res, theta.output.results.file)
}
}
path: /R/regressions/Semester_regressions/Demand_semester_initprice_quantiles_COUNTY.R | license_type: no_license | repo_name: lancelothdf/sales.taxes | language: R | is_vendor: false | is_generated: false | length_bytes: 7,644 | extension: r
#' @title Get data of NTB experiments in customized format
#'
#' @author Paul Volkmann
#'
#' @name getexpdata
#'
#' @description A function that imports an NTB dataset and prepares the data for plotting and analysis
#' as a dataframe or matrix.
#' For correct formatting of your files, please consult the "ReadMe for ntbgraphics".
#'
#' @param directory specifies file directory of 'Meta Behavior' and 'Animal List' files within quotation
#' marks (mind correct spelling of both files and 'directory'!);
#' no default
#' @param analysis specifies the kind of experiment performed within quotation marks;
#' "2arm_ko","2arm_tg", "2arm_sd", "2arm_treat",
#' "4arm_sd_ko", "4arm_sd_tg", "4arm_treat_ko", "4arm_treat_tg"
#' (tg for transgenic, ko for knockout;
#' 4arm_sd_x assumes a stress paradigm with social defeat (sd) and housing or handling control (hc) as
#' control;
#' 4arm_treat_x assumes a treatment paradigm with treated (treat) and untreated (untreat) animals;
#' 2arm_x assumes wildtype controls (wt) for tg and ko, housing or handling controls (hc) for sd and
#' untreated controls (untreat) for treated animals;
#' ('analysis' defines the kind of experiment performed, respectively the kind of analysis preferred -
#' you can easily perform 2arm analysis for 4arm experiments looking only at the groups of interest,
#' but not the other way around);
#' default: "2arm_ko"
#' @param ordercolumns defines the order paradigm of experiment column appearance in final table within
#' quotation marks: "ntb", "rdoc", "manual";
#' RFID and Condition are always listed first and need no specification;
#' order of experiments may be chronological with "ntb", follow RDoC clustering with "rdoc" or be customized
#' manually with "manual" (-> use 'ordercolumns_manual' for exact appearance; there, you may also choose to
#' exclude experiments);
#' default: "ntb"
#' @param ordercolumns_manual customizes order of appearance and appearance itself of experiment columns
#' in final table (experiments that are not listed will not be included);
#' only if 'ordercolumns' = "manual";
#' user has to provide a vector containing characters within quotation marks (e.g. by using
#' c("Meanspeed", "SerialLearn")) with all experiments they want to include in the final table, in the
#' desired order;
#' no need for specification if 'ordercolumns' is not "manual"
#' default: FALSE
#' @param exclude.animals excluding animals from analysis by RFID;
#' user has to provide a vector containing characters within quotation marks (e.g. by using
#' c("900200000067229", "900200000065167")) with all animals they want to exclude from the final table;
#' if FALSE is provided, no animal will be excluded;
#' default: FALSE
#' @param orderlevelcond defines order of factor levels of conditions within quotation marks:
#' "other", "gtblock", "etblock", "2rev";
#' (might be important when it comes to plotting or displaying your data grouped by condition
#' in a defined order):
#' "other" for alphabetical order in case of 4arm; also for default order of 2arm experiments
#' (which lists the 'control' first, then the 'condition');
#' "gtblock" for order wt_x, wt_y, tg_x, tg_y;
#' "etblock" for order x_hc, y_hc, x_sd, y_sd;
#' "2rev" for inverse order of 2arm default only, meaning listing the 'condition' first, then the 'control';
#' default: "other"
#' @param acceptable.nas defines the maximum number of NAs allowed within the same row;
#' if the number of actual NAs within one row is bigger than the number provided, the row will be excluded
#' from the table and subsequent analyses;
#' if the number of acceptable NAs should be unlimited, no value has to be provided;
#' default: "unlimited"
#' @param return.matrix boolean that defines if the standard dataframe or a z-scored matrix should be
#' provided;
#' by default, getexpdata generates a dataframe containing raw joined animal and experiment information;
#' 'return.matrix' can further process the dataframe with customizable functions to return a z-scored matrix,
#' for e.g. heatmapping, pca and tsne;
#' default: FALSE
#' @param return.matrix.mean boolean that specifies if the matrix should only contain the mean of each group
#' for each experiment; grouping follows specification of groups to be analyzed as defined by 'analysis';
#' only useful if 'return.matrix' is TRUE;
#' default: FALSE
#' @param healthy_norm boolean that specifies if the mean matrix should be normalized to healthy controls
#' by subtracting the healthy-control values from all groups;
#' only if return.matrix and return.matrix.mean are TRUE; not possible for 2arm experiments;
#' default: FALSE
#' @param naomit boolean that specifies if columns containing any NAs should be
#' excluded; only applied and useful if 'return.matrix' is TRUE;
#' may appear redundant given 'acceptable.nas' above, but gives the user the opportunity to save
#' settings within the function with different needs for the dataframe and a (possibly later needed) matrix;
#' default: FALSE
#' @param directional specifies which directionality paradigm should be applied; several options are
#' available, manual specification is also possible;
#' if "rdoc" within quotation marks is provided, columns 'Rotations', 'FreezeBase', 'Timeimmobile',
#' 'Baseline', 'Activity', 'Choices' and 'Meanspeed' are multiplied by -1;
#' if "emptcf4" within quotation marks is provided, columns 'Center', 'Choices' and 'Meanspeed' are
#' multiplied by -1;
#' you may alternatively provide a vector containing characters within quotation marks (e.g. by using
#' c("Nocturnal", "inhibition75")) with all columns you want to have multiplied by -1;
#' only applied if 'return.matrix' is TRUE and only useful if 'absoluteval' is FALSE;
#' default: FALSE
#' @param absoluteval boolean that specifies if only absolute values of z-scored matrix should be given;
#' only applied and useful if 'return.matrix' is TRUE;
#' default: FALSE
#'
#' @return prepared and joined dataframe of all animals and corresponding NTB experiments
#' or customized z-scored matrix
#'
#' @export
#'
#' @examples getexpdata(directory = paste0(system.file("extdata", package = "ntbgraphics", mustWork = T),"/"))
#'
#' @examples getexpdata(directory = paste0(system.file("extdata", package = "ntbgraphics", mustWork = T),"/"),
#' analysis = "2arm_sd",
#' ordercolumns = "manual",
#' ordercolumns_manual = c("Meanspeed", "SerialLearn", "Center"),
#' exclude.animals = c("900200000070142"),
#' orderlevelcond = "2rev",
#' acceptable.nas = 3,
#' return.matrix = TRUE,
#' naomit = TRUE,
#' directional = "emptcf4")
getexpdata <- function(directory,
analysis = c("2arm_ko","2arm_tg", "2arm_sd", "2arm_treat",
"4arm_sd_ko", "4arm_sd_tg", "4arm_treat_ko", "4arm_treat_tg"),
ordercolumns = c("ntb", "rdoc", "manual"),
ordercolumns_manual = FALSE,
exclude.animals = FALSE,
orderlevelcond = c("other", "gtblock", "etblock", "2rev"),
acceptable.nas = "unlimited",
return.matrix = FALSE,
return.matrix.mean = FALSE,
healthy_norm = FALSE,
naomit = FALSE,
directional = FALSE,
absoluteval = FALSE) {
### use switch() for more flexible level assignments and assert_that() for more complex error management
### if errors occur when modifying the functions or even just occasionally, write a test (testthat package)
# turn warnings off for the duration of the call; on.exit() restores them even on early return
options(warn=-1)
on.exit(options(warn = 0))
# check if directory is provided and if it exists
if (missing(directory)) {
stop("Please provide path to 'Meta Behavior' and 'Animal List' files!")
} else if (dir.exists(directory) == FALSE) {
stop(sprintf("The path `%s` does not exist!", directory))
}
# check for data file
if (file.exists(paste0(directory,"/Meta Behavior.xlsx")) == FALSE |
file.exists(paste0(directory,"/Animal List.xlsx")) == FALSE) {
stop(sprintf("Path `%s` does not contain one of or both input excel files!", directory))
}
# ensure that in case of no provided argument, first one of list is taken
analysis <- analysis[1]
ordercolumns <- ordercolumns[1]
orderlevelcond <- orderlevelcond[1]
# ensure that correct analysis is provided
if (analysis == "2arm_ko") {
print("Warning: You have chosen '2arm_ko' as type of analysis. Since this is the default setting, please make sure it matches the data provided. Furthermore, refer to the help page of 'getexpdata' to check available options!")
}
possible.ana <- c("2arm_ko","2arm_tg", "2arm_sd", "2arm_treat",
"4arm_sd_ko", "4arm_sd_tg", "4arm_treat_ko", "4arm_treat_tg")
if (! analysis %in% possible.ana) {
stop("The 'analysis' provided does not exist. Please refer to the help page of 'getexpdata' to check available arguments!")
}
# ensure that correct ordercolumns is provided
possible.oc <- c("ntb", "rdoc", "manual")
if (! ordercolumns %in% possible.oc) {
stop("The 'ordercolumns' provided does not exist. Please refer to the help page of 'getexpdata' to check available arguments!")
}
# ensure that correct orderlevelcond is provided
possible.olc <- c("other", "gtblock", "etblock", "2rev")
if (! orderlevelcond %in% possible.olc) {
stop("The 'orderlevelcond' provided does not exist. Please refer to the help page of 'getexpdata' to check available arguments!")
}
# define provided directionality paradigm if provided
# (identical() keeps the condition length one when `directional` is a character vector)
if (identical(directional, "rdoc")) {
directional = c("Rotations", "FreezeBase", "Timeimmobile", "Baseline",
"Activity", "Choices", "Meanspeed")
}
if (identical(directional, "emptcf4")) {
directional = c("Center", "Choices", "Meanspeed")
}
## import data
suppressMessages(meta.data <- readxl::read_excel(paste0(directory,"/Meta Behavior.xlsx")))
suppressMessages(animal.list <- readxl::read_excel(paste0(directory, "/Animal List.xlsx")))
# ensure that Animal is a character - important for joining both tables
meta.data <- meta.data %>%
mutate_at(., vars("Animal"),list(as.character))
# modify tables
data.animal.joined <- animal.list %>%
# exclude NAs in Genotype
filter(Genotype!= 'NA') %>%
# merge conditions in case of 4arm
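# (the `if`(cond, expr, .) idiom applies the piped step only when cond holds,
# otherwise it passes the data through unchanged)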
`if`(analysis == "4arm_sd_tg", unite(., col="Condition", Genotype, Environmental, sep= "_",
remove = FALSE), .) %>%
`if`(analysis == "4arm_sd_ko", unite(., col="Condition", Genotype, Environmental, sep= "_",
remove = FALSE), .) %>%
`if`(analysis == "4arm_treat_tg", unite(., col="Condition", Genotype, Treatment, sep= "_",
remove = FALSE), .) %>%
`if`(analysis == "4arm_treat_ko", unite(., col="Condition", Genotype, Treatment, sep= "_",
remove = FALSE), .) %>%
# rename column of interest in case of 2arm
`if`(analysis == "2arm_tg", dplyr::rename(., Condition = Genotype), .) %>%
`if`(analysis == "2arm_ko", dplyr::rename(., Condition = Genotype), .) %>%
`if`(analysis == "2arm_sd", dplyr::rename(., Condition = Environmental), .) %>%
`if`(analysis == "2arm_treat", dplyr::rename(., Condition = Treatment), .) %>%
# ensure that RFID is a character - important for joining both tables
mutate_at(., vars("RFID"),list(as.character)) %>%
# join animals and behavior data
left_join(meta.data, by = c("RFID" = "Animal"))
# define preferred order of columns
if (ordercolumns == "ntb") {
col.names <- c("RFID", "Condition", # identifiers
"Meanspeed", "Rotations", # open field
"Center", "Alternations", "Choices", # y maze
"Activity", "Nocturnal", "PlacePref", "SerialLearn", "ReversalLearn", "SucPref", # ic
"Baseline", "inhibition70", "inhibition75", "inhibition80", # ppi
"Timeimmobile", # tail suspension
"FreezeBase", "Context", "Cue") # fear conditioning
} else if (ordercolumns == "rdoc") {
col.names <- c("RFID", "Condition", # identifiers
"Alternations", "ReversalLearn", "SerialLearn", "Cue", "Context", # cognition
"SucPref", "PlacePref", "Rotations", # positive valence
"Center", "FreezeBase", "Timeimmobile", "Baseline", # negative valence
"Activity", "Nocturnal", "Choices", "Meanspeed", # arousal and regulation
"inhibition70", "inhibition75", "inhibition80") # sensorimotor
} else if (ordercolumns == "manual") {
col.names <- c("RFID", "Condition", ordercolumns_manual)
}
## prepare order setup
# consider intersect(x, y)
# define number of column positions
col.pos <- c(1:length(col.names))
# create data frame with all possible column names and their ideal positions
col.names.order.ideal <- data.frame(col.names, col.pos)
# create data frame with actual column names
col.names.order.actual <- data.frame(colnames(data.animal.joined))
order.input <- col.names.order.actual %>%
# join the two created frames
left_join(col.names.order.ideal, by=c("colnames.data.animal.joined."="col.names")) %>%
# loose all NAs, i.e. columns that do not exist in data.animal.joined
na.omit() %>%
# sort by ideal positions
arrange(., col.pos) %>%
# select your column names, now sorted
select(., colnames.data.animal.joined.) %>%
# extract your column names as a vector
pull(., colnames.data.animal.joined.)
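# order.input now holds the intersection of the ideal and the actually present
# column names, sorted into the ideal order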
data.animal.joined <- data.animal.joined %>%
# select relevant columns and adjust order according to former preparation
select(., all_of(order.input)) %>%
# change values from chr to num
mutate_at(., vars(nth(order.input, 3):last(order.input)),list(as.numeric)) %>%
# delete selected animals (isFALSE() keeps the condition length one when a vector of RFIDs is supplied)
`if`(!isFALSE(exclude.animals), filter(., !RFID %in% exclude.animals),.)
# order factor levels of conditions (e.g. for order of plot appearance)
if (analysis == "4arm_sd_tg" && orderlevelcond == "gtblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_hc", "wt_sd", "tg_hc", "tg_sd"))
}
if (analysis == "4arm_sd_ko" && orderlevelcond == "gtblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_hc", "wt_sd", "ko_hc", "ko_sd"))
}
if (analysis == "4arm_treat_tg" && orderlevelcond == "gtblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_untreat", "wt_treat",
"tg_untreat", "tg_treat"))
}
if (analysis == "4arm_treat_ko" && orderlevelcond == "gtblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_untreat", "wt_treat",
"ko_untreat", "ko_treat"))
}
if (analysis == "4arm_sd_tg" && orderlevelcond == "etblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_hc", "tg_hc", "wt_sd", "tg_sd"))
}
if (analysis == "4arm_sd_ko" && orderlevelcond == "etblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_hc", "ko_hc", "wt_sd", "ko_sd"))
}
if (analysis == "4arm_treat_tg" && orderlevelcond == "etblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_untreat", "tg_untreat",
"wt_treat", "tg_treat"))
}
if (analysis == "4arm_treat_ko" && orderlevelcond == "etblock") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt_untreat", "ko_untreat",
"wt_treat", "ko_treat"))
}
if (analysis == "2arm_tg") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt", "tg"))
}
if (analysis == "2arm_ko") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("wt", "ko"))
}
if (analysis == "2arm_sd") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("hc", "sd"))
}
if (analysis == "2arm_treat") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition,
levels = c("untreat", "treat"))
}
if (orderlevelcond == "2rev") {
if (analysis == "2arm_tg") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition, levels = c("tg", "wt"))
} else if (analysis == "2arm_ko") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition, levels = c("ko", "wt"))
} else if (analysis == "2arm_sd") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition, levels = c("sd", "hc"))
} else if(analysis == "2arm_treat") {
data.animal.joined$Condition <- factor(data.animal.joined$Condition, levels = c("treat", "untreat"))
}
}
# exclude rows containing more than the acceptable number of NAs (counts NAs per row
# into na_count, filters on na_count, then drops the helper column; the default
# character comparison against "unlimited" never drops a row)
data.animal.joined$na_count <- rowSums(is.na(data.animal.joined))
data.animal.joined <- data.animal.joined %>%
filter(na_count <= paste(acceptable.nas)) %>%
select(-na_count) %>%
# finally, arrange by condition
arrange(.,Condition)
# option for creating matrix with different possible parameters
if(return.matrix == TRUE) {
# standard matrix
if (return.matrix.mean == FALSE) {
# arrange by condition and transform column RFID to rownames
data.animal.matrix <- data.animal.joined %>%
column_to_rownames(., "RFID")
# unselect condition and transform into matrix, z-scoring
data.animal.matrix <- data.animal.matrix %>%
select(nth(colnames(data.animal.matrix), 2):last(colnames(data.animal.matrix))) %>%
data.matrix() %>%
`if`(naomit == TRUE, na.omit(.), .) %>%
scale()
# set NAs to zero
data.animal.matrix[is.na(data.animal.matrix)] <- 0
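# (after scale(), 0 is the column mean, so missing values are effectively
# imputed at the column average rather than dropping the animal)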
}
# matrix containing means for every group only
if (return.matrix.mean == TRUE) {
length.col <- data.animal.joined %>%
colnames() %>%
length() %>%
as.numeric()
data.animal.matrix <- aggregate(data.animal.joined[, 3:length.col],
list(data.animal.joined$Condition), mean, na.rm = T)
data.animal.matrix <- data.animal.matrix %>%
data.frame() %>%
column_to_rownames("Group.1") %>%
data.matrix() %>%
scale()
# optionally subtract the wt_hc values from all other values
if (healthy_norm == TRUE && analysis == "4arm_sd_ko") {
data.animal.matrix <- sweep(data.animal.matrix, 2, data.animal.matrix["wt_hc",], "-")
}
if (healthy_norm == TRUE && analysis == "4arm_sd_tg") {
data.animal.matrix <- sweep(data.animal.matrix, 2, data.animal.matrix["wt_hc",], "-")
}
if (healthy_norm == TRUE && analysis == "4arm_treat_ko") {
data.animal.matrix <- sweep(data.animal.matrix, 2, data.animal.matrix["wt_untreat",], "-")
}
if (healthy_norm == TRUE && analysis == "4arm_treat_tg") {
data.animal.matrix <- sweep(data.animal.matrix, 2, data.animal.matrix["wt_untreat",], "-")
}
}
# inverse z-scoring according to the chosen directionality paradigm
# (covers the "rdoc" and "emptcf4" presets mapped above as well as manual vectors)
col.names.actual <- colnames(data.animal.matrix)
if (!isFALSE(directional)) {
for(x in directional) {
if (x %in% col.names.actual) {
data.animal.matrix[, x] <- data.animal.matrix[, x]*-1
}
}
}
# optionally take absolute values
if (absoluteval == TRUE) {
data.animal.matrix <- abs(data.animal.matrix)
}
}
# return amended dataframe
if(return.matrix == FALSE) {
return(data.animal.joined)
}
# return amended matrix
if(return.matrix == TRUE) {
return(data.animal.matrix)
}
# (warnings are restored by the on.exit() handler set at the top of the function)
}
path: /R/getexpdata.R | license_type: no_license | repo_name: volkmannp/ntbgraphics | language: R | is_vendor: false | is_generated: false | length_bytes: 21,279 | extension: r
`%||%` <- function(a, b) if (is.null(a)) b else a
is_named <- function(x) {
all(has_names(x))
}
has_names <- function(x) {
nms <- names(x)
if (is.null(nms)) {
rep(FALSE, length(x))
} else {
!(is.na(nms) | nms == "")
}
}
# non smart quote version of sQuote
quote_str <- function(x, quote = "\"") {
if (!length(x)) {
return(character(0))
}
paste0(quote, x, quote)
}
is_installed <- function(pkg) {
requireNamespace(pkg, quietly = TRUE)
}
need_package <- function(pkg) {
if (is_installed(pkg)) return(invisible())
stop("Please install ", pkg, " package", call. = FALSE)
}
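# Usage sketch (hypothetical values, for illustration only):
# NULL %||% "fallback"       # -> "fallback"
# has_names(list(a = 1, 2))  # -> TRUE FALSE
# quote_str(c("a", "b"))     # -> "\"a\"" "\"b\""
# need_package("xml2")       # errors if xml2 is not installed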
path: /R/utils.R | license_type: no_license | repo_name: gothub/xml2 | language: R | is_vendor: false | is_generated: false | length_bytes: 610 | extension: r
library(tidyverse)
library(lubridate)
source("./R/import/import_Fitbit_HR.R")
source("./R/import/import_gpx_HR.R")
glimpse(polar_hr)
glimpse(fitbit_hr)
# adjusting the "timezone" and merging both devices
polar_hr %>%
mutate(datetime = datetime - hours(2)) %>%
inner_join(fitbit_hr, by = "datetime") -> hr_data
glimpse(hr_data)
# lets plot the dataset
hr_data %>%
gather(device, hr, -datetime) %>%
ggplot(aes(x=datetime, y=hr, group=device)) +
geom_line(aes(color=device)) +
theme_minimal()
# lets see the correlation
hr_data %>%
ggplot(aes(x=polar_hr, y=fitbit_hr)) +
geom_point() +
stat_smooth(method = "lm", se=T, level=.95) +
theme_minimal()
# correlation test
cor.test(x=hr_data$polar_hr, y=hr_data$fitbit_hr, alternative = "two.sided")
# check the quality of a linear correlation
model <- lm(fitbit_hr~polar_hr, hr_data)
summary(model)
par(mfrow = c(2, 2))
plot(model)
## Bland-Altman test
# ref: https://seer.ufrgs.br/hcpa/article/view/11727/7021
# One must assess whether the differences between the variables depend on the size of the
# measurement. This can be done through a correlation between the differences and the means,
# which should be null. The hypothesis that the bias is or is not equal to zero can be tested
# with a t-test for paired samples. From the bias (d) and its standard deviation (sd) one can
# obtain the limits of agreement, d ± 1.96 sd, which should be computed and included in the plot.
# If the bias is normally distributed, these limits delimit the region containing 95% of the
# differences in the cases studied.
# When the bias does not follow a normal distribution, a non-parametric approach is recommended.
# compute the Bland-Altman statistics: per-pair mean and difference, overall bias,
# SD of the differences and the limits of agreement (2*sd as an approximation of 1.96*sd)
hr_data %>%
  mutate(
    mean = (polar_hr + fitbit_hr)/2,
    diff = polar_hr - fitbit_hr,
    diff.mn = mean(diff),
    diff.sd = sd(diff),
    upper.lim = diff.mn + (2*diff.sd),
    lower.lim = diff.mn - (2*diff.sd),
  ) -> hr_data_ba
# overview
summary(hr_data_ba)
# plotting the differences
hr_data_ba %>%
ggplot() +
geom_segment(aes(x=datetime, xend = datetime, y=0, yend = diff), color="red", size=1) +
geom_point(aes(x=datetime, y=diff), color='black') +
theme_minimal()
# distribution of the differences
hr_data_ba %>%
ggplot() +
geom_density(aes(x=diff), color="red", fill="red" ) +
theme_minimal()
# One must assess whether the differences between the variables depend on the size of the
# measurement; this can be done through a correlation between the differences and the means,
# which should be null.
cor.test(x=hr_data_ba$diff, y=hr_data_ba$mean)
# The hypothesis that the bias equals zero can be tested with a t-test for paired samples
# (equivalently, a one-sample t-test of the differences against zero).
t.test(hr_data_ba$polar_hr, hr_data_ba$fitbit_hr, paired = TRUE)
# plot
hr_data_ba %>%
ggplot(aes(x=mean, y=diff)) +
geom_point() +
geom_hline(yintercept=0) +
geom_hline(yintercept=hr_data_ba$diff.mn[1], linetype=2) +
geom_hline(yintercept=hr_data_ba$upper.lim[1], linetype=2) +
geom_hline(yintercept=hr_data_ba$lower.lim[1], linetype=2) +
theme_minimal()
# per-sample error of the Fitbit reading relative to Polar (taken as the reference), in %
hr_data_ba %>%
  mutate(error = 100 * (fitbit_hr - polar_hr) / polar_hr) -> hr_data_ba
hr_data_ba %>%
  ggplot(aes(x=datetime)) +
  geom_line(aes(y=fitbit_hr), color="blue") +
  geom_line(aes(y=polar_hr), color="red") +
  theme_minimal()
hr_data_ba %>%
  ggplot() +
  geom_point(aes(x=polar_hr, y=fitbit_hr, color=error)) +
  scale_color_gradient(name="error %",low="green", high="red") +
  theme_minimal()
hr_data_ba %>%
  ggplot(aes(x=datetime)) +
  geom_line(aes(y=error, color=error)) +
  scale_color_gradient(name="error %",low="green", high="red") +
  theme_minimal()
summary(hr_data_ba)
hist(hr_data_ba$error, breaks=25, col="red")
path: /R/analysis/analysis_polar_x_fitbit.R | license_type: no_license | repo_name: GiulSposito/fitbit_api | language: R | is_vendor: false | is_generated: false | length_bytes: 3,682 | extension: r
#' Copyright(c) 2017-2020 R. Mark Sharp
#' This file is part of nprcgenekeepr
context("convertRelationships")
library(testthat)
ped <- nprcgenekeepr::smallPed
kmat <- kinship(ped$id, ped$sire, ped$dam, ped$gen, sparse = FALSE)
ids <- c("A", "B", "D", "E", "F", "G", "I", "J", "L", "M", "O", "P")
relIds <- convertRelationships(kmat, ped, ids)
rel <- convertRelationships(kmat, ped, updateProgress = function() {})
ped <- nprcgenekeepr::qcPed
bkmat <- kinship(ped$id, ped$sire, ped$dam, ped$gen,
sparse = FALSE)
relBIds <- convertRelationships(bkmat, ped, c("4LFS70", "DD1U77"))
test_that("convertRelationships makes correct transformations", {
expect_equal(relIds$id1[relIds$id1 %in% rel$id1], relIds$id1)
expect_true(all(rel$id1[rel$id1 %in% relIds$id1] %in% relIds$id1))
expect_equal(rel$kinship[rel$id1 == "A" & rel$id2 == "D"], 0.25)
expect_equal(rel$relation[rel$id1 == "D" & rel$id2 == "G"],
"Parent-Offspring")
expect_equal(rel$relation[rel$id1 == "C" & rel$id2 == "I"],
"Half-Siblings")
expect_equal(rel$relation[rel$id1 == "C" & rel$id2 == "J"],
"No Relation")
expect_equal(rel$relation[rel$id1 == "C" & rel$id2 == "C"],
"Self")
expect_equal(rel$relation[rel$id1 == "C" & rel$id2 == "G"],
"Full-Avuncular")
expect_equal(relIds$relation[relIds$id1 == "A" & relIds$id2 == "B"],
"No Relation")
expect_equal(relIds$relation[relIds$id1 == "A" & relIds$id2 == "F"],
"Grandparent-Grandchild")
expect_equal(relIds$relation[relIds$id1 == "F" & relIds$id2 == "G"],
"Full-Siblings")
expect_equal(relIds$relation[relIds$id1 == "F" & relIds$id2 == "I"],
"Avuncular - Other")
expect_equal(relIds$relation[relIds$id1 == "F" & relIds$id2 == "L"],
"Full-Cousins")
expect_equal(rel$relation[rel$id1 == "L" & rel$id2 == "P"],
"Cousin - Other")
expect_equal(relBIds$relation[relBIds$id1 == "4LFS70" &
relBIds$id2 == "DD1U77"], "Other")
})
|
/tests/testthat/test_convertRelationships.R
|
permissive
|
jhagberg/nprcgenekeepr
|
R
| false | false | 2,074 |
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for an application that draws a scatter plot
ui <- shinyUI(fluidPage(
tags$head(tags$link(rel = "stylesheet",
type = "text/css", href = "style.css")),
# Application title
titlePanel("One file test app for shiny.epa.gov"),
# Sidebar with a species selector
sidebarLayout(
sidebarPanel(
selectInput("species",
"Which Species To Display",
choices = unique(iris$Species),
multiple = TRUE,
selected = c("setosa", "versicolor", "virginica")
)
),
# Show the scatter plot
mainPanel(
plotOutput("scatterPlot")
)
)
))
# Define server logic required to draw the scatter plot
server <- shinyServer(function(input, output) {
output$scatterPlot <- renderPlot({
# filter
x <- iris[iris$Species %in% input$species,]
# plot scatter plot with selected species
plot(x$Petal.Length, x$Petal.Width, col = x$Species)
})
})
shinyApp(ui, server, uiPattern = ".*")
|
/routetest/app.R
|
no_license
|
jhollist/cloud_gov_shiny
|
R
| false | false | 1,314 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{FindTpsIp}
\alias{FindTpsIp}
\title{Gets IP address of TPS2.}
\usage{
FindTpsIp(TpsSerial, timeout)
}
\arguments{
\item{TpsSerial}{Serial number of TPS2.}
\item{timeout}{Timeout in ms to wait for the UDP packet.}
}
\value{
String of IP address.
}
\description{
\code{FindTpsIp} listens for the UDP packets that the TPS2 broadcasts and
returns the IP address of the TPS2.
}
\details{
Note that executing this function turns your program into a UDP server, and
Windows firewall (or other personal firewall software) will ask for
permission.
}
\examples{
\dontrun{
FindTpsIp("910.33.0316", 500)
}
}
|
/man/FindTpsIp.Rd
|
no_license
|
pasturm/TofDaqR
|
R
| false | true | 677 |
rd
|
## Monte Carlo calculation of posterior probability ##
s <- as.numeric(commandArgs(trailingOnly=TRUE))
ntasks <- 21
pdff <- function(filename){pdf(file=paste0(filename,'.pdf'),paper='a4r',height=11.7,width=16.5)}
#library('ggplot2')
library('RColorBrewer')
#library('cowplot')
#library('png')
#library('plot3D')
library('foreach')
library('doFuture')
## registerDoFuture()
## #library("future.batchtools")
## print(paste0('available workers: ', availableCores()))
## if(length(ntasks)==0){ntasks <- availableCores()}
## #plan(batchtools_slurm, workers=ntasks-1)
## #plan(multiprocess, workers=ntasks-1)
## cl <- makeCluster(ntasks-1)
## plan(cluster, workers = cl)
## print(paste0('number of workers: ', nbrOfWorkers()))
library('doRNG')
#library('LaplacesDemon')
library('dirichletprocess')
library('mvtnorm')
library('RNetCDF')
#library('Rmpfr')
options(bitmapType='cairo')
mypurpleblue <- '#4477AA'
myblue <- '#66CCEE'
mygreen <- '#228833'
myyellow <- '#CCBB44'
myred <- '#EE6677'
myredpurple <- '#AA3377'
mygrey <- '#BBBBBB'
mycolours <- c(myblue, myred, mygreen, myyellow, myredpurple, mypurpleblue, mygrey, 'black')
palette(mycolours)
barpalette <- colorRampPalette(c(mypurpleblue,'white',myredpurple),space='Lab')
barpalettepos <- colorRampPalette(c('white','black'),space='Lab')
if (dev.cur() > 1) dev.off()  # avoid an error when no graphics device is open
set.seed(181225+0)
#################################################################
######################## EQUATIONS SETUP ########################
#################################################################
group <- 'con'
#group <- 'sch'
savename <- paste0('fits_',group)
datafilename <- paste0('weights_', group, '_40cons')
logit2 <- function(x){log(1+x)-log(1-x)}
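# (added) logit2 maps (-1, 1) to the real line and equals 2*atanh(x); its
# inverse, should fitted draws need mapping back to weight space, is:
ilogit2 <- function(y){ (exp(y) - 1)/(exp(y) + 1) }  # = tanh(y/2)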
y <- t(as.matrix(read.csv(paste0(datafilename,'.csv'),header=FALSE,sep=',')))
dims <- ncol(y)
y <- logit2(y)
datasize <- nrow(y)
#foreach(s = 1:(datasize+1))%dopar%{
print(paste0('marg: ',s))
x <- y[-s,]
dp <- DirichletProcessMvnormal(x,
#alphaPriors=c(0.1, 0.1),
g0Priors=list(mu0=rep(0,dims), kappa0=0.01, Lambda=diag(dims)/2, nu=dims+1), numInitialClusters = nrow(x))
fitdp <- dp
fitdp <- Fit(fitdp,1000)
fitdp <- Fit(fitdp,10000)
saveRDS(fitdp,paste0(savename, '_marg_', s, '.rds'))
NULL
#}
## Idun:
## > dp <- DirichletProcessMvnormal(y,
## + #alphaPriors=c(0.1, 0.1),
## + g0Priors=list(mu0=rep(0,dims), kappa0=0.01, Lambda=diag(dims)/2, nu=dims+1), numInitialClusters = datasize)
## > fitdp <- dp
## > system.time(for(i in 1:1){fitdp <- Fit(fitdp,10000)})
## |--------------------------------------------------| 100%
## user system elapsed
## 15168.709 47.122 781.522
## > system.time(for(i in 1:1){fitdp <- Fit(fitdp,1000)})
## |--------------------------------------------------| 100%
## user system elapsed
## 1042.095 3.345 53.594
##
## > system.time(for(i in 1:1){fitdp <- Fit(fitdp,10000)})
## |--------------------------------------------------| 100%
## user system elapsed
## 10789.939 35.819 558.570
## >
## + system.time(for(i in 1:1){fitdptest <- Fit(fitdptest,5000)})
## +
## |--------------------------------------------------| 100%
## user system elapsed
## 317.40 0.15 317.67
## + system.time(for(i in 1:1){fitdptest <- Fit(fitdptest,1000)})
## +
## |--------------------------------------------------| 100%
## user system elapsed
## 52.40 0.15 52.57
|
/codev3/posteriors.R
|
no_license
|
pglpm/nanunana
|
R
| false | false | 3,517 |
r
|
#This code is revised from Zhenyu Zhang's code here:http://github.com/ZhenyuZ/eqtl/blob/master/cnv/GetGeneLevelCNA.r
options(stringsAsFactors=F)
library("GenomicRanges")
library(DESeq)
library(optparse)
option_list <- list(
make_option("--locfile", type="character", help="path to locations file"),
make_option("--patients", type="character", help="path to uuids"),
make_option("--segfiles", type="character", help="path to datasets"),
make_option("--outfile", type="character", help="name of output file")
)
parser <- OptionParser(usage="main.R [options] file", option_list=option_list)
args <- parse_args(parser)
#get gene locations
get_gene_loc <- function(geneloc){
loc = GRanges(seqnames = geneloc$chr,
ranges = IRanges(start=as.numeric(geneloc$start), end=as.numeric(geneloc$end)),
strand = "+",
name = geneloc$gene_id)
#gene.length = width(loc) #vector of lengths of each gene id.
#n = length(loc) #no. of geneids
return(loc)
}
get_seg_loc <- function(segfile){
cnvseg <- data.frame(read.table(segfile, header=T, colClasses="character", sep="\t"))
seg <- GRanges(seqnames = paste("chr", cnvseg$Chromosome, sep=""),
ranges = IRanges(start=as.numeric(cnvseg$Start), end=as.numeric(cnvseg$End)),
strand = "+",
Num_probes = as.numeric(cnvseg$Num_Probes),
Segment_Mean = as.numeric(cnvseg$Segment_Mean))
return(seg)
}
get_copy_number <- function(loc, seg){
    segm <- values(seg)$Segment_Mean
    Hits <- findOverlaps(loc, seg)
    # rows map gene (query) indices from the locfile to segment (subject)
    # indices from the segfile
    gene_to_seg_map <- data.frame(cbind(queryHits(Hits), subjectHits(Hits)))
    colnames(gene_to_seg_map) <- c("loc.index", "seg.index")
    cnv <- numeric(length(loc))
    for (gene_index in 1:nrow(gene_to_seg_map)){
        # assign by the gene's own (query) index, not by the row number of the
        # hit map, so genes without any overlapping segment keep copy number 0
        cnv[gene_to_seg_map$loc.index[gene_index]] <- 2 * (2^segm[gene_to_seg_map$seg.index[gene_index]])
    }
    return(cnv)
}
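# (added sketch, hypothetical toy data; left commented out so the batch script
# is unaffected) sanity check of the overlap logic: one segment with log2
# ratio 1 covering only the first of two genes should yield c(4, 0)
# toy_loc <- GRanges("chr1", IRanges(c(1, 500), c(100, 600)), strand = "+")
# toy_seg <- GRanges("chr1", IRanges(1, 200), strand = "+", Segment_Mean = 1)
# get_copy_number(toy_loc, toy_seg)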
locfile = args$locfile
geneloc = read.table(locfile, h=F, sep="\t", colClasses="character")
colnames(geneloc) <- c("gene_id", "chr", "start", "end")
w <- which(!duplicated(geneloc$gene_id))
geneloc <- geneloc[w,]
loc <- get_gene_loc(geneloc)
patients <- read.table(args$patients, header=T, colClasses="character")
all_cnv = matrix(nrow=nrow(geneloc), ncol=0)
col_counter = 0
for (i in 1:nrow(patients)){
segfile=paste(args$segfiles, patients[i,1], ".seg.txt", sep="")
if (file.exists(segfile)){
col_counter = col_counter + 1
print(paste("Getting copy number for", patients[i,1]))
seg <- get_seg_loc(segfile)
cnv <- get_copy_number(loc, seg)
all_cnv <- cbind(all_cnv, as.numeric(cnv))
colnames(all_cnv)[col_counter] <- patients[i,1]
}
}
rownames(all_cnv) <- geneloc$gene_id
write.table(all_cnv, file=args$outfile, quote=FALSE, sep='\t')
|
/cnv.aggr.R
|
no_license
|
stutiagrawal/nbl
|
R
| false | false | 3,140 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-utils-clip-grad.R
\name{nn_utils_clip_grad_value_}
\alias{nn_utils_clip_grad_value_}
\title{Clips gradient of an iterable of parameters at specified value.}
\usage{
nn_utils_clip_grad_value_(parameters, clip_value)
}
\arguments{
\item{parameters}{(Iterable(Tensor) or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized}
\item{clip_value}{(float or int): maximum allowed value of the gradients.}
}
\description{
Gradients are modified in-place.
}
\details{
The gradients are clipped in the range
\eqn{\left[\mbox{-clip\_value}, \mbox{clip\_value}\right]}
}
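\examples{
# (added) minimal usage sketch; nn_linear() and torch_randn() are assumed to
# come from the torch package itself
\dontrun{
m <- nn_linear(10, 1)
m(torch_randn(4, 10))$sum()$backward()
nn_utils_clip_grad_value_(m$parameters, clip_value = 0.5)
}
}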
|
/man/nn_utils_clip_grad_value_.Rd
|
permissive
|
mlverse/torch
|
R
| false | true | 674 |
rd
|
# importing libraries
library(shiny)
library(tidyverse)
library(scales)
library(shinythemes)
library(ggrepel)
# importing data
scatter_data <- read_csv("Data Wrangling/school-enrollment-and-water-access.csv")
# defining choice values and labels for user inputs
country_choices <- unique(scatter_data$country)
# for scatter plot
scatter_school_type_values <- c("primary","secondary")
scatter_school_type_names <- c("Primary Schools", "Secondary Schools")
names(scatter_school_type_values) <- scatter_school_type_names
############
# ui #
############
ui <- navbarPage(
title = "Effect of Basic Water Access in Schools on Enrollment Rates",
theme = shinytheme("flatly"),
# SCATTERPLOT (incorporating enrollment rates data)
sidebarLayout(
sidebarPanel(
# choose school type
radioButtons(inputId = "stat_type_scatter",
label = "Filter by School Type:",
choices = scatter_school_type_values,
selected = "primary")
,
selectizeInput(inputId = "country_name"
, label = "Identify country(s) in the scatterplot:"
, choices = country_choices
, selected = NULL
, multiple = TRUE)
),
mainPanel(
plotOutput(outputId = "scatter")
)
)
)
############
# server #
############
server <- function(input, output) {
# SCATTERPLOT
data_for_scatter_reactive <- reactive({
data_for_scatter <- scatter_data %>%
filter(schoolType == input$stat_type_scatter)
})
output$scatter <- renderPlot({
    ggplot(data_for_scatter_reactive(), aes(x = coverage/100, y = grossSchoolEnrollmentRatio/100)) +
geom_point(color = "#2c7fb8") +
geom_smooth(method = "lm") +
labs(x = "\nPercent of Schools in Country with Basic Water Supply", y = "School Enrollment Rates (Gross Ratios)\n"
, title = "School Enrollment Rates (Gross Ratios) vs. Percent of Schools with Basic Water Supply\n") +
geom_label_repel(data = filter(data_for_scatter_reactive(), country %in% input$country_name), aes(label = country), show.legend = FALSE) +
theme(text = element_text(size=13.5), plot.title = element_text(face = "bold")) +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent)
}
)
}
####################
# call to shinyApp #
####################
shinyApp(ui = ui, server = server)
|
/Jamie_Shiny2.R
|
no_license
|
stat231-s21/Blog-JAMS
|
R
| false | false | 2,573 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/2_SimpleModels.R
\name{M_M_1}
\alias{M_M_1}
\title{Obtains the main characteristics of an M/M/1 queueing model}
\usage{
M_M_1(lambda = 3, mu = 6)
}
\arguments{
\item{lambda}{Mean arrival rate}
\item{mu}{Mean service rate}
}
\value{
Returns the following characteristics of an M/M/1 model:
\item{rho}{Traffic intensity: \eqn{\rho}}
\item{cn}{Coefficients used in the computation of \ifelse{latex}{\eqn{P_n}: \eqn{C_n}}{\out{<i>P<sub>n</sub>: C<sub>n</sub></i>}}}
\item{p0}{Probability of empty system: \ifelse{latex}{\eqn{P_{0}}}{\out{<i>P<sub>0</sub></i>}}}
\item{l}{Number of customers in the system: \eqn{L}}
\item{lq}{Number of customers in the queue: \ifelse{latex}{\eqn{L_q}}{\out{<i>L<sub>q</sub></i>}}}
\item{w}{Waiting time in the system: \eqn{W}}
\item{wq}{Waiting time in the queue: \ifelse{latex}{\eqn{W_q}}{\out{<i>W<sub>q</sub></i>}}}
\item{eff}{System efficiency: \ifelse{latex}{\eqn{Eff = W/(W-W_q)}}{\out{<i>Eff = W/(W-W<sub>q</sub>)</i>}}}
}
\description{
Obtains the main characteristics of an M/M/1 queueing model
}
\examples{
#A workstation with a single processor
#runs programs with CPU time following
#an exponential distribution with mean 3 minutes.
#The programs arrive to the workstation following
#a Poisson process with an intensity of 15
#programs per hour.
M_M_1(lambda=15, mu=60/3)
}
\seealso{
Other AnaliticalModels: \code{\link{ClosedJacksonNetwork}};
\code{\link{M_M_1_INF_H}}; \code{\link{M_M_1_K}};
\code{\link{M_M_INF}}; \code{\link{M_M_S_INF_H_Y}};
\code{\link{M_M_S_INF_H}}; \code{\link{M_M_S_K}};
\code{\link{M_M_S}}; \code{\link{OpenJacksonNetwork}}
}
|
/man/M_M_1.Rd
|
no_license
|
vishkey/arqas
|
R
| false | false | 1,678 |
rd
|
#SEM practicing
library(lavaan)
library(semPlot)
library(tidyverse)
#path Analysis
n<-3094
sd<-c(.71,.75,.90,.69,1.84,1.37)
lowercorr<-c(1.00,
.28,1.00,
.19,.21,1.00,
.15,.15,.23,1.00,
.35,.09,.20,.20,1.00,
.08,.11,.09,.11,.23,1.00)
fullcorr<-lav_matrix_lower2full(lowercorr)
covmat<-cor2cov(R = fullcorr,sds = sd)
colnames(covmat)<-rownames(covmat)<-c("ability","acheive","deg_asp","hi_deg","selectiv","income")
model<-'
income~hi_deg+selectiv
hi_deg~ability+acheive+deg_asp
selectiv~ability+acheive+deg_asp
ability~~acheive
ability~~deg_asp
acheive~~deg_asp
'
fit<-sem(model = model,sample.cov = covmat,sample.nobs = n)
summary(fit,standardized=T,fit.measures=T,rsquare=T)
semPaths(fit,layout = "tree2",whatLabels = "std",color = "red")
lavResiduals(object = fit,zstat = T)
#the std.all in case of error variance is the total variance unexplained
modificationindices(fit,sort. = T)
model2<-'
income~hi_deg+selectiv
hi_deg~ability+acheive+deg_asp
selectiv~ability+acheive+deg_asp+hi_deg
ability~~acheive
ability~~deg_asp
acheive~~deg_asp
'
fit2<-sem(model = model2,sample.cov = covmat,sample.nobs = n)
summary(fit2,standardized=T,fit.measures=T,rsquare=T)
semPaths(fit2,layout = "tree2",whatLabels = "std",color = "red")
lavResiduals(object = fit2,zstat = T)
modificationindices(fit2,sort. = T)
model3<-'
income~hi_deg+selectiv+acheive
hi_deg~ability+acheive+deg_asp
selectiv~ability+acheive+deg_asp+hi_deg
ability~~acheive
ability~~deg_asp
acheive~~deg_asp
'
fit3<-sem(model = model3,sample.cov = covmat,sample.nobs = n)
summary(fit3,standardized=T,fit.measures=T,rsquare=T)
semPaths(fit3,layout = "tree2",whatLabels = "std",color = "red")
lavResiduals(object = fit3,zstat = T)
modificationindices(fit3,sort. = T)
model4<-'
income~hi_deg+selectiv+acheive
hi_deg~ability+acheive+deg_asp
selectiv~ability+acheive+deg_asp+hi_deg
ability~income
ability~~acheive
ability~~deg_asp
acheive~~deg_asp
'
fit4<-sem(model = model4,sample.cov = covmat,sample.nobs = n)
summary(fit4,standardized=T,fit.measures=T,rsquare=T)
semPaths(fit4,layout = "tree2",whatLabels = "std",color = "red")
lavResiduals(object = fit4,zstat = T)
modificationindices(fit4,sort. = T)
anova(fit4,fit3,fit2,fit) # the third and fourth modifications are not significant
#CFA by lavaan
n<-318
covlow<-c(.7821,
.5602,.9299,
.5695,.6281,.9751,
.1969,.2599,.2362,.6352,
.2289,.2835,.3079,.4575,.7943,
.2609,.3670,.3575,.4327,.4151,.6783,
.0556,.0740,.0981,.2094,.2306,.2503,.6855,
.0025,.0279,.0798,.2047,.2270,.2257,.4224,.6952,
.0180,.0753,.0744,.1892,.2352,.2008,.4343,.4514,.6065,
.1617,.1919,.2892,.1376,.1744,.1845,.0645,.0731,.0921,.4068,
.2628,.3047,.4043,.1742,.2066,.2547,.1356,.1336,.1283,.1958,.7015,
.2966,.3040,.3919,.1942,.1864,.2402,.1073,.0988,.0599,.2233,.3033,.5786)
covfull<-lav_matrix_lower2full(covlow)
colnames(covfull)<-rownames(covfull)<-c(paste0("TEN",1:3),paste0("WOR",1:3),paste0("IRTHK",1:3),paste0("BODY",1:3))
model<-'
TENSION=~TEN1+TEN2+TEN3
WORRY=~WOR1+WOR2+WOR3
TESTIRRTHINKING=~IRTHK1+IRTHK2+IRTHK3
BODILYSUMP=~BODY1+BODY2+BODY3
TENSION~~WORRY
TENSION~~TESTIRRTHINKING
TENSION~~BODILYSUMP
WORRY~~TESTIRRTHINKING
WORRY~~BODILYSUMP
TESTIRRTHINKING~~BODILYSUMP
TENSION~~1*TENSION
WORRY~~1*WORRY
TESTIRRTHINKING~~1*TESTIRRTHINKING
BODILYSUMP~~1*BODILYSUMP
'
fit<-cfa(model = model,sample.cov = covfull,sample.nobs = n,std.lv=TRUE)
summary(fit,fit.measures=T,standardized=T,rsquare=T)
lavResiduals(object = fit,zstat = T)
modificationindices(fit,sort. = T)[1,]
semPaths(fit,whatLabels = "std",residuals = T,color = "green")
#modification 1
model2<-'
TENSION=~TEN1+TEN2+TEN3
WORRY=~WOR1+WOR2+WOR3
TESTIRRTHINKING=~IRTHK1+IRTHK2+IRTHK3
BODILYSUMP=~BODY1+BODY2+BODY3+TEN3
TENSION~~WORRY
TENSION~~TESTIRRTHINKING
TENSION~~BODILYSUMP
WORRY~~TESTIRRTHINKING
WORRY~~BODILYSUMP
TESTIRRTHINKING~~BODILYSUMP
TENSION~~1*TENSION
WORRY~~1*WORRY
TESTIRRTHINKING~~1*TESTIRRTHINKING
BODILYSUMP~~1*BODILYSUMP
'
fit2<-cfa(model = model2,sample.cov = covfull,sample.nobs = n,std.lv=TRUE)
summary(fit2,fit.measures=T,standardized=T,rsquare=T)
lavResiduals(object = fit2,zstat = T)$cov.z%>%data.frame()%>%apply(MARGIN = 2,function(x){
ifelse(abs(x)>=1.96,x,0)
})%>%View
modificationindices(fit2,sort. = T)
semPaths(fit2,whatLabels = "std",residuals = T,color = "green")
#modification 12
model3<-'
TENSION=~TEN1+TEN2+TEN3
WORRY=~WOR1+WOR2+WOR3
TESTIRRTHINKING=~IRTHK1+IRTHK2+IRTHK3
BODILYSUMP=~BODY1+BODY2+BODY3+TEN3
TENSION~~WORRY
TENSION~~TESTIRRTHINKING
TENSION~~BODILYSUMP
WORRY~~TESTIRRTHINKING
WORRY~~BODILYSUMP
TESTIRRTHINKING~~BODILYSUMP
WOR2~~WOR3
TENSION~~1*TENSION
WORRY~~1*WORRY
TESTIRRTHINKING~~1*TESTIRRTHINKING
BODILYSUMP~~1*BODILYSUMP
'
fit3<-cfa(model = model3,sample.cov = covfull,sample.nobs = n,std.lv=TRUE)
summary(fit3,fit.measures=T,standardized=T,rsquare=T)
lavResiduals(object = fit3,zstat = T)$cov.z%>%data.frame()%>%apply(MARGIN = 2,function(x){
ifelse(abs(x)>=1.96,x,0)
})%>%View
modificationindices(fit3,sort. = T)
semPaths(fit3,whatLabels = "std",residuals = T,color = "green")
anova(fit,fit2,fit3)
#################################
# structural (latent path analysis) model
corlow<-c(1.000,
.812,1.000,
.819,.752,1.000,
.334,.344,.228,1.000,
.177,.094,.141,.363,1.000,
.363,.383,.387,.241,.273,1.000,
.239,.258,.275,.286,.389,.445,1.000,
.243,.293,.234,.116,.096,.222,.344,1.000,
.672,.616,.621,.277,.137,.458,.315,.246,1.000,
.464,.620,.514,.213,.173,.430,.387,.132,.680,1.000,
.612,.640,.719,.192,.090,.509,.336,.230,.819,.676,1.000,
.331,.391,.310,.435,.263,.409,.298,.256,.446,.395,.411,1.000,
.209,.214,.286,.319,.671,.423,.334,.246,.308,.268,.280,.573,1.000,
.298,.358,.361,.171,.232,.791,.286,.057,.433,.387,.477,.389,.445,1.000,
.309,.303,.381,.132,.307,.637,.459,.267,.468,.406,.458,.554,.514,.551,1.000,
.056,.086,.092,.090,.201,.123,.247,.403,.176,.076,.131,.318,.213,.056,.342,1.000)
corfull<-lav_matrix_lower2full(corlow)
colnames(corfull)<-rownames(corfull)<-c(paste("SS1",1:3,sep = "_"),
paste("SE1",1:2,sep = "_"),
paste("EB1",1:3,sep = "_"),
paste("SS2",1:3,sep = "_"),
paste("SE2",1:2,sep = "_"),
paste("EB2",1:3,sep = "_"))
sds<-c(2.46,1.76,2.74,2.04,2.13,4.30,1.90,1.90,2.63,1.89,2.84,2.34,2.27,4.86,2.66,1.94)
n<-300
covmat<-cor2cov(corfull,sds = sds)
path1<-paste("EB1",paste(paste("EB1",1:3,sep = "_"),collapse = "+"),sep = "=~")
path2<-paste("EB2",paste(paste("EB2",1:3,sep = "_"),collapse = "+"),sep = "=~")
path3<-paste("SE1",paste(paste("SE1",1:2,sep = "_"),collapse = "+"),sep = "=~")
path4<-paste("SE2",paste(paste("SE2",1:2,sep = "_"),collapse = "+"),sep = "=~")
path5<-paste("SS1",paste(paste("SS1",1:3,sep = "_"),collapse = "+"),sep = "=~")
path6<-paste("SS2",paste(paste("SS2",1:3,sep = "_"),collapse = "+"),sep = "=~")
measuremodel<-paste(path1,path2,path3,path4,path5,path6,sep = "\n")
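# (added) print the generated lavaan measurement-model syntax as a quick check
# of the paste() construction above
cat(measuremodel, "\n")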
structuralmodel<-"
SS2~SS1
SE2~SS2+SS1+SE1
EB2~SE1+SE2+EB1
EB1~~SE1
EB1~~SS1
SE1~~SS1
"
model<-paste(measuremodel,structuralmodel,collapse = "\n")
fit<-sem(model = model,sample.cov = covmat,sample.nobs = n)
summary(fit,fit.measures=T,standardized=T,rsquare=T)
semPaths(fit,whatLabels = "std")
modificationindices(fit,sort. = T)[1:10,]
lavResiduals(object = fit,zstat = T)$cov.z%>%data.frame()%>%apply(MARGIN = 2,function(x){
ifelse(abs(x)>=1.96,x,NA)
})%>%round(digits = 2)%>%View
#running a saturated path analysis
structuralmodel2<-'
EB1~~EB2
EB1~~SE2
EB1~~SS2
EB1~~SE1
EB1~~SS1
SE1~~EB2
SE1~~SE2
SE1~~SS2
SE1~~SS1
SS1~~EB2
SS1~~SE2
SS1~~SS2
EB2~~SE2
EB2~~SS2
SE2~~SS2
'
model2<-paste(measuremodel,structuralmodel2,collapse = "\n")
fit2<-sem(model = model2,sample.cov = covmat,sample.nobs = n)
summary(fit2,fit.measures=T,standardized=T,rsquare=T)
semPaths(fit2,whatLabels = "std")
modificationindices(fit2,sort. = T)[1:10,]
pathmi1<-"EB1_1 ~~ EB2_1"
pathmi2<-"SE1_2 ~~ SE2_2"
pathmi3<-"SE1_2 ~~ SE2_1"
pathmi4<-"SS1_3 ~~ SS2_3"
pathmi5<-"SS1_1 ~~ SS2_1"
pathmi6<-" EB2 =~ SE2_2"
pathmi7<-"EB1 =~ SE2_2"
measuremodel2<-paste(path1,path2,path3,path4,path5,path6,pathmi1,pathmi2,pathmi3,pathmi4,pathmi5,pathmi6,pathmi7,sep = "\n")
model3<-paste(measuremodel2,structuralmodel2,collapse = "\n")
fit3<-sem(model = model3,sample.cov = covmat,sample.nobs = n)
summary(fit3,fit.measures=T,standardized=T,rsquare=T)
semPaths(fit3,whatLabels = "std")
|
/SEM.R
|
no_license
|
VetMomen/Statistical-practice
|
R
| false | false | 8,463 |
r
|
# test <- wilcox.test(statsAov$Median~statsAov$PRESa, exact=T, estimate=T)
## Mann-Whitney U tests for differences between groups
## helper function
require(xtable)
require(dplyr)
require(pgirmess)
rdec <- function(x, k){format(round(x, k), nsmall = k)}
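# (added) quick check of the helper: rdec(3.14159, 2) returns "3.14"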
#
# load("./output/statsAov_medianCI.Rda")
# load("./output/simConcise.Rda")
# simConcise$ExpNo <- as.factor(simConcise$ExpNo)
#
# statsAov <- inner_join(statsAov, simConcise)
load("./output/statsAov.Rda")
statsAov$ExpNo <- as.factor(statsAov$ExpNo)
## Set up output paths:
outPath <- "./output/"
# bigtabsPath <- "output/MWU/bigtabs/"
## Build subsets, run Mann-Whitney U tests and record differences for both cells and all FG
## individually
## ------ PRES ------
PRES <- new.env() #Storage for output
PRES$median <- new.env()
PRES$mwutest <- new.env()
cells <- unique(statsAov$CellCode)
fgroups <- unique(statsAov$FGroup)
expno <- levels(statsAov$ExpNo)
exptest <- expno
numexp <- length(exptest)
categ <- names(statsAov)[c(19:21)]
for(ce in cells){
curPath <- paste0(outPath,"MannW/BMD_pres/", ce)
xPath <- paste0(curPath, "/xtable")
if(!is.factor(statsAov$ExpNo) | !is.factor(statsAov$FD)){
break("Coerce to factors!")
} else {
paste("Factor Check successful!")
}
if(!file.exists(curPath)){
dir.create(curPath, recursive=T)
} else {}
if(!file.exists(xPath)){
dir.create(xPath, recursive = T)
} else {}
for(fg in fgroups){
assign(paste0(fg, "_dat"), subset(statsAov, FGroup==fg &
TimeStep>=1080 &
CellCode==ce),
envir = PRES)
for(cat in categ){
dat <- get(paste0(fg, "_dat"),
envir = PRES)
dat <- as.data.frame(dat)
n1 <- table(dat[,cat])[[1]]
n2 <- table(dat[,cat])[[2]]
assign(paste0(fg, "_", cat, "_", ce, "_median"),
tapply(dat$Median,list(dat[,cat]), median)
,
envir = PRES$median
)
assign(paste0(fg, "_", cat, "_", ce, "_mwutest"),
{
wilcox.test(dat$Median~dat[,cat])
}
,
envir = PRES$mwutest
)
# write.csv(dat.tmp, file=paste0(curPath,"/",fg, "_", cat, "_",ce, "_res.csv"))
# curr.test <- get(x = paste0(fg, "_", cat, "_", ce, "_mwutest"),
# envir = PRES$mwutest)
#
# curr.test[6] <- paste(cat,"for",fg, "in", ce, " biomass density [$kg\\cdot km^{-2}$]")
#
# xtab <- xtable(dat.tmp,
# caption = paste0("$\\U_{",
# n1,
# "," ,
# n2,
# "} = ",
# round(as.numeric(curr.test[1]),2),
# "$ ",
# "$p = ",
# round(as.numeric(curr.test[3]), 4),
# "$ ",
# curr.test[5]),
# label = "tab:"
#
# )
#
# print(xtab, file=paste0(xPath,"/",fg, "_", cat, "_", ce, "_res.tex"),
# sanitize.text.function = function(x) x,
# tabular.environment = "tabular*",
# caption.placement = "top",
# booktabs = T,
# sanitize.colnames.function = function(x){
# paste0("\\textbf{", x, "}")
# })
}
}
}
# PRES.kw <- as.list(mget(ls(pattern = "res", envir = PRES), envir = PRES))
save(PRES, file="output/MannW/BMD_pres/PRES_env.Rda")
# save(PRES.mwu, file="output/MWU/PRES_MWU.Rda")
|
/Analysis/scripts/08_statAnalysis_MannW_presence.R
|
no_license
|
the-Hull/Diss
|
R
| false | false | 4,397 |
r
|
# should be parsed as dependencies (use only lower-case letters for package names)
library(a)
library("b")
base::library(c)
base::library("d", character.only = TRUE)
requireNamespace("e")
base::requireNamespace("f", quietly = TRUE)
xfun::pkg_attach(c("g", "h"))
pkg_attach2("i", "j")
k::foo()
l:::bar()
"m"::baz()
# should NOT be parsed as dependencies (use only upper-case names for package names)
library(A, character.only = TRUE)
|
/tests/testthat/resources/code.R
|
permissive
|
rstudio/renv
|
R
| false | false | 435 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.75,family="gaussian",standardize=TRUE)
sink('./Model/EN/Correlation/urinary_tract/urinary_tract_079.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
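# (added sketch) inspect the CV-selected penalties and the sparse coefficient
# vector at the 1-SE rule
print(glm$lambda.min)
print(glm$lambda.1se)
print(coef(glm, s = "lambda.1se"))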
|
/Model/EN/Correlation/urinary_tract/urinary_tract_079.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 381 |
r
|
loadDigestDataFromXL_Severity <- function(xlsPath, sheets, labelCol="B", positionCol="C", dataCol="D") {
# Loads digest data from Excel.
#
# The data for fire severity samples are in separate worksheets.
# Unlike the similar fire frequency data files, the sheet names do not
# always match sample label, so instead this script examines the contents
# of the label column and takes the last non-blank entry as the name
# of the sample (earlier entries can be header label or other comments).
#
# The data are formatted in blocks with header rows and blank lines
# so this script has to match things up.
#
# The value returned is a single data.frame with cols for sample label
# and data value (both as character).
require(xlsx)
require(stringr)
gc()
wb <- loadWorkbook(xlsPath)
wss <- getSheets(wb)[sheets]
getColValues <- function(ws, col, rowIndices) {
if (missing(rowIndices))
rows <- getRows(ws)
else
rows <- getRows(ws, rowIndices)
# sometimes we end up with NAs for blank cells - no idea why
vals <- sapply(getCells(rows, col), getCellValue)
vals[ is.na(vals) ] <- ""
vals
}
cellNamesToRowIndex <- function(cellNames) {
# names have the form rownum.colnum
rows <- sapply( str_split(cellNames, "\\."), function(parts) parts[1] )
as.integer(rows)
}
lastLabel <- function(labels) {
ts <- str_trim(labels)
ii <- which(str_length(ts) > 0)
ilast <- max(ii)
labels[ilast]
}
# browser()
out <- NULL
for (isheet in 1:length(wss)) {
cat("sheet", isheet, ": ")
labelCol <- xlColLabelToIndex(labelCol)
labels <- getColValues(wss[[isheet]], labelCol)
# Assume that the last non-blank label col value is the
# sample label
sampleLabel <- lastLabel(labels)
cat(sampleLabel, "\n")
# The rows that we want are identified by having a label
# identical to the sample label (e.g. "K1F1")
ii <- labels == sampleLabel
# We need to get the excel row numbers from the label vector names
# because of some cols having or not having header data
ii.rows <- cellNamesToRowIndex( names(ii)[ii] )
positionCol <- xlColLabelToIndex(positionCol)
positionVals <- getColValues(wss[[isheet]], positionCol, ii.rows)
dataCol <- xlColLabelToIndex(dataCol)
dataVals <- getColValues(wss[[isheet]], dataCol, ii.rows)
if (is.null(out))
out <- data.frame(label=labels[ii], position=positionVals, x=dataVals,
stringsAsFactors=FALSE)
else
out <- rbind(out, data.frame(label=labels[ii], position=positionVals, x=dataVals,
stringsAsFactors=FALSE))
}
out
}
|
/scripts/loadDigestDataFromXL_Severity.R
|
no_license
|
mbedward/robert_fire_sev
|
R
| false | false | 2,770 |
r
|
loadDigestDataFromXL_Severity <- function(xlsPath, sheets, labelCol="B", positionCol="C", dataCol="D") {
# Loads digest data from Excel.
#
# The data for fire severity samples are in separate worksheets.
# Unlike the similar fire frequency data files, the sheet names do not
  #   always match the sample label, so instead this script examines the contents
  #   of the label column and takes the last non-blank entry as the name
  #   of the sample (earlier entries can be header labels or other comments).
#
# The data are formatted in blocks with header rows and blank lines
# so this script has to match things up.
#
# The value returned is a single data.frame with cols for sample label
# and data value (both as character).
require(xlsx)
require(stringr)
gc()
wb <- loadWorkbook(xlsPath)
wss <- getSheets(wb)[sheets]
getColValues <- function(ws, col, rowIndices) {
if (missing(rowIndices))
rows <- getRows(ws)
else
rows <- getRows(ws, rowIndices)
# sometimes we end up with NAs for blank cells - no idea why
vals <- sapply(getCells(rows, col), getCellValue)
vals[ is.na(vals) ] <- ""
vals
}
cellNamesToRowIndex <- function(cellNames) {
# names have the form rownum.colnum
rows <- sapply( str_split(cellNames, "\\."), function(parts) parts[1] )
as.integer(rows)
}
lastLabel <- function(labels) {
ts <- str_trim(labels)
ii <- which(str_length(ts) > 0)
ilast <- max(ii)
labels[ilast]
}
# browser()
out <- NULL
for (isheet in 1:length(wss)) {
cat("sheet", isheet, ": ")
labelCol <- xlColLabelToIndex(labelCol)
labels <- getColValues(wss[[isheet]], labelCol)
# Assume that the last non-blank label col value is the
# sample label
sampleLabel <- lastLabel(labels)
cat(sampleLabel, "\n")
# The rows that we want are identified by having a label
# identical to the sample label (e.g. "K1F1")
ii <- labels == sampleLabel
# We need to get the excel row numbers from the label vector names
# because of some cols having or not having header data
ii.rows <- cellNamesToRowIndex( names(ii)[ii] )
positionCol <- xlColLabelToIndex(positionCol)
positionVals <- getColValues(wss[[isheet]], positionCol, ii.rows)
dataCol <- xlColLabelToIndex(dataCol)
dataVals <- getColValues(wss[[isheet]], dataCol, ii.rows)
if (is.null(out))
out <- data.frame(label=labels[ii], position=positionVals, x=dataVals,
stringsAsFactors=FALSE)
else
out <- rbind(out, data.frame(label=labels[ii], position=positionVals, x=dataVals,
stringsAsFactors=FALSE))
}
out
}
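# --- Hedged sketch: xlColLabelToIndex() is called above but defined elsewhere
# in this project. A minimal base-R version consistent with its apparent
# contract ("B" -> 2, "AA" -> 27) might look like the following; the numeric
# guard matters because the function above reassigns labelCol/positionCol/
# dataCol to indices on the first loop iteration. This is an assumption about
# the helper, not the project's actual implementation.
xlColLabelToIndex <- function(label) {
  if (is.numeric(label)) return(as.integer(label))  # already converted
  chars <- strsplit(toupper(label), "")[[1]]
  sum(match(chars, LETTERS) * 26L ^ rev(seq_along(chars) - 1L))
}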
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rfVIP_master.R
\name{createRFmodel}
\alias{createRFmodel}
\title{createRFmodel}
\usage{
createRFmodel(train.x, train.y, m.try, nSeed = 3456, nCores = 1)
}
\arguments{
\item{train.x}{dataframe containing x values}
\item{train.y}{vector containing y values}
\item{m.try}{tuned m.try value}
\item{nSeed}{integer containing seed value, defaults to 3456}
\item{nCores}{integer indicating number of cores used for parallel processing}
}
\value{
random forest model of class "train"
}
\description{
This function does the random forest modeling for a given x and y
}
\examples{
rf_model <- createRFmodel(train.x,train.y,nSeed=3456,nCores=4)
}
|
/man/createRFmodel.Rd
|
no_license
|
kpadm/rfeVIP
|
R
| false | false | 727 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rfVIP_master.R
\name{createRFmodel}
\alias{createRFmodel}
\title{createRFmodel}
\usage{
createRFmodel(train.x, train.y, m.try, nSeed = 3456, nCores = 1)
}
\arguments{
\item{train.x}{dataframe containing x values}
\item{train.y}{vector containing y values}
\item{m.try}{tuned m.try value}
\item{nSeed}{integer containing seed value, defaults to 3456}
\item{nCores}{integer indicating number of cores used for parallel processing}
}
\value{
random forest model of class "train"
}
\description{
This function does the random forest modeling for a given x and y
}
\examples{
rf_model <- createRFmodel(train.x,train.y,nSeed=3456,nCores=4)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cashflows.R
\name{shift_for_dividends}
\alias{shift_for_dividends}
\title{Shift a set of grid values for dividends paid, using spline interpolation}
\usage{
shift_for_dividends(grid_values_before_shift, stock_prices, div_sum)
}
\arguments{
\item{grid_values_before_shift}{Values on grid before accounting for
expected dividends}
\item{stock_prices}{Stock prices for which to shift the grid}
\item{div_sum}{Sum of dividend values at each grid point}
}
\value{
An object like \code{grid_values_before_shift} with entries shifted
according to the dividend sums
}
\description{
Shift a set of grid values for dividends paid, using spline interpolation
}
\seealso{
Other Dividends: \code{\link{adjust_for_dividends}},
\code{\link{time_adj_dividends}}
}
|
/man/shift_for_dividends.Rd
|
no_license
|
freephys/ragtop
|
R
| false | true | 833 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cashflows.R
\name{shift_for_dividends}
\alias{shift_for_dividends}
\title{Shift a set of grid values for dividends paid, using spline interpolation}
\usage{
shift_for_dividends(grid_values_before_shift, stock_prices, div_sum)
}
\arguments{
\item{grid_values_before_shift}{Values on grid before accounting for
expected dividends}
\item{stock_prices}{Stock prices for which to shift the grid}
\item{div_sum}{Sum of dividend values at each grid point}
}
\value{
An object like \code{grid_values_before_shift} with entries shifted
according to the dividend sums
}
\description{
Shift a set of grid values for dividends paid, using spline interpolation
}
\seealso{
Other Dividends: \code{\link{adjust_for_dividends}},
\code{\link{time_adj_dividends}}
}
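# --- Hedged sketch in plain R (not part of this Rd file): one plausible
# implementation of the documented behaviour, re-valuing the pre-shift grid
# at dividend-adjusted prices via stats::splinefun(). ragtop's actual code
# may differ.
shift_for_dividends_sketch <- function(grid_values_before_shift,
                                       stock_prices, div_sum) {
  f <- splinefun(stock_prices, grid_values_before_shift, method = "natural")
  f(stock_prices - div_sum)  # interpolate at the post-dividend stock prices
}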
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motifComparision.r
\name{locHist2}
\alias{locHist2}
\title{locHist}
\usage{
locHist2(t1, x, xlab = "Histogram", limits = c(-32, 32))
}
\description{
locHist
}
|
/man/locHist2.Rd
|
permissive
|
alexjgriffith/CCCA
|
R
| false | true | 238 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motifComparision.r
\name{locHist2}
\alias{locHist2}
\title{locHist}
\usage{
locHist2(t1, x, xlab = "Histogram", limits = c(-32, 32))
}
\description{
locHist
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadCLSAData.R
\name{loadCLSAData}
\alias{loadCLSAData}
\title{Load CLSA Comprehensive and Tracking Datasets}
\usage{
loadCLSAData(path)
}
\arguments{
\item{path}{The path to the folder containing the comprehensive and
tracking .csv files}
}
\value{
A list of the comprehensive and tracking CLSA datasets in the following order:
\enumerate{
\item comprehensive
\item tracking
}
}
\description{
Load the most recent comprehensive and tracking data sets. Note that
the .csv files must already be saved on your computer before using this
function. Make sure that the csv files containing the data are named
as follows:
\itemize{
\item comprehensive: cop3_2.csv
\item tracking: tra3_3.csv
}
}
\examples{
\dontrun{
dataList <- loadCLSAData(path = "C:/Users/Documents")
# tracking data set
cop3.2 <- dataList[[1]]
tra3.3 <- dataList[[2]]
}
}
\author{
Phil Boileau, \email{philippe.boileau (at) rimuhc.ca}
}
|
/man/loadCLSAData.Rd
|
permissive
|
gevamaimon/CLSAR
|
R
| false | true | 983 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadCLSAData.R
\name{loadCLSAData}
\alias{loadCLSAData}
\title{Load CLSA Comprehensive and Tracking Datasets}
\usage{
loadCLSAData(path)
}
\arguments{
\item{path}{The path to the folder containing the comprehensive and
tracking .csv files}
}
\value{
A list of the comprehensive and tracking CLSA datasets in the following order:
\enumerate{
\item comprehensive
\item tracking
}
}
\description{
Load the most recent comprehensive and tracking data sets. Note that
the .csv files must already be saved on your computer before using this
function. Make sure that the csv files containing the data are named
as follows:
\itemize{
\item comprehensive: cop3_2.csv
\item tracking: tra3_3.csv
}
}
\examples{
\dontrun{
dataList <- loadCLSAData(path = "C:/Users/Documents")
# tracking data set
cop3.2 <- dataList[[1]]
tra3.3 <- dataList[[2]]
}
}
\author{
Phil Boileau, \email{philippe.boileau (at) rimuhc.ca}
}
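# --- Hedged sketch in plain R (not part of this Rd file): a minimal body
# consistent with the documentation above -- fixed file names, and a list of
# the two data frames in the documented order. The package's real
# implementation may differ.
loadCLSAData_sketch <- function(path) {
  comprehensive <- read.csv(file.path(path, "cop3_2.csv"))
  tracking      <- read.csv(file.path(path, "tra3_3.csv"))
  list(comprehensive, tracking)
}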
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{wmatrix}
\alias{wmatrix}
\title{Model variance-covariance matrix of the multinomial mixed models}
\usage{
wmatrix(M, pr)
}
\arguments{
\item{M}{vector with area sample sizes.}
\item{pr}{matrix with the estimated probabilities for the categories of the
response variable obtained from \code{\link[mme]{prmu}} or \code{\link[mme]{prmu.time}}.}
}
\value{
W a list with the model variance-covariance matrices for each domain.
}
\description{
This function calculates the variance-covariance matrix of the multinomial mixed models.
Three types of multinomial mixed model are considered. The first model (Model 1), with one
random effect in each category of the response variable; Model 2, introducing
independent time effect; Model 3, introducing correlated time effect.
}
\examples{
k=3 #number of categories of the response variable
pp=c(1,1) #vector with the number of auxiliary variables in each category
mod=2 #type of model
data(simdata2)
datar=data.mme(simdata2,k,pp,mod)
initial=datar$initial
mean=prmu.time(datar$n,datar$Xk,initial$beta.0,initial$u1.0,initial$u2.0)
##The model variance-covariance matrix
varcov=wmatrix(datar$n,mean$estimated.probabilities)
}
\references{
Lopez-Vizcaino, ME, Lombardia, MJ and Morales, D (2013).
Multinomial-based small area estimation of labour force indicators.
Statistical Modelling,13,153-178.
}
\seealso{
\code{\link[mme]{data.mme}}, \code{\link[mme]{initial.values}},
\code{\link[mme]{phi.mult}}, \code{\link[mme]{prmu}}, \code{\link[mme]{prmu.time}}
\code{\link[mme]{Fbetaf}}, \code{\link[mme]{phi.direct}},
\code{\link[mme]{sPhikf}}, \code{\link[mme]{ci}},
\code{\link[mme]{modelfit1}}, \code{\link[mme]{msef}},
\code{\link[mme]{mseb}}
}
\keyword{models}
|
/man/wmatrix.Rd
|
no_license
|
cran/mme
|
R
| false | false | 1,810 |
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{wmatrix}
\alias{wmatrix}
\title{Model variance-covariance matrix of the multinomial mixed models}
\usage{
wmatrix(M, pr)
}
\arguments{
\item{M}{vector with area sample sizes.}
\item{pr}{matrix with the estimated probabilities for the categories of the
response variable obtained from \code{\link[mme]{prmu}} or \code{\link[mme]{prmu.time}}.}
}
\value{
W a list with the model variance-covariance matrices for each domain.
}
\description{
This function calculates the variance-covariance matrix of the multinomial mixed models.
Three types of multinomial mixed model are considered. The first model (Model 1), with one
random effect in each category of the response variable; Model 2, introducing
independent time effect; Model 3, introducing correlated time effect.
}
\examples{
k=3 #number of categories of the response variable
pp=c(1,1) #vector with the number of auxiliary variables in each category
mod=2 #type of model
data(simdata2)
datar=data.mme(simdata2,k,pp,mod)
initial=datar$initial
mean=prmu.time(datar$n,datar$Xk,initial$beta.0,initial$u1.0,initial$u2.0)
##The model variance-covariance matrix
varcov=wmatrix(datar$n,mean$estimated.probabilities)
}
\references{
Lopez-Vizcaino, ME, Lombardia, MJ and Morales, D (2013).
Multinomial-based small area estimation of labour force indicators.
Statistical Modelling,13,153-178.
}
\seealso{
\code{\link[mme]{data.mme}}, \code{\link[mme]{initial.values}},
\code{\link[mme]{phi.mult}}, \code{\link[mme]{prmu}}, \code{\link[mme]{prmu.time}}
\code{\link[mme]{Fbetaf}}, \code{\link[mme]{phi.direct}},
\code{\link[mme]{sPhikf}}, \code{\link[mme]{ci}},
\code{\link[mme]{modelfit1}}, \code{\link[mme]{msef}},
\code{\link[mme]{mseb}}
}
\keyword{models}
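# --- Hedged sketch in plain R (not part of this Rd file): for one domain d
# with size M[d] and probability vector p = pr[d, ], the textbook multinomial
# covariance is M[d] * (diag(p) - p p'). wmatrix() plausibly builds one such
# matrix per domain; this is the standard formula, not necessarily mme's
# exact internal code.
wmatrix_sketch <- function(M, pr) {
  lapply(seq_along(M), function(d) {
    p <- pr[d, ]
    M[d] * (diag(p) - p %*% t(p))
  })
}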
|
#
# Author: Joyce Woznica
# Class: IST 719
# Date: 3/16/2020
# Subject: Lab 10, Week 10
#
# Lab 10
#---------------- Package Load -------------------
library(shiny)
server <- function(input, output)
{
output$myPie <- renderPlot(
{
pie (c(8,12,3), main = "Hello World")
})
}
ui <- fluidPage(
mainPanel(plotOutput("myPie"))
)
shinyApp(ui,server)
|
/Labs/Lab10/.Rproj.user/CA2B7130/sources/s-621F4385/D52BC42E-contents
|
no_license
|
jlwoznic/IST719
|
R
| false | false | 364 |
#
# Author: Joyce Woznica
# Class: IST 719
# Date: 3/16/2020
# Subject: Lab 10, Week 10
#
# Lab 10
#---------------- Package Load -------------------
library(shiny)
server <- function(input, output)
{
output$myPie <- renderPlot(
{
pie (c(8,12,3), main = "Hello World")
})
}
ui <- fluidPage(
mainPanel(plotOutput("myPie"))
)
shinyApp(ui,server)
|
|
options(stringsAsFactors=FALSE, width=400, max.print=10^6)
projpath <- getwd()
library(XML)
library(rjson)
library(rlist)
library(dynamicDensity)
xpath_mainData <- Sys.getenv("PATH_DZ_MAIN_DATA")
xpath_scrapedData <- file.path(xpath_mainData, "MLBjsons")
if(!dir.exists(xpath_scrapedData)) {
dir.create(xpath_scrapedData)
}
xpath_scrapedDataDays <- file.path(xpath_scrapedData, "MLBdays")
if(!dir.exists(xpath_scrapedDataDays)) {
dir.create(xpath_scrapedDataDays)
}
xpath_scrapedDataGames <- file.path(xpath_scrapedData, "MLBgames")
if(!dir.exists(xpath_scrapedDataGames)) {
dir.create(xpath_scrapedDataGames)
}
#######################
if(FALSE) {
xpatternFN <- "^game__2019"
xbigDFname <- "2019"
source( file.path(projpath, "_s_readData.R") )
}
# xpatternFN is expected to be set before this point; the if(FALSE) block
# above shows a typical value for a 2019 run.
xfn <- list.files(xpath_scrapedDataGames, pattern=xpatternFN)
length(xfn)
df_list <- list()
ii <- 1
for(ii in 1:length(xfn)) {
load( file.path(xpath_scrapedDataGames, xfn[ii]) )
xxx <- fromJSON( paste(xthis_game_data, collapse=" ") )
xgameDateInfo <- xxx[[ "gameData" ]][[ "datetime" ]]
xVT <- xxx[[ "gameData" ]][[ "teams" ]][[ "away" ]][[ "abbreviation" ]] ; xVT
xHT <- xxx[[ "gameData" ]][[ "teams" ]][[ "home" ]][[ "abbreviation" ]] ; xHT
xall_plays <- xxx[[ "liveData" ]][[ "plays" ]][[ "allPlays" ]]
length(xall_plays)
iiplay <- 1
if( length(xall_plays) > 0 ) {
for(iiplay in 1:length(xall_plays)) {
xthis_play <- xall_plays[[ iiplay ]]
xthis_pitcher <- xthis_play[[ "matchup" ]][[ "pitcher" ]]
xthis_play_events <- xthis_play[[ "playEvents" ]]
}
}
cat(ii, xfn[ii], "\n")
}
object.size(df_list)
|
/__06_week_01/__06b_go_MLB_02clean.R
|
no_license
|
davezes/MAS405_S2021
|
R
| false | false | 1,811 |
r
|
options(stringsAsFactors=FALSE, width=400, max.print=10^6)
projpath <- getwd()
library(XML)
library(rjson)
library(rlist)
library(dynamicDensity)
xpath_mainData <- Sys.getenv("PATH_DZ_MAIN_DATA")
xpath_scrapedData <- file.path(xpath_mainData, "MLBjsons")
if(!dir.exists(xpath_scrapedData)) {
dir.create(xpath_scrapedData)
}
xpath_scrapedDataDays <- file.path(xpath_scrapedData, "MLBdays")
if(!dir.exists(xpath_scrapedDataDays)) {
dir.create(xpath_scrapedDataDays)
}
xpath_scrapedDataGames <- file.path(xpath_scrapedData, "MLBgames")
if(!dir.exists(xpath_scrapedDataGames)) {
dir.create(xpath_scrapedDataGames)
}
#######################
if(FALSE) {
xpatternFN <- "^game__2019"
xbigDFname <- "2019"
source( file.path(projpath, "_s_readData.R") )
}
# xpatternFN is expected to be set before this point; the if(FALSE) block
# above shows a typical value for a 2019 run.
xfn <- list.files(xpath_scrapedDataGames, pattern=xpatternFN)
length(xfn)
df_list <- list()
ii <- 1
for(ii in 1:length(xfn)) {
load( file.path(xpath_scrapedDataGames, xfn[ii]) )
xxx <- fromJSON( paste(xthis_game_data, collapse=" ") )
xgameDateInfo <- xxx[[ "gameData" ]][[ "datetime" ]]
xVT <- xxx[[ "gameData" ]][[ "teams" ]][[ "away" ]][[ "abbreviation" ]] ; xVT
xHT <- xxx[[ "gameData" ]][[ "teams" ]][[ "home" ]][[ "abbreviation" ]] ; xHT
xall_plays <- xxx[[ "liveData" ]][[ "plays" ]][[ "allPlays" ]]
length(xall_plays)
iiplay <- 1
if( length(xall_plays) > 0 ) {
for(iiplay in 1:length(xall_plays)) {
xthis_play <- xall_plays[[ iiplay ]]
xthis_pitcher <- xthis_play[[ "matchup" ]][[ "pitcher" ]]
xthis_play_events <- xthis_play[[ "playEvents" ]]
}
}
cat(ii, xfn[ii], "\n")
}
object.size(df_list)
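# --- Hedged sketch (not in the original script): the loop above extracts
# per-play pieces but never fills df_list. A plausible completion collects one
# row per play and binds everything once at the end; the field names are
# assumptions based on the MLB statsapi JSON structure read above.
collect_play_row <- function(game_file, iiplay, xVT, xHT, xthis_play) {
  data.frame(game = game_file, play = iiplay, visitor = xVT, home = xHT,
             pitcher_id = xthis_play[["matchup"]][["pitcher"]][["id"]],
             n_events = length(xthis_play[["playEvents"]]),
             stringsAsFactors = FALSE)
}
# inside the inner loop: df_list[[length(df_list) + 1]] <- collect_play_row(xfn[ii], iiplay, xVT, xHT, xthis_play)
# after both loops:      big_df <- do.call(rbind, df_list)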
|
#' airqualityES: Air quality measurements in Spain
#'
#' The airqualityES dataset contains daily air quality measurements from 2001 to 2018.
#'
#' @author Jose V. Die \email{jose.die@uco.es}, Jose R. Caro
#'
#' @docType package
#'
#' @name airqualityES
#'
"_PACKAGE"
|
/R/airquaityES.R
|
no_license
|
rOpenSpain/airqualityES
|
R
| false | false | 262 |
r
|
#' airqualityES: Air quality measurements in Spain
#'
#' The airqualityES dataset contains daily air quality measurements from 2001 to 2018.
#'
#' @author Jose V. Die \email{jose.die@uco.es}, Jose R. Caro
#'
#' @docType package
#'
#' @name airqualityES
#'
"_PACKAGE"
|
slr.predict <-
function (y, x, newx=x, polydeg=1, interval,
conf.level=95, no.intercept=FALSE, ndigit=2)
{
if (!is.vector(y)) return("First Argument has to be Numeric Vector")
if(!is.vector(x)) return("Second Argument has to be Numeric Vector")
namResp <- deparse(substitute(y))
namPred <- deparse(substitute(x))
z<-newx
if(polydeg>1) {
X<-matrix(0,nrow=length(x),ncol=polydeg)
for(i in 1:polydeg) X[,i]<-x^i
x<-X
newX<-matrix(0,nrow=length(newx),ncol=polydeg)
for(i in 1:polydeg) newX[,i]<-newx^i
newx<-newX
}
if(no.intercept) fit<-lm(y~x-1)
else fit<-lm(y~x)
if(missing(interval)) est <- predict(fit,newdata=list(x=newx))
else {
if(interval=="PI")
a <- predict(fit,newdata=list(x=newx),interval = "prediction",level=conf.level/100)
if(interval=="CI")
a <- predict(fit,newdata=list(x=newx),interval = "confidence",level=conf.level/100)
if(length(newx)==1) {
est <- a[1]
L <-a[2]
U<-a[3]
}
else {
est <- a[,1]
L <-a[,2]
U<-a[,3]
}
}
txt1 <- matrix(0,length(est), 2+ifelse(missing(interval),0,2))
if(missing(interval)) colnames(txt1) <- c(namPred,"Fit")
else colnames(txt1)<-c(namPred, "Fit", "Lower", "Upper")
rownames(txt1) <- rep("",nrow(txt1))
for(i in 1:length(z)) {
txt1[i,1:2]<-c(z[i],round(est[i],ndigit))
if(!missing(interval))
txt1[i,3:4]<-c(round(L[i], ndigit), round(U[i], ndigit))
}
txt1
}
|
/R/slr.predict.R
|
no_license
|
WolfgangRolke/Resma3
|
R
| false | false | 1,655 |
r
|
slr.predict <-
function (y, x, newx=x, polydeg=1, interval,
conf.level=95, no.intercept=FALSE, ndigit=2)
{
if (!is.vector(y)) return("First Argument has to be Numeric Vector")
if(!is.vector(x)) return("Second Argument has to be Numeric Vector")
namResp <- deparse(substitute(y))
namPred <- deparse(substitute(x))
z<-newx
if(polydeg>1) {
X<-matrix(0,nrow=length(x),ncol=polydeg)
for(i in 1:polydeg) X[,i]<-x^i
x<-X
newX<-matrix(0,nrow=length(newx),ncol=polydeg)
for(i in 1:polydeg) newX[,i]<-newx^i
newx<-newX
}
if(no.intercept) fit<-lm(y~x-1)
else fit<-lm(y~x)
if(missing(interval)) est <- predict(fit,newdata=list(x=newx))
else {
if(interval=="PI")
a <- predict(fit,newdata=list(x=newx),interval = "prediction",level=conf.level/100)
if(interval=="CI")
a <- predict(fit,newdata=list(x=newx),interval = "confidence",level=conf.level/100)
if(length(newx)==1) {
est <- a[1]
L <-a[2]
U<-a[3]
}
else {
est <- a[,1]
L <-a[,2]
U<-a[,3]
}
}
txt1 <- matrix(0,length(est), 2+ifelse(missing(interval),0,2))
if(missing(interval)) colnames(txt1) <- c(namPred,"Fit")
else colnames(txt1)<-c(namPred, "Fit", "Lower", "Upper")
rownames(txt1) <- rep("",nrow(txt1))
for(i in 1:length(z)) {
txt1[i,1:2]<-c(z[i],round(est[i],ndigit))
if(!missing(interval))
txt1[i,3:4]<-c(round(L[i], ndigit), round(U[i], ndigit))
}
txt1
}
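# --- Hedged usage sketch (not in the package sources): a quadratic fit with
# 95% confidence intervals at a few new x values, using slr.predict() as
# defined above; the data are made up purely for illustration.
x_demo <- 1:20
y_demo <- 2 + 0.5 * x_demo + 0.1 * x_demo^2 + rnorm(20)
slr.predict(y_demo, x_demo, newx = c(5, 10, 15), polydeg = 2, interval = "CI")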
|
#' Trend analysis for single-cases data
#'
#' The `trend()` function provides an overview of linear trends in single case
#' data. By default, it provides the intercept and slope of a linear and
#' quadratic regression of measurement time on scores. Models are calculated
#' separately for each phase and across all phases. For more advanced use, you
#' can add regression models using the R-specific formula class.
#'
#' @inheritParams .inheritParams
#' @param first_mt A numeric setting the value for the first measurement-time.
#' Default = 0.
#' @param offset (Deprecated. Please use first_mt). An offset for the first
#' measurement-time of each phase. If `offset = 0`, the phase measurement is
#' handled as MT 1. Default is `offset = -1`, setting the first value of MT to
#' 0.
#' @param model A string or a list of (named) strings each depicting one
#' regression model. This is a formula expression of the standard R class. The
#' parameters of the model are `values`, `mt` and `phase`.
#' @return \item{trend}{A matrix containing the results (Intercept, B and beta)
#' of separate regression models for phase A, phase B, and the whole data.}
#' \item{offset}{Numeric argument from function call (see arguments
#' section).}
#' @author Juergen Wilbert
#' @seealso [describe()]
#' @family regression functions
#' @examples
#'
#' ## Compute the linear and squared regression for a random single-case
#' design <- design(slope = 0.5)
#' matthea <- random_scdf(design)
#' trend(matthea)
#'
#' ## Besides the linear and squared regression models compute two custom models:
#' ## a) a cubic model, and b) the values predicted by the natural logarithm of the
#' ## measurement time.
#' design <- design(slope = 0.3)
#' ben <- random_scdf(design)
#' trend(ben, offset = 0, model = c("Cubic" = values ~ I(mt^3), "Log Time" = values ~ log(mt)))
#'
#' @export
trend <- function(data, dvar, pvar, mvar,
offset = "deprecated",
first_mt = 0,
model = NULL) {
if (is.numeric(offset)) first_mt <- offset + 1
# set attributes to arguments else set to defaults of scdf
if (missing(dvar)) dvar <- dv(data) else dv(data) <- dvar
if (missing(pvar)) pvar <- phase(data) else phase(data) <- pvar
if (missing(mvar)) mvar <- mt(data) else mt(data) <- mvar
data <- .prepare_scdf(data)
phase <- NULL
N <- length(data)
if(N > 1) {
stop("Multiple single-cases are given. ",
"Calculations can only be applied to one single-case data set.\n")
}
data <- data[[1]]
design <- rle(as.character(data[, pvar]))$values
while(any(duplicated(design))) {
design[anyDuplicated(design)] <- paste0(
design[anyDuplicated(design)],
".phase",
anyDuplicated(design)
)
}
phases <- .phasestructure(data, pvar = pvar)
fomulas <- c(
formula(paste0(dvar, " ~ ", mvar)) ,
formula(paste0(dvar, " ~ I(", mvar, "^2)"))
)
fomulas_names <- c("Linear", "Quadratic")
if(!is.null(model)) {
fomulas <- c(fomulas, model)
fomulas_names <- c(fomulas_names, names(model))
}
tmp <- length(design) + 1
rows <- paste0(paste0(rep(fomulas_names, each = tmp), "."), c("ALL", design))
ma <- matrix(NA, nrow = length(rows), ncol = 3)
row.names(ma) <- rows
colnames(ma) <- c("Intercept", "B", "Beta")
ma <- as.data.frame(ma)
for(i_formula in 1:length(fomulas)) {
data_phase <- data
mvar_correction <- min(data_phase[[mvar]], na.rm = TRUE) - first_mt
data_phase[[mvar]] <- data_phase[[mvar]] - mvar_correction
.row <- which(rows == paste0(fomulas_names[i_formula], ".ALL"))
ma[.row, 1:3] <- .beta_weights(lm(fomulas[[i_formula]], data = data_phase))
for(p in 1:length(design)) {
data_phase <- data[phases$start[p]:phases$stop[p], ]
mvar_correction <- min(data_phase[[mvar]], na.rm = TRUE) - first_mt
data_phase[[mvar]] <- data_phase[[mvar]] - mvar_correction
.row <- which(rows == paste0(fomulas_names[i_formula], ".", design[p]))
ma[.row, 1:3] <- .beta_weights(lm(fomulas[[i_formula]], data=data_phase))
}
}
out <- list(
trend = ma,
offset = offset,
first_mt = first_mt,
formulas = fomulas_names,
design = design
)
class(out) <- c("sc_trend")
attr(out, opt("phase")) <- pvar
attr(out, opt("mt")) <- mvar
attr(out, opt("dv")) <- dvar
out
}
|
/R/trend.R
|
no_license
|
cran/scan
|
R
| false | false | 4,395 |
r
|
#' Trend analysis for single-cases data
#'
#' The `trend()` function provides an overview of linear trends in single case
#' data. By default, it provides the intercept and slope of a linear and
#' quadratic regression of measurement time on scores. Models are calculated
#' separately for each phase and across all phases. For more advanced use, you
#' can add regression models using the R-specific formula class.
#'
#' @inheritParams .inheritParams
#' @param first_mt A numeric setting the value for the first measurement-time.
#' Default = 0.
#' @param offset (Deprecated. Please use first_mt). An offset for the first
#' measurement-time of each phase. If `offset = 0`, the phase measurement is
#' handled as MT 1. Default is `offset = -1`, setting the first value of MT to
#' 0.
#' @param model A string or a list of (named) strings each depicting one
#' regression model. This is a formula expression of the standard R class. The
#' parameters of the model are `values`, `mt` and `phase`.
#' @return \item{trend}{A matrix containing the results (Intercept, B and beta)
#' of separate regression models for phase A, phase B, and the whole data.}
#' \item{offset}{Numeric argument from function call (see arguments
#' section).}
#' @author Juergen Wilbert
#' @seealso [describe()]
#' @family regression functions
#' @examples
#'
#' ## Compute the linear and squared regression for a random single-case
#' design <- design(slope = 0.5)
#' matthea <- random_scdf(design)
#' trend(matthea)
#'
#' ## Besides the linear and squared regression models compute two custom models:
#' ## a) a cubic model, and b) the values predicted by the natural logarithm of the
#' ## measurement time.
#' design <- design(slope = 0.3)
#' ben <- random_scdf(design)
#' trend(ben, offset = 0, model = c("Cubic" = values ~ I(mt^3), "Log Time" = values ~ log(mt)))
#'
#' @export
trend <- function(data, dvar, pvar, mvar,
offset = "deprecated",
first_mt = 0,
model = NULL) {
if (is.numeric(offset)) first_mt <- offset + 1
# set attributes to arguments else set to defaults of scdf
if (missing(dvar)) dvar <- dv(data) else dv(data) <- dvar
if (missing(pvar)) pvar <- phase(data) else phase(data) <- pvar
if (missing(mvar)) mvar <- mt(data) else mt(data) <- mvar
data <- .prepare_scdf(data)
phase <- NULL
N <- length(data)
if(N > 1) {
stop("Multiple single-cases are given. ",
"Calculations can only be applied to one single-case data set.\n")
}
data <- data[[1]]
design <- rle(as.character(data[, pvar]))$values
while(any(duplicated(design))) {
design[anyDuplicated(design)] <- paste0(
design[anyDuplicated(design)],
".phase",
anyDuplicated(design)
)
}
phases <- .phasestructure(data, pvar = pvar)
fomulas <- c(
formula(paste0(dvar, " ~ ", mvar)) ,
formula(paste0(dvar, " ~ I(", mvar, "^2)"))
)
fomulas_names <- c("Linear", "Quadratic")
if(!is.null(model)) {
fomulas <- c(fomulas, model)
fomulas_names <- c(fomulas_names, names(model))
}
tmp <- length(design) + 1
rows <- paste0(paste0(rep(fomulas_names, each = tmp), "."), c("ALL", design))
ma <- matrix(NA, nrow = length(rows), ncol = 3)
row.names(ma) <- rows
colnames(ma) <- c("Intercept", "B", "Beta")
ma <- as.data.frame(ma)
for(i_formula in 1:length(fomulas)) {
data_phase <- data
mvar_correction <- min(data_phase[[mvar]], na.rm = TRUE) - first_mt
data_phase[[mvar]] <- data_phase[[mvar]] - mvar_correction
.row <- which(rows == paste0(fomulas_names[i_formula], ".ALL"))
ma[.row, 1:3] <- .beta_weights(lm(fomulas[[i_formula]], data = data_phase))
for(p in 1:length(design)) {
data_phase <- data[phases$start[p]:phases$stop[p], ]
mvar_correction <- min(data_phase[[mvar]], na.rm = TRUE) - first_mt
data_phase[[mvar]] <- data_phase[[mvar]] - mvar_correction
.row <- which(rows == paste0(fomulas_names[i_formula], ".", design[p]))
ma[.row, 1:3] <- .beta_weights(lm(fomulas[[i_formula]], data=data_phase))
}
}
out <- list(
trend = ma,
offset = offset,
first_mt = first_mt,
formulas = fomulas_names,
design = design
)
class(out) <- c("sc_trend")
attr(out, opt("phase")) <- pvar
attr(out, opt("mt")) <- mvar
attr(out, opt("dv")) <- dvar
out
}
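# --- Hedged sketch (not in the package sources): trend() relies on the
# internal helper .beta_weights(). For the single-predictor models fitted
# above, a plausible version returns the intercept, the raw slope B, and the
# standardized slope beta = B * sd(x) / sd(y); scan's actual helper may
# differ.
beta_weights_sketch <- function(model) {
  b <- coef(model)
  mf <- model$model  # model frame: column 1 = response, column 2 = predictor
  beta <- b[2] * sd(mf[[2]]) / sd(mf[[1]])
  c(intercept = unname(b[1]), B = unname(b[2]), beta = unname(beta))
}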
|
###############################################################################
################## FIGURE 5: LEARNING TRIAL LOOKING IN EXP 1 ##################
###############################################################################
quartz(width=4,height=3,title = "Learning")
p <- ggplot(train.data.e1.notd, aes(x=age.grp, y=prop,colour=trial.type,
group=trial.type)) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.1),
size=.8)+
geom_line() +
scale_x_continuous(limits = c(.9,4.3), breaks=seq(1,3.5,.5),
name = "Age(years)",
labels = c("1", "1.5", "2","2.5","3","3.5")) +
scale_y_continuous(limits = c(0,1), breaks=seq(0,1,.2),
name = "Prop. Looks to ROI") +
theme_bw(base_size=12) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_dl(aes(label=trial.type),
method=list(dl.trans(x=x +.2),"last.qp",cex=.8)) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
quartz(width=12,height=3,title = "Learning")
p <- ggplot(filter(train.data.subj.trial ,aoi != "TD",exp == "Balanced",
age.grp < 4),
aes(x=age.grp, y=na.mean,colour=aoi)) +
facet_grid(. ~ trial.num) +
geom_pointrange(aes(ymin = na.mean-ci.low,
ymax = na.mean+ci.high),
position = position_dodge(.1),
size=.8)+
geom_line() +
scale_x_continuous(limits = c(.9,4.4), breaks=seq(1,3.5,.5),
name = "Age(years)",
labels = c("1", "1.5", "2","2.5","3","3.5")) +
scale_y_continuous(limits = c(0,1), breaks=seq(0,1,.2),
name = "Prop. Looks to ROI") +
theme_bw(base_size=12) +
#theme(panel.grid.major = element_blank()) +
geom_dl(aes(label=aoi),
method=list(dl.trans(x=x +.2),"last.qp",cex=.8)) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
###############################################################################
################# FIGURE 6: LEARNING AND TEST PROPS. IN EXP 1 #################
###############################################################################
quartz(width=10,height=4.5,title = "Test Data")
p <- ggplot(preflook.data.e1,
aes(x=age.grp, y=prop,colour=trial.type))+
facet_grid(. ~ trial.type) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.3),
size=.8)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=trial.type)) +
scale_x_continuous(breaks=c(1,1.5,2,2.5,3,3.5),
name = "Age(years)",
labels = c("1","1.5","2","2.5","3","3.5")) +
scale_y_continuous(limits = c(.4,1), breaks=seq(.4,1,.1),
name = "Prop. Looks to Target vs. Competitor") +
theme_bw(base_size=18) +
theme(legend.position=c(.95,.6),legend.title=element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
quartz(width=10,height=4.5,title = "Test Data")
p <- ggplot(filter(preflook.data.e1,trial.type != "Learning"),
aes(x=age.grp, y=prop,colour=trial.type))+
facet_grid(. ~ trial.type) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.3),
size=.8)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=trial.type)) +
scale_x_continuous(breaks=c(1,1.5,2,2.5,3,3.5),
name = "Age(years)",
labels = c("1","1.5","2","2.5","3","3.5")) +
scale_y_continuous(limits = c(.4,1), breaks=seq(.4,1,.1),
name = "Prop. Looks to Target vs. Competitor") +
theme_bw(base_size=18) +
theme(legend.position=c(.95,.6),legend.title=element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
quartz(width=4,height=4,title = "Test Data")
p <- ggplot(filter(preflook.data.e1,trial.type=="Novel"),
aes(x=age.grp, y=prop,colour=trial.type))+
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.3),
size=1.25)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=trial.type),size=1.25) +
scale_x_continuous(breaks=c(1,1.5,2,2.5,3,3.5),
name = "Age(years)",
labels = c("1","1.5","2","2.5","3","3.5")) +
scale_y_continuous(limits = c(.4,1), breaks=seq(.4,1,.1),
name = "Prop. Looks to Target") +
theme_bw(base_size=16) +
theme(legend.position=c(.95,.6),legend.title=element_blank()) +
scale_color_manual(values=man_cols[2],breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
###############################################################################
############## FIGURE 10: COMPARING SALIENCE AT LEARNING AND TEST #############
###############################################################################
quartz(width=10,height=3.5,title = "Test Data")
p <- ggplot(preflook.data.e1and2,
aes(x=age.grp, y=prop,colour=exp, lty=exp))+
facet_grid(. ~ trial.type) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.1),
size=.8)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=exp)) +
scale_x_continuous(limits = c(.9,2.5), breaks=c(1,1.5,2),name = "Age(years)",
labels = c("1", "1.5", "2")) +
scale_y_continuous(limits = c(.25,1), breaks=seq(.3,1,.1),
name = "Prop. Looks to Target") +
theme_bw(base_size=14) +
theme(legend.position="none",
panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
geom_dl(aes(label=exp),method=list(dl.trans(x=x +.3),"last.qp",cex=1)) +
scale_color_manual(values=man_cols,breaks=c("2","1.5","1"))
print(p)
|
/analysis/journal/dot_graphs.R
|
no_license
|
dyurovsky/ATT-WORD
|
R
| false | false | 6,322 |
r
|
###############################################################################
################## FIGURE 5: LEARNING TRIAL LOOKING IN EXP 1 ##################
###############################################################################
quartz(width=4,height=3,title = "Learning")
p <- ggplot(train.data.e1.notd, aes(x=age.grp, y=prop,colour=trial.type,
group=trial.type)) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.1),
size=.8)+
geom_line() +
scale_x_continuous(limits = c(.9,4.3), breaks=seq(1,3.5,.5),
name = "Age(years)",
labels = c("1", "1.5", "2","2.5","3","3.5")) +
scale_y_continuous(limits = c(0,1), breaks=seq(0,1,.2),
name = "Prop. Looks to ROI") +
theme_bw(base_size=12) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_dl(aes(label=trial.type),
method=list(dl.trans(x=x +.2),"last.qp",cex=.8)) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
quartz(width=12,height=3,title = "Learning")
p <- ggplot(filter(train.data.subj.trial ,aoi != "TD",exp == "Balanced",
age.grp < 4),
aes(x=age.grp, y=na.mean,colour=aoi)) +
facet_grid(. ~ trial.num) +
geom_pointrange(aes(ymin = na.mean-ci.low,
ymax = na.mean+ci.high),
position = position_dodge(.1),
size=.8)+
geom_line() +
scale_x_continuous(limits = c(.9,4.4), breaks=seq(1,3.5,.5),
name = "Age(years)",
labels = c("1", "1.5", "2","2.5","3","3.5")) +
scale_y_continuous(limits = c(0,1), breaks=seq(0,1,.2),
name = "Prop. Looks to ROI") +
theme_bw(base_size=12) +
#theme(panel.grid.major = element_blank()) +
geom_dl(aes(label=aoi),
method=list(dl.trans(x=x +.2),"last.qp",cex=.8)) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
###############################################################################
################# FIGURE 6: LEARNING AND TEST PROPS. IN EXP 1 #################
###############################################################################
quartz(width=10,height=4.5,title = "Test Data")
p <- ggplot(preflook.data.e1,
aes(x=age.grp, y=prop,colour=trial.type))+
facet_grid(. ~ trial.type) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.3),
size=.8)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=trial.type)) +
scale_x_continuous(breaks=c(1,1.5,2,2.5,3,3.5),
name = "Age(years)",
labels = c("1","1.5","2","2.5","3","3.5")) +
scale_y_continuous(limits = c(.4,1), breaks=seq(.4,1,.1),
name = "Prop. Looks to Target vs. Competitor") +
theme_bw(base_size=18) +
theme(legend.position=c(.95,.6),legend.title=element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
quartz(width=10,height=4.5,title = "Test Data")
p <- ggplot(filter(preflook.data.e1,trial.type != "Learning"),
aes(x=age.grp, y=prop,colour=trial.type))+
facet_grid(. ~ trial.type) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.3),
size=.8)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=trial.type)) +
scale_x_continuous(breaks=c(1,1.5,2,2.5,3,3.5),
name = "Age(years)",
labels = c("1","1.5","2","2.5","3","3.5")) +
scale_y_continuous(limits = c(.4,1), breaks=seq(.4,1,.1),
name = "Prop. Looks to Target vs. Competitor") +
theme_bw(base_size=18) +
theme(legend.position=c(.95,.6),legend.title=element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values=man_cols,breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
quartz(width=4,height=4,title = "Test Data")
p <- ggplot(filter(preflook.data.e1,trial.type=="Novel"),
aes(x=age.grp, y=prop,colour=trial.type))+
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.3),
size=1.25)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=trial.type),size=1.25) +
scale_x_continuous(breaks=c(1,1.5,2,2.5,3,3.5),
name = "Age(years)",
labels = c("1","1.5","2","2.5","3","3.5")) +
scale_y_continuous(limits = c(.4,1), breaks=seq(.4,1,.1),
name = "Prop. Looks to Target") +
theme_bw(base_size=16) +
theme(legend.position=c(.95,.6),legend.title=element_blank()) +
scale_color_manual(values=man_cols[2],breaks=c("3.5","3","2.5","2","1.5","1"))
print(p)
###############################################################################
############## FIGURE 10: COMPARING SALIENCE AT LEARNING AND TEST #############
###############################################################################
quartz(width=10,height=3.5,title = "Test Data")
p <- ggplot(preflook.data.e1and2,
aes(x=age.grp, y=prop,colour=exp, lty=exp))+
facet_grid(. ~ trial.type) +
geom_pointrange(aes(ymin = prop-cih,
ymax = prop+cih),
position = position_dodge(.1),
size=.8)+
geom_hline(aes(yintercept=.5),lty=2) +
geom_line(aes(group=exp)) +
scale_x_continuous(limits = c(.9,2.5), breaks=c(1,1.5,2),name = "Age(years)",
labels = c("1", "1.5", "2")) +
scale_y_continuous(limits = c(.25,1), breaks=seq(.3,1,.1),
name = "Prop. Looks to Target") +
theme_bw(base_size=14) +
theme(legend.position="none",
panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
geom_dl(aes(label=exp),method=list(dl.trans(x=x +.3),"last.qp",cex=1)) +
scale_color_manual(values=man_cols,breaks=c("2","1.5","1"))
print(p)
|
library(archetypes)
library(lattice)
library(doParallel)
library(ggplot2)
library(dplyr)
registerDoParallel()
getDoParWorkers()
library(readr)
library(reshape2)
a<-"~/Archetypes/AfSIS"
#Read mir data
mir <- read_csv("~/Dropbox/AfSIS_reporting_data/Seperated_datasets/Calibration_Htsxt_MIR.csv")
#Read chem data
chem<-read_csv("~/Dropbox/AfSIS_reporting_data/Seperated_datasets/AfSIS_reference_data.csv")
setwd(a)
set.seed(8970)
#get number of mir columns
n<-ncol(mir)
#fit archetypes
a <- stepArchetypes (mir[,-c(1,n)],k=1:10,verbose = TRUE, nrep=1)
#fit robust archetype
#ra <- robustArchetypes (mir[,-1],k=1:17,verbose = TRUE)
png(file="Scree plots.png",height=500,width=800)
screeplot(a)
dev.off()
#According to the elbow criterion, k = 3 (or perhaps k = 6 or 8) is the best number of archetypes
#Following Occam's razor we use 3 archetypes
a3 <- bestModel(a[[3]])
#Transpose the three archetypes for better readability
param<-t(parameters(a3))
#Store the parameters
write.table(param, file="Parameters.csv",row.names=FALSE)
#atypes <- apply(coef(a3, "alphas"), 2, which.max)
#Show simplex plot
par(mfrow=c(1,1))
png(file="Simplexplot3.png",height=500,width=1200)
simplexplot(a3, show_direction = FALSE, show_points =TRUE,radius=400,points_col="grey")
dev.off()
#Determine the archetypes
SSN<-as.vector(mir[,1])
arch.grps <- as.data.frame(cbind(SSN,paste0("Archetype.",apply(predict(a3,mir[,-c(1,n)]),1,which.max))))
colnames(arch.grps) <- c("SSN","archetypes" )
#Use barplot in relation to the original data:
png(file="Archetype_barplot3.png",height=500,width=1200)
barplot(a3, mir[,-c(1,n)], percentiles = FALSE)
dev.off()
#Or use the original raw spectra to show peaks
mir.arch<-merge(arch.grps,mir)
wave<-as.numeric(substr(colnames(mir.arch[,-c(1:2)]),2,19))
colnames(mir.arch) <- c("SSN","archetypes",wave)
spec.melt<-melt(mir.arch,id=c("SSN","archetypes"))
#By spectra
p<-ggplot(data=spec.melt, aes(x=as.numeric(as.vector(variable)),y=value,group=SSN))+
geom_line(size=0.34,aes(col=as.numeric(variable)))+scale_colour_gradient(high="red",low="blue")+
ggtitle("Archetypes for AfSIS reference set raw MIR spectra")+
xlim(c(4000,600))+
ylim(c(0,3))+
xlab(expression("Wavenumbers cm"^-1))+
ylab("Absorbance")+
#theme with white background
theme_bw() +
#eliminates background, gridlines, and chart border
theme(plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
)
p<-p+theme(legend.position = "none")
p<-p+facet_wrap(~archetypes,ncol=1)
png(file="Archetypes raw spectra.png")
p
dev.off()
#Aggregate
mir0<-mir.arch[,-1]
ag<-aggregate(.~archetypes,data=mir0,mean)
ag.melt<-melt(ag,id="archetypes")
p<-ggplot(data=ag.melt,aes(x=as.numeric(as.vector(variable)),y=value,color=archetypes)) + geom_line()+
ggtitle("Averaged raw MIR spectra by archetype")+
xlim(c(4000,600))+
ylim(c(0,2))+
xlab(expression("Wavenumbers cm"^-1))+
ylab("Absorbance")+
#theme with white background
theme_bw() +
#eliminates background, gridlines, and chart border
theme(plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
)
p<-p+theme(legend.justification=c(0,1),legend.position = c(0,1))
png(file="Archetypes Averaged raw spectra.png")
p
dev.off()
#Merge arch.grps with chem data
arch<-merge(chem, arch.grps)
arch.s<-with(arch,table(paste(Country,Site,sep="."),archetypes))
#View some exploratory plots showing the distribution of selected soil properties across the obtained archetypes
with(arch,bwplot(m3.Al~archetypes))
with(arch,bwplot(m3.Ca~archetypes))
with(arch,bwplot(ExAc~archetypes))
with(arch,bwplot(Na~archetypes))
with(arch,bwplot(psa.c4sand~archetypes))
with(arch,bwplot(pH~archetypes))
with(arch,bwplot(Total.Carbon~archetypes))
with(arch,bwplot(psa.c4clay~archetypes))
#which sites dominate archetype 3
q<-which(arch.s[,3]>29)
q
#Save the spectral archetypes
write.table(arch.grps, file="AfSIS spec archtypes classes3.csv", sep=",",row.names=FALSE)
|
/afsis_spectral_archetype_analysis.R
|
no_license
|
asila/Soil_Archetypes
|
R
| false | false | 4,135 |
r
|
library(archetypes)
library(lattice)
library(doParallel)
library(ggplot2)
library(dplyr)
registerDoParallel()
getDoParWorkers()
library(readr)
library(reshape2)
a<-"~/Archetypes/AfSIS"
#Read mir data
mir <- read_csv("~/Dropbox/AfSIS_reporting_data/Seperated_datasets/Calibration_Htsxt_MIR.csv")
#Read chem data
chem<-read_csv("~/Dropbox/AfSIS_reporting_data/Seperated_datasets/AfSIS_reference_data.csv")
setwd(a)
set.seed(8970)
#get number of mir columns
n<-ncol(mir)
#fit archetypes
a <- stepArchetypes (mir[,-c(1,n)],k=1:10,verbose = TRUE, nrep=1)
#fit robust archetype
#ra <- robustArchetypes (mir[,-1],k=1:17,verbose = TRUE)
png(file="Scree plots.png",height=500,width=800)
screeplot(a)
dev.off()
#According to the elbow criterion, k = 3 (or perhaps k = 6 or 8) is the best number of archetypes
#Following Occam's razor we use 3 archetypes
a3 <- bestModel(a[[3]])
#Transpose the three archetypes for better readability
param<-t(parameters(a3))
#Store the parameters
write.table(param, file="Parameters.csv",row.names=FALSE)
#atypes <- apply(coef(a3, "alphas"), 2, which.max)
#Show simplex plot
par(mfrow=c(1,1))
png(file="Simplexplot3.png",height=500,width=1200)
simplexplot(a3, show_direction = FALSE, show_points =TRUE,radius=400,points_col="grey")
dev.off()
#Determine the archetypes
SSN<-as.vector(mir[,1])
arch.grps <- as.data.frame(cbind(SSN,paste0("Archetype.",apply(predict(a3,mir[,-c(1,n)]),1,which.max))))
colnames(arch.grps) <- c("SSN","archetypes" )
#Use barplot in relation to the original data:
png(file="Archetype_barplot3.png",height=500,width=1200)
barplot(a3, mir[,-c(1,n)], percentiles = FALSE)
dev.off()
#Or use the original raw spectra to show peaks
mir.arch<-merge(arch.grps,mir)
wave<-as.numeric(substr(colnames(mir.arch[,-c(1:2)]),2,19))
colnames(mir.arch) <- c("SSN","archetypes",wave)
spec.melt<-melt(mir.arch,id=c("SSN","archetypes"))
#By spectra
p<-ggplot(data=spec.melt, aes(x=as.numeric(as.vector(variable)),y=value,group=SSN))+
geom_line(size=0.34,aes(col=as.numeric(variable)))+scale_colour_gradient(high="red",low="blue")+
ggtitle("Archetypes for AfSIS reference set raw MIR spectra")+
xlim(c(4000,600))+
ylim(c(0,3))+
xlab(expression("Wavenumbers cm"^-1))+
ylab("Absorbance")+
#theme with white background
theme_bw() +
#eliminates background, gridlines, and chart border
theme(plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
)
p<-p+theme(legend.position = "none")
p<-p+facet_wrap(~archetypes,ncol=1)
png(file="Archetypes raw spectra.png")
p
dev.off()
#Aggregate
mir0<-mir.arch[,-1]
ag<-aggregate(.~archetypes,data=mir0,mean)
ag.melt<-melt(ag,id="archetypes")
p<-ggplot(data=ag.melt,aes(x=as.numeric(as.vector(variable)),y=value,color=archetypes)) + geom_line()+
ggtitle("Averaged raw MIR spectra by archetype")+
xlim(c(4000,600))+
ylim(c(0,2))+
xlab(expression("Wavenumbers cm"^-1))+
ylab("Absorbance")+
#theme with white background
theme_bw() +
#eliminates background, gridlines, and chart border
theme(plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
)
p<-p+theme(legend.justification=c(0,1),legend.position = c(0,1))
png(file="Archetypes Averaged raw spectra.png")
p
dev.off()
#Merge arch.grps with chem data
arch<-merge(chem, arch.grps)
arch.s<-with(arch,table(paste(Country,Site,sep="."),archetypes))
#View some exploratory plots showing the distribution of selected soil properties across the obtained archetypes
with(arch,bwplot(m3.Al~archetypes))
with(arch,bwplot(m3.Ca~archetypes))
with(arch,bwplot(ExAc~archetypes))
with(arch,bwplot(Na~archetypes))
with(arch,bwplot(psa.c4sand~archetypes))
with(arch,bwplot(pH~archetypes))
with(arch,bwplot(Total.Carbon~archetypes))
with(arch,bwplot(psa.c4clay~archetypes))
#which sites dominate archetype 3
q<-which(arch.s[,3]>29)
q
#Save the spectral archetypes
write.table(arch.grps, file="AfSIS spec archtypes classes3.csv", sep=",",row.names=FALSE)
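# --- Hedged follow-up (illustrative): per-sample archetype weights (alphas)
# can also be inspected directly -- each row sums to 1 across the three
# archetypes, so the which.max() assignment above just picks the dominant weight.
alphas <- coef(a3, "alphas")
head(round(alphas, 3))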
|
library(pheatmap)
library(DESeq2)
library(stringr)  # needed for str_split_fixed() below
##assumes environment has resOrdered and countData loaded from short_read_deseq.R
genes <- rownames(resOrdered[1:30, ])
countData_filtered <- countData[match(genes, rownames(countData)),]
rownames(countData_filtered) <- (str_split_fixed(rownames(countData_filtered),'\\|',2)[, 2])
#view(countData_filtered)
pheatmap(countData_filtered, show_rownames=T,
cluster_cols=T,
cluster_rows=T,
scale="row",
clustering_distance_rows="euclidean",
clustering_distance_cols="euclidean",
clustering_method="complete",
border_color=FALSE,
cex=0.7)
|
/short_read_code/heatmap.R
|
no_license
|
qhauck16/Quinn-Summer-2020
|
R
| false | false | 622 |
r
|
library(pheatmap)
library(DESeq2)
library(stringr)  # needed for str_split_fixed() below
##assumes environment has resOrdered and countData loaded from short_read_deseq.R
genes <- rownames(resOrdered[1:30, ])
countData_filtered <- countData[match(genes, rownames(countData)),]
rownames(countData_filtered) <- (str_split_fixed(rownames(countData_filtered),'\\|',2)[, 2])
#view(countData_filtered)
pheatmap(countData_filtered, show_rownames=T,
cluster_cols=T,
cluster_rows=T,
scale="row",
clustering_distance_rows="euclidean",
clustering_distance_cols="euclidean",
clustering_method="complete",
border_color=FALSE,
cex=0.7)
|
\name{trclcomp}
\alias{trclcomp}
\title{Tree-Clustering Comparison}
\description{
This function compares the within-group variation for groups formed by
tree partitioning and unconstrained clustering.
The results are plotted and returned invisibly.
}
\usage{
trclcomp(x, method = "com", km = TRUE, mrt = TRUE)
}
\arguments{
\item{x}{ Rpart object with method "mrt" -- see \code{\link{rpart}}}
\item{method}{The clustering method for the unconstrained clustering.}
\item{km}{ If \code{TRUE} a K-Means clustering is compared with the multivariate tree partitioning.
}
\item{mrt}{ If \code{TRUE} an additional K-Means clustering with a starting configuration based on
the multivariate tree partitioning is generated.
}
}
\details{
The within-group variation for groups formed by multivariate tree partitioning and
unconstrained clusterings are compared for all sizes of the hierarchy of tree
partitions.
}
\value{
Returns a list (invisibly) of the within-tree and within-cluster variation for all tree sizes.
}
\references{
De'ath G. (2002)
Multivariate Regression Trees : A New Technique for Constrained Classification Analysis.
Ecology 83(4):1103-1117.
}
\examples{
data(spider)
fit <- mvpart(data.matrix(spider[,1:12])~herbs+reft+moss+sand+twigs+water,spider)
trclcomp(fit)
}
\keyword{ multivariate }%-- one or more ...
|
/man/trclcomp.Rd
|
no_license
|
mvignon/mvpart
|
R
| false | false | 1,350 |
rd
|
\name{trclcomp}
\alias{trclcomp}
\title{Tree-Clustering Comparison}
\description{
This function compares the within-group variation for groups formed by
tree partitioning and unconstrained clustering.
The results are plotted and returned invisibly.
}
\usage{
trclcomp(x, method = "com", km = TRUE, mrt = TRUE)
}
\arguments{
\item{x}{ Rpart object with method "mrt" -- see \code{\link{rpart}}}
\item{method}{The clustering method for the unconstrained clustering.}
\item{km}{ If \code{TRUE} a K-Means clustering is compared with the multivariate tree partitioning.
}
\item{mrt}{ If \code{TRUE} an additional K-Means clustering with a starting configuration based on
the multivariate tree partitioning is generated.
}
}
\details{
The within-group variation for groups formed by multivariate tree partitioning and
unconstrained clusterings are compared for all sizes of the hierarchy of tree
partitions.
}
\value{
Returns a list (invisibly) of the within-tree and within-cluster variation for all tree sizes.
}
\references{
De'ath G. (2002)
Multivariate Regression Trees : A New Technique for Constrained Classification Analysis.
Ecology 83(4):1103-1117.
}
\examples{
data(spider)
fit <- mvpart(data.matrix(spider[,1:12])~herbs+reft+moss+sand+twigs+water,spider)
trclcomp(fit)
}
\keyword{ multivariate }%-- one or more ...
|
#' Plot a fitted trajectory
#'
#' @param x an object returned from \code{\link{fit_trajectory}}
#' @param center should the trajectory be centered around the median WHO standard? This is equivalent to plotting the age difference score (like height-for-age difference - HAD)
#' @param x_range a vector specifying the range (min, max) that the superposed growth standard should span on the x-axis
#' @param width width of the plot
#' @param height height of the plot
#' @param hover variable names in \code{x$data} to show on hover for each point (only variables with non-NA data will be shown)
#' @param checkpoints should the checkpoints be plotted (if available)?
#' @param p centiles at which to draw the WHO polygons
#' @param x_units units of age x-axis (days, months, or years)
#' @param \ldots additional parameters passed to \code{\link{figure}}
#' @examples
#' mod <- get_fit(cpp, y_var = "wtkg", method = "rlm")
#' fit <- fit_trajectory(subset(cpp, subjid == 2), mod)
#' plot(fit)
#' plot(fit, x_units = "years")
#' plot(fit, center = TRUE)
#' plot(fit, hover = c("wtkg", "bmi", "waz", "haz"))
#' @export
plot.fittedTrajectory <- function(x, center = FALSE, x_range = NULL,
width = 500, height = 520, hover = NULL, checkpoints = TRUE,
p = 100 * pnorm(-3:0),
x_units = c("days", "months", "years"), ...) {
x_units <- match.arg(x_units)
x_denom <- switch(x_units,
days = 1,
months = 365.25 / 12,
years = 365.25)
if(nrow(x$xy) == 0)
return(empty_plot(paste0("No '", x$y_var, "' vs. '", x$x_var, "' data for this subject")))
if(is.null(x_range)) {
x_range <- range(x$xy$x, na.rm = TRUE)
x_range <- x_range + c(-1, 1) * diff(x_range) * 0.07
}
# if(missing(hover)) {
# hover <- names(x$data)[sapply(x$data, function(x) !all(is.na(x)))]
# hover <- x$data[x$xy$idx, hover]
# } else
if(is.null(hover))
hover <- c(x$x_var, x$y_var)
hover <- intersect(names(x$data), hover)
if(length(hover) == 0) {
hover <- NULL
} else {
hover <- hover[sapply(x$data[,hover], function(x) !all(is.na(x)))]
hover <- x$data[x$xy$idx, hover]
}
ylab <- hbgd::hbgd_labels[[x$y_var]]
if(center) {
for(el in c("xy", "fitgrid", "checkpoint", "holdout")) {
if(!is.null(x[[el]]))
x[[el]]$y <- x[[el]]$y - who_centile2value(x[[el]]$x, p = 50,
x_var = x$x_var, y_var = x$y_var, sex = x$sex)
if(!is.null(x[[el]]$yfit))
x[[el]]$yfit <- x[[el]]$yfit - who_centile2value(x[[el]]$x, p = 50,
x_var = x$x_var, y_var = x$y_var, sex = x$sex)
}
ylab <- paste(ylab, "(WHO median-centered)")
}
xlab <- hbgd::hbgd_labels[[x$x_var]]
if(x_units == "months")
xlab <- gsub("\\(days\\)", "(months)", xlab)
if(x_units == "years")
xlab <- gsub("\\(days\\)", "(years)", xlab)
fig <- figure(width = width, height = height,
xlab = xlab, ylab = ylab, logo = NULL, ...) %>%
ly_who(x = seq(x_range[1], x_range[2], length = 100), center = center,
x_var = x$x_var, y_var = x$y_var, sex = x$sex, p = p, x_units = x_units) %>%
rbokeh::ly_points(x / x_denom, y, hover = hover, data = x$xy, color = "black")
if(!is.null(x$fitgrid)) {
fig <- fig %>%
rbokeh::ly_lines(x / x_denom, y, data = x$fitgrid, color = "black") %>%
rbokeh::ly_points(x / x_denom, yfit, data = x$xy, color = "black", glyph = 19, size = 4)
}
if(!is.null(x$holdout))
fig <- fig %>%
rbokeh::ly_points(x / x_denom, y, data = x$holdout, color = "red")
if(!all(is.na(x$checkpoint$y)) && checkpoints) {
x$checkpoint <- subset(x$checkpoint, !is.na(y))
x$checkpoint <- data.frame(lapply(x$checkpoint, unname))
x$checkpoint$zcat <- as.character(x$checkpoint$zcat)
fig <- fig %>%
rbokeh::ly_points(x / x_denom, y, size = 15, hover = c("zcat", "x"),
data = x$checkpoint, glyph = 13, color = "black", alpha = 0.6)
}
fig
}
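# --- Hedged sketch (hbgd defines empty_plot() elsewhere in the package): as
# used above it plausibly returns a blank rbokeh figure carrying only a text
# annotation; the real helper may differ.
empty_plot_sketch <- function(msg) {
  figure(logo = NULL, xlab = "", ylab = "") %>%
    rbokeh::ly_text(0, 0, text = msg)
}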
#' Plot a fitted trajectory on z-score scale
#'
#' @param x an object returned from \code{\link{fit_trajectory}}
#' @param x_range a vector specifying the range (min, max) that the superposed z-score bands should span on the x-axis
#' @param nadir should a guide be added to the plot showing the location of the nadir?
#' @param recovery age in days at which to plot recovery from nadir (only valid if nadir is TRUE) - if NULL (default), will not be plotted
#' @param width width of the plot
#' @param height height of the plot
#' @param hover variable names in \code{x$data} to show on hover for each point (only variables with non-NA data will be shown)
#' @param checkpoints should the checkpoints be plotted (if available)?
#' @param z z-scores at which to draw the z-score bands
#' @param x_units units of age x-axis (days, months, or years)
#' @param \ldots additional parameters passed to \code{\link{figure}}
#' @examples
#' mod <- get_fit(cpp, y_var = "wtkg", method = "rlm")
#' fit <- fit_trajectory(subset(cpp, subjid == 2), mod)
#' plot_z(fit)
#' @export
plot_z <- function(x, x_range = NULL, nadir = FALSE, recovery = NULL,
width = 500, height = 520,
hover = NULL, checkpoints = TRUE, z = -3:0,
x_units = c("days", "months", "years"), ...) {
x_units <- match.arg(x_units)
x_denom <- switch(x_units,
days = 1,
months = 365.25 / 12,
years = 365.25)
if(is.null(x$xy$z))
return(empty_plot("No z transformation data for this subject"))
if(is.null(x_range)) {
x_range <- range(x$xy$x, na.rm = TRUE)
x_range <- x_range + c(-1, 1) * diff(x_range) * 0.07
}
if(is.null(hover)) {
y_var_out <- x$y_var
if(x$y_var == "htcm")
y_var_out <- "haz"
if(x$y_var == "wtkg")
y_var_out <- "waz"
hover <- c(x$x_var, y_var_out)
}
hover <- intersect(names(x$data), hover)
if(length(hover) == 0) {
hover <- NULL
} else {
hover <- hover[sapply(x$data[,hover], function(x) !all(is.na(x)))]
hover <- x$data[x$xy$idx, hover]
}
xlab <- hbgd::hbgd_labels[[x$x_var]]
if(x_units == "months")
xlab <- gsub("\\(days\\)", "(months)", xlab)
if(x_units == "years")
xlab <- gsub("\\(days\\)", "(years)", xlab)
ylab <- paste(hbgd::hbgd_labels[[x$y_var]], "z-score")
fig <- figure(width = width, height = height,
xlab = xlab, ylab = ylab, logo = NULL, ...) %>%
ly_zband(x = c(x_range[1], x_range[2]), z = z,
color = ifelse(x$sex == "Male", "blue", "red"), x_units = x_units) %>%
rbokeh::ly_points(x / x_denom, z, hover = hover, data = x$xy, color = "black")
if(!is.null(x$fitgrid)) {
fig <- fig %>%
rbokeh::ly_lines(x / x_denom, z, data = x$fitgrid, color = "black") %>%
rbokeh::ly_points(x / x_denom, zfit, data = x$xy, color = "black",
glyph = 19, size = 4)
}
if(!is.null(x$holdout))
fig <- fig %>%
rbokeh::ly_points(x / x_denom, z, data = x$holdout, color = "red")
if(!all(is.na(x$checkpoint$y)) && checkpoints) {
x$checkpoint <- subset(x$checkpoint, !is.na(y))
fig <- fig %>%
rbokeh::ly_points(x / x_denom, z, size = 15, hover = zcat, data = x$checkpoint, glyph = 13, color = "black", alpha = 0.6)
}
if(nadir) {
nadir <- get_nadir(x)
if(!is.na(nadir$at)) {
fig <- fig %>%
rbokeh::ly_segments(nadir$at / x_denom, 0, nadir$at / x_denom, nadir$mag, line_width = 5, color = "red", alpha = 0.5)
if(!is.null(recovery)) {
recov <- get_recovery(x, nadir, recovery)
if(!is.na(recov$at)) {
fig <- fig %>%
rbokeh::ly_segments(nadir$at / x_denom, nadir$mag,
recov$at / x_denom, nadir$mag,
width = 5, color = "orange", alpha = 0.5) %>%
rbokeh::ly_segments(recov$at / x_denom, nadir$mag,
recov$at / x_denom, recov$z,
width = 5, color = "green", alpha = 0.5)
}
}
}
}
fig
}
#' Plot a fitted trajectory's velocity
#'
#' @param x an object returned from \code{\link{fit_trajectory}}
#' @param width width of the plot
#' @param height height of the plot
#' @param x_units units of age x-axis (days, months, or years)
#' @param \ldots additional parameters passed to \code{\link{figure}}
#' @examples
#' mod <- get_fit(cpp, y_var = "wtkg", method = "rlm")
#' fit <- fit_trajectory(subset(cpp, subjid == 2), mod)
#' plot_velocity(fit)
#' @export
plot_velocity <- function(x, width = 500, height = 520,
x_units = c("days", "months", "years"), ...) {
x_units <- match.arg(x_units)
x_denom <- switch(x_units,
days = 1,
months = 365.25 / 12,
years = 365.25)
if(is.null(x$fitgrid$dy))
return(empty_plot("No velocity data for this subject"))
xlab <- hbgd::hbgd_labels[[x$x_var]]
if(x_units == "months")
xlab <- gsub("\\(days\\)", "(months)", xlab)
if(x_units == "years")
xlab <- gsub("\\(days\\)", "(years)", xlab)
ylab <- paste(hbgd::hbgd_labels[[x$y_var]], "growth velocity")
# remove blip in velocity near age 2 years (likely an artifact of the
# WHO standard switch from length to height at 24 months)
xx <- x$fitgrid$x
dyy <- x$fitgrid$dy
ind <- which.min(abs(xx - 365.25 * 2))
if(abs(365.25 * 2 - xx[ind]) < 2 * diff(xx[1:2])) {
dyy[max(1, ind - 2):min(length(xx), ind + 2)] <- NA
}
figure(width = width, height = height,
xlab = xlab, ylab = ylab, logo = NULL, ...) %>%
ly_lines(xx / x_denom, dyy, color = "black")
}
#' Plot a fitted trajectory's z-score velocity
#'
#' @param x an object returned from \code{\link{fit_trajectory}}
#' @param width width of the plot
#' @param height height of the plot
#' @param x_units units of age x-axis (days, months, or years)
#' @param \ldots additional parameters passed to \code{\link{figure}}
#' @examples
#' mod <- get_fit(cpp, y_var = "wtkg", method = "rlm")
#' fit <- fit_trajectory(subset(cpp, subjid == 2), mod)
#' plot_zvelocity(fit)
#' @export
plot_zvelocity <- function(x, width = 500, height = 520,
x_units = c("days", "months", "years"), ...) {
x_units <- match.arg(x_units)
x_denom <- switch(x_units,
days = 1,
months = 365.25 / 12,
years = 365.25)
if(is.null(x$fitgrid$dz))
return(empty_plot("No z-score velocity data for this subject"))
xlab <- hbgd::hbgd_labels[[x$x_var]]
if(x_units == "months")
xlab <- gsub("\\(days\\)", "(months)", xlab)
if(x_units == "years")
xlab <- gsub("\\(days\\)", "(years)", xlab)
ylab <- paste(hbgd::hbgd_labels[[x$y_var]], "z-score growth velocity")
# remove blip in z-score velocity near age 2 years (likely an artifact of the
# WHO standard switch from length to height at 24 months)
xx <- x$fitgrid$x
dzz <- x$fitgrid$dz
ind <- which.min(abs(xx - 365.25 * 2))
if(abs(365.25 * 2 - xx[ind]) < 2 * diff(xx[1:2])) {
dzz[max(1, ind - 2):min(length(xx), ind + 2)] <- NA
}
figure(width = width, height = height,
xlab = xlab, ylab = ylab, logo = NULL, ...) %>%
ly_lines(xx / x_denom, dzz, color = "black")
}
empty_plot <- function(lab) {
figure(xaxes = FALSE, yaxes = FALSE,
xgrid = FALSE, ygrid = FALSE, logo = NULL) %>%
ly_text(0, 0, c("", lab), align = "center")
}
#' Get nadir of z-scale growth trajectory
#'
#' @param obj object created from \code{\link{fit_trajectory}}
#' @export
get_nadir <- function(obj) {
if(is.null(obj$fitgrid))
return(data.frame(at = NA, mag = NA, end = NA))
if(is.null(obj$fitgrid$dz))
return(data.frame(at = NA, mag = NA, end = NA))
nn <- nrow(obj$fitgrid) - 1
# get crossings of zero of dz
cross <- which(diff(sign(obj$fitgrid$dz)) > 0) + 1
if(length(cross) == 0) {
if(all(obj$fitgrid$dz[nn] >= obj$fitgrid$dz, na.rm = TRUE)) {
return(data.frame(at = obj$fitgrid$x[nn], mag = obj$fitgrid$z[nn], end = TRUE))
} else {
return(data.frame(at = NA, mag = NA, end = NA))
}
}
cross <- cross[which.min(obj$fitgrid$z[cross])]
end <- FALSE
if(obj$fitgrid$z[nn] < obj$fitgrid$z[cross]) {
cross <- nn
end <- TRUE
}
data.frame(at = obj$fitgrid$x[cross], mag = obj$fitgrid$z[cross], end = end)
}
#' Get recovery statistics of z-scale growth trajectory
#'
#' @param obj object created from \code{\link{fit_trajectory}}
#' @param nadir object created from \code{\link{get_nadir}} (if NULL, will be automatically generated)
#' @param at age (in days) at which to estimate recovery
#' @export
get_recovery <- function(obj, nadir = NULL, at = 365.25 * 3) {
if(is.null(obj$fitgrid))
return(data.frame(at = NA, z = NA, recov = NA))
if(is.null(obj$fitgrid$z))
return(data.frame(at = NA, z = NA, recov = NA))
if(is.null(nadir)) {
nadir <- get_nadir(obj)
}
if(!is.na(nadir$at) && nadir$at < at) {
val <- approxfun(obj$fitgrid$x, obj$fitgrid$z)(at)
return(data.frame(at = at, z = val, recov = val - nadir$mag))
} else {
return(data.frame(at = NA, z = NA, recov = NA))
}
}
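# Usage sketch (not run; mirrors the examples above, which assume the cpp
# data and get_fit()/fit_trajectory() from this package):
# mod <- get_fit(cpp, y_var = "wtkg", method = "rlm")
# fit <- fit_trajectory(subset(cpp, subjid == 2), mod)
# nad <- get_nadir(fit)                            # age and magnitude of the nadir
# get_recovery(fit, nadir = nad, at = 365.25 * 3)  # recovery at age 3 years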
|
/R/plot_trajectory.R
|
permissive
|
skhan890/hbgd
|
R
| false | false | 12,513 |
r
|
library(tidyverse)
library(rtracklayer)
library(ggrastr)
library(patchwork)
library(furrr)
plan(multisession, workers = 10) # multiprocess is deprecated in the future package
load('misc/cons_ko.rda')
tads <- import.bed('misc/tad.bed')
load('misc/g33.rda')
load('misc/deg.rda')
load('misc/dac.rda')
signif.num <- function(x) {
symnum(x, corr = FALSE, na = FALSE, legend = FALSE,
cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
symbols = c("***", "**", "*", "˙", ""))
}
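# e.g. signif.num(c(0.0005, 0.03, 0.2)) gives "***" "*" "" (significance codes)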
getWhisks <- function(x) {
x <- as.numeric(x)
qs <- quantile(x, c(0.25, 0.75), na.rm = T)
data.frame(lower = qs[1], upper = qs[2], middle = median(x, na.rm = T),
ymin = min(x[x >= (qs[1] - 1.5 * diff(qs))], na.rm = T),
ymax = max(x[x <= (qs[2] + 1.5 * diff(qs))], na.rm = T)) %>%
mutate(notchlower = middle - 1.58 * diff(qs)/sqrt(length(x)),
notchupper = middle + 1.58 * diff(qs)/sqrt(length(x)))
}
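# Usage sketch: getWhisks(rnorm(100)) returns a one-row data.frame of
# boxplot statistics (quartiles, median, whisker ends, notch limits)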
scientific_10 <- function(x) {
xout <- gsub("1e", "10^{", format(x),fixed=TRUE)
xout <- gsub("{-0", "{-", xout,fixed=TRUE)
xout <- gsub("{+", "{", xout,fixed=TRUE)
xout <- gsub("{0", "{", xout,fixed=TRUE)
xout <- paste(xout,"}",sep="")
parse(text=xout)
}
scale_x_log10nice <- function(name=NULL,omag=seq(-10,20),...) {
breaks10 <- 10^omag
scale_x_log10(name,breaks=breaks10,labels=scientific_10(breaks10),...)
}
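# Usage sketch (assumes a data frame `df` with positive numeric columns x, y):
# ggplot(df, aes(x, y)) + geom_point() + scale_x_log10nice("x (log scale)")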
g33 <- g33[g33$type == 'gene']
dbins <- cons$B
tads <- tads[overlapsAny(tads, g33) & overlapsAny(tads, d.all)]
g33 <- g33[overlapsAny(g33, tads)]
d.all <- d.all[overlapsAny(d.all, tads)]
ints <- read_delim('misc/GH_interactions1_doubleElite.bed', '\t',
col_names = c("chrom","chromStart", "chromEnd",
"name", "score", "value",
"geneAssociationMethods",
"color", "geneHancerChrom",
"geneHancerStart", "geneHancerEnd",
"geneHancerIdentifier",
"geneHancerStrand", "geneChrom",
"geneStart", "geneEnd",
"geneName", "geneStrand"))
gh <- read_delim('misc/GeneHancer.bed', '\t',
col_names = c("chrom", "start", "end", "name", "score",
"strand", "thickStart", "thickEnd", "reserved",
"evidenceSources", "elementType", "eliteness")) %>%
mutate(start = start + 1) %>%
makeGRangesFromDataFrame(keep.extra.columns = T)
igr <- import.bed('ensembl/intergenic.bed')
igr_or_not <- setNames(overlapsAny(gh, igr), gh$name)
enh_or_not <- setNames(gh$elementType == 'Enhancer', gh$name)
enhs <- ints %>%
dplyr::select(chr = geneHancerChrom,
start = geneHancerStart,
end = geneHancerEnd,
strand = geneHancerStrand,
id = geneHancerIdentifier) %>%
mutate(start = start + 1,
igr = igr_or_not[id],
enh = enh_or_not[id]) %>%
distinct(id, .keep_all = T) %>%
makeGRangesFromDataFrame(keep.extra.columns = T)
genes <- ints %>%
dplyr::select(chr = geneChrom,
start = geneStart,
end = geneEnd,
strand = geneStrand,
gene = geneName,
id = geneHancerIdentifier) %>%
mutate(start = start + 1) %>%
makeGRangesFromDataFrame(keep.extra.columns = T)
gs <- genes[genes$gene %in% g33$gene_name]
g2id <- setNames(g33$ID, g33$gene_name)
gs$ensembl <- g2id[gs$gene]
others <- genes[!(genes$gene %in% g33$gene_name)]
others.ok <- others[overlapsAny(others, g33)]
others <- others[!overlapsAny(others, g33)]
hits <- findOverlaps(others.ok, g33) %>% as("List")
ids <- extractList(g33$ID, hits) %>% as.list()
# iteratively assign each remaining GeneHancer-only gene the ID of an
# overlapping g33 gene, peeling off single-overlap cases each pass
init <- T
while(T) {
uniq <- which(sapply(ids, length) == 1)
others.ok$ensembl <- sapply(ids, `[`, 1)
if (init) {
ok <- others.ok
init <- F
} else {
ok <- c(ok, others.ok)
}
others.ok <- others.ok[-uniq]
if (length(others.ok) < 1) {
break
}
hits <- findOverlaps(others.ok, g33) %>% as("List")
ids <- extractList(g33$ID, hits) %>% as.list
}
gs <- c(gs, ok)
res <- res$KO
res$dtad <- res$ID %in%
g33[overlapsAny(g33, tads[overlapsAny(tads, dbins)])]$ID
res$tany <- res$ID %in%
(gs[gs$id %in% enhs[overlapsAny(enhs, d.all[
d.all$FDR < .05])]$id]$ensembl %>% unique())
res$tpos <- res$ID %in%
(gs[gs$id %in% enhs[overlapsAny(enhs, d.all[
d.all$FDR < .05 & d.all$Fold > 0])]$id]$ensembl %>% unique())
res$tneg <- res$ID %in%
(gs[gs$id %in% enhs[overlapsAny(enhs, d.all[
d.all$FDR < .05 & d.all$Fold < 0])]$id]$ensembl %>% unique())
res$targ <- res$ID %in%
(gs[gs$id %in% enhs[overlapsAny(enhs, dbins)]$id]$ensembl %>% unique())
res$dpos <- !is.na(res$svalue) & res$svalue < .05 & res$log2FoldChangeAdj < 0
res$dneg <- !is.na(res$svalue) & res$svalue < .05 & res$log2FoldChangeAdj > 0
res <- res[res$ID %in% g33$ID,]
gg <- g33[match(res$ID, g33$ID)]
res$str <- distanceToNearest(gg, dbins)@elementMetadata$distance %>%
cut(., breaks = quantile(., probs = seq(0, 1, by = .2)),
include.lowest = T) %>%
as.factor()
res$dist <- distanceToNearest(gg, dbins)@elementMetadata$distance
o <- res
o <- o[o$dist > 0,]
ht <- o$dist[o$dneg] %>%
log10() %>%
hist(10, plot = F)
o$str2 <- cut(o$dist, breaks = 10^ht$breaks,
include.lowest = T)
o$int <- as.numeric(o$str2)
probs <- lapply(seq_along(ht$counts), function(x) {
rep(x, ht$counts[x])
}) %>% unlist()
samp <- function(dat, idxs) {
lapply(idxs, function(i) {
dat[[as.character(i)]] %>%
{.[sample(nrow(.), 1),]}
}) %>% bind_rows()
}
dat <- split(o, o$int)
nums <- future_map(1:1e4, function(x) {
samp(dat, probs)$dtad %>%
sum()
}, .progress = T) %>% unlist()
num <- sum(o$dtad[o$dneg])
p <- sum(nums >= num) / length(nums)
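# Note: with a finite number of permutations the empirical p-value can be
# exactly 0; a common correction adds a pseudocount (Phipson & Smyth 2010):
# p <- (sum(nums >= num) + 1) / (length(nums) + 1)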
ptxt <- tibble(x = -Inf, y = Inf,
p = sprintf('p = %.3f', p))
p <- ggplot(tibble(x = nums, ttl = 'Permutation test on \'TAD\''), aes(x)) +
geom_histogram(binwidth = 1, fill = 'dodgerblue4') +
geom_vline(xintercept = num, color = 'firebrick3') +
geom_label(aes(x, y, label = p), data = ptxt, vjust = 1.5, hjust = -0.1) +
labs(x = sprintf("# genes in TAD with \u2193%s", 'H3K36me2 bin'),
y = "Frequency") +
facet_grid(ttl ~ .) +
scale_y_continuous(expand = expansion(mult = c(0,0.1))) +
theme(plot.background = element_blank(),
panel.background = element_blank(),
panel.grid = element_blank(),
legend.position = c(.025, .90),
legend.justification = c(0, 1),
legend.title = element_text(family = 'Arial', size = 9),
legend.background = element_blank(),
legend.key = element_blank(),
axis.title = element_text(family = 'Arial'),
strip.background = element_rect(fill = 'black'),
strip.text = element_text(color = 'white'),
axis.line = element_line(color = 'black'),
axis.text = element_text(color = 'black'),
#axis.line.y = element_blank(),
panel.grid.major = element_line(color = 'grey80', linetype = 'dashed'),
axis.ticks = element_line(color = 'black'))
ggsave('figs/4d.pdf', p, height = 2.1, width = 3.6,
device = cairo_pdf)
|
/scripts/4d.R
|
no_license
|
yuzhenpeng/hnscc_nsd1
|
R
| false | false | 7,195 |
r
|
# Collapsed samples overlap
# Tahel Ronel, June 2021
# This script calculates the TCR overlap (using the 5-part Decombinator id, DCR) between any two samples produced using Collapsinator from Decombinator V4.
# The overlap is calculated as follows:
# Let A be the overlap matrix. Then for any two samples i,j,
# A_{i,j}, the overlap with respect to row i, is
# (Number of distinct DCRs found in both i and j) / (number of unique DCRs in i).
# Note this is in general asymmetric (A_{i,j} is not equal to A_{j,i}).
# The overlap matrix is then plotted as a heatmap, with red squares marking an overlap greater than (mean + 3 standard deviations).
# If comparing all samples in a particular sequencing run, note that as some runs contain several samples from the same individual/s,
# the absolute expected 'background' overlap will differ between runs.
# The script calculates and plots the overlap of the alpha files first followed by beta.
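# Worked toy example of the definition above (not run):
# i <- c("a", "b", "c"); j <- c("b", "c", "d", "e")
# length(intersect(i, j)) / length(unique(i))  # A_ij = 2/3
# length(intersect(j, i)) / length(unique(j))  # A_ji = 1/2, hence asymmetric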
library(pheatmap)
library(RColorBrewer)
library(ggplot2)
# Change this to the path to the directory containing the collapsed (.freq) files to be compared
input<-'/path/to/collapsed/files/' # must end in '/': file names are appended below with sep=""
dir<-dir(input)
# Alpha
alpha_list<-list()
alpha_idx<-grep('alpha',dir)
alpha_dir<-dir[alpha_idx]
# Reading in all alpha collapsed files
for (i in 1:length(alpha_dir)){
alpha_list[[i]]<-read.csv(paste(input,alpha_dir[i],sep=""),header=FALSE)
}
# Defining the sample names (removing 'dcr' prefix and chain+file suffix to shorten)
s1<-strsplit(alpha_dir, '_alpha.freq.gz')
s2<-strsplit(unlist(s1), 'dcr_')
names_alpha<-lapply(1:length(alpha_dir), function(x){(s2[[x]][2])})
names(alpha_list)<-names_alpha
# Calculating the overlap matrix:
alpha_mat<-matrix(ncol=length(alpha_dir),nrow=length(alpha_dir))
len_alpha<-length(alpha_dir)
for (i in 1:len_alpha){
for (j in 1:len_alpha){
dcr_i<-paste(alpha_list[[i]][,1],alpha_list[[i]][,2],alpha_list[[i]][,3],alpha_list[[i]][,4],alpha_list[[i]][,5],sep=" ")
dcr_j<-paste(alpha_list[[j]][,1],alpha_list[[j]][,2],alpha_list[[j]][,3],alpha_list[[j]][,4],alpha_list[[j]][,5],sep=" ")
y1<-c(dcr_i,dcr_j)
total_i<-length(dcr_i)#nrow(alpha_list[[i]])
total_ij<-length(y1)
unique_ij<-length(unique(y1))
C_ij<-total_ij-unique_ij
alpha_mat[i,j]<-C_ij/total_i
}}
row.names(alpha_mat)<-names_alpha
colnames(alpha_mat)<-names_alpha
alpha_mat_df<-data.frame(alpha_mat)
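# Optional optimisation (sketch, equivalent output): the loop above rebuilds
# both DCR string vectors on every (i, j) pair; they can be precomputed once:
# dcrs <- lapply(alpha_list, function(d) do.call(paste, c(d[, 1:5], sep = " ")))
# and then dcr_i <- dcrs[[i]]; dcr_j <- dcrs[[j]] inside the loop.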
# Defining the accepted background threshold as mean + 3 std. devs.:
alpha_1<-which(alpha_mat==1)
alpha_mat_nondiag<-alpha_mat[-alpha_1]
mean_alpha<-mean(alpha_mat_nondiag)
sd_alpha<-sd(alpha_mat_nondiag)
outliers3sd<-mean_alpha+3*sd_alpha
outliers3<-which(alpha_mat_df>=outliers3sd)
outliers3<-setdiff(outliers3,alpha_1)
# Setting parameters for heatmap:
border_colours_alpha<-matrix(ncol=len_alpha, nrow=len_alpha)
border_colours_alpha[-outliers3]<-"grey"
border_colours_alpha[outliers3]<-"red"
bk1 <- c(seq(0,outliers3sd,by=0.005),outliers3sd)
bk2 <- c(outliers3sd+0.0001,seq(outliers3sd+0.0002,0.99,by=0.005))
bk <- c(bk1,bk2) #combine the break limits for purpose of graphing
my_palette <- c(colorRampPalette(colors = c("lightyellow", "lightblue"))(n = length(bk1)-1),
"gray38", "gray38",
c(colorRampPalette(colors = c("red", "darkred"))(n = length(bk2)-1)))
# Plotting the heatmap:
pheatmap(alpha_mat_df, clustering_method = "ward.D",
         clustering_distance_rows = "euclidean", main = "Sample Overlap, alpha",
         treeheight_row = 0, cluster_cols = FALSE, cluster_rows = FALSE,
         color = my_palette, breaks = bk, fontsize = 11, fontsize_row = 10,
         show_rownames = TRUE, border_color = border_colours_alpha)
# Save heatmap image as pdf
#dev.off()
#pdf(paste("/path/to/folder/overlap_alpha_",Sys.Date(),".pdf",sep=""))
#pheatmap(alpha_mat_df,cluster_method="Ward.D",clustering_distance_row ="euclidean" ,main=paste("Sample Overlap, alpha"),treeheight_row=0, cluster_cols = FALSE,cluster_rows = FALSE, col= my_palette, breaks=bk, fontsize = 11,fontsize_row=10,show_rownames = TRUE, border_color = border_colours_alpha)
#dev.off()
# Beta
beta_list<-list()
beta_idx<-grep('beta',dir)
beta_dir<-dir[beta_idx]
# Reading in all beta collapsed files
for (i in 1:length(beta_dir)){
beta_list[[i]]<-read.csv(paste(input,beta_dir[i],sep=""),header=FALSE)
}
# Defining the sample names (removing 'dcr' prefix and chain+file suffix to shorten)
s1<-strsplit(beta_dir, '_beta.freq.gz')
s2<-strsplit(unlist(s1), 'dcr_')
names_beta<-lapply(1:length(beta_dir), function(x){(s2[[x]][2])})
names(beta_list)<-names_beta
# Calculating the overlap matrix
beta_mat<-matrix(ncol=length(beta_dir),nrow=length(beta_dir))
len_beta<-length(beta_dir)
for (i in 1:len_beta){
for (j in 1:len_beta){
dcr_i<-paste(beta_list[[i]][,1],beta_list[[i]][,2],beta_list[[i]][,3],beta_list[[i]][,4],beta_list[[i]][,5],sep=" ")
dcr_j<-paste(beta_list[[j]][,1],beta_list[[j]][,2],beta_list[[j]][,3],beta_list[[j]][,4],beta_list[[j]][,5],sep=" ")
y1<-c(dcr_i,dcr_j)
total_i<-length(dcr_i)#nrow(beta_list[[i]])
total_ij<-length(y1)
unique_ij<-length(unique(y1))
C_ij<-total_ij-unique_ij
beta_mat[i,j]<-C_ij/total_i
}}
row.names(beta_mat)<-names_beta
colnames(beta_mat)<-names_beta
beta_mat_df<-data.frame(beta_mat)
# Defining the accepted background threshold as mean + 3 std. devs.:
beta_1<-which(beta_mat==1)
beta_mat_nondiag<-beta_mat[-beta_1]
mean_beta<-mean(beta_mat_nondiag)
sd_beta<-sd(beta_mat_nondiag)
outliers3sd<-mean_beta+3*sd_beta
outliers3<-which(beta_mat_df>=outliers3sd)
outliers3<-setdiff(outliers3,beta_1)
# Setting parameters for heatmap:
border_colours_beta<-matrix(ncol=len_beta, nrow=len_beta)
border_colours_beta[-outliers3]<-"grey"
border_colours_beta[outliers3]<-"red"
bk1 <- c(seq(0,outliers3sd,by=0.005),outliers3sd)
bk2 <- c(outliers3sd+0.0001,seq(outliers3sd+0.0002,0.99,by=0.005))
bk <- c(bk1,bk2) #combine the break limits for purpose of graphing
my_palette <- c(colorRampPalette(colors = c("lightyellow", "lightblue"))(n = length(bk1)-1),
"gray38", "gray38",
c(colorRampPalette(colors = c("red", "darkred"))(n = length(bk2)-1)))
# Plotting the heatmap:
pheatmap(beta_mat_df, clustering_method = "ward.D",
         clustering_distance_rows = "euclidean", main = "Sample Overlap, beta",
         treeheight_row = 0, cluster_cols = FALSE, cluster_rows = FALSE,
         color = my_palette, breaks = bk, fontsize = 11, fontsize_row = 10,
         show_rownames = TRUE, border_color = border_colours_beta)
# Save heatmap image as pdf
#dev.off()
#pdf(paste("/path/to/folder/overlap_beta_",Sys.Date(),".pdf",sep=""))
#pheatmap(beta_mat_df,cluster_method="Ward.D",clustering_distance_row ="euclidean" ,main=paste("Sample Overlap, beta"),treeheight_row=0, cluster_cols = FALSE,cluster_rows = FALSE, col= my_palette, breaks=bk, fontsize = 11,fontsize_row=10,show_rownames = TRUE, border_color = border_colours_beta)
#dev.off()
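# Refactoring sketch (hypothetical helper, not used above): the alpha and beta
# blocks are identical up to the chain name, so the overlap computation could
# be shared. This assumes DCRs are unique within each collapsed file, in which
# case C_ij equals the size of the intersection:
# overlap_matrix <- function(dcr_list) {
#   n <- length(dcr_list)
#   mat <- matrix(0, n, n)
#   for (i in seq_len(n)) for (j in seq_len(n))
#     mat[i, j] <- length(intersect(dcr_list[[i]], dcr_list[[j]])) /
#       length(unique(dcr_list[[i]]))
#   mat
# }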
|
/collapsed_sample_overlap/collapsed_sample_overlap.R
|
no_license
|
innate2adaptive/Decombinator-Tools
|
R
| false | false | 6,856 |
r
|
################################################################################
##' @title Analyze log size - LODI
##'
##' @author Robin Elahi
##' @contact elahi.robin@gmail.com
##'
##' @date 2017-12-17
##'
##' @log
################################################################################
##### PACKAGES, DATA #####
source("3_analyse_data/01_sbs_bayes_data.R")
source("R/truncate_data.R")
library(rjags)
##### PREPARE DATA FOR JAGS #####
##' x1 = era
##' x2 = density
##' x3 = tide height
## My data
statDat <- hexDF
## My quantile for size threshold
my_quantile <- 0
statDat <- truncate_data(statDat, era = "past", quant = my_quantile)
statDat <- statDat %>% mutate(era01 = ifelse(era == "past", 0, 1))
## My species
my_species <- "LODI"
## My data type
my_data <- "raw"
# Get means and sd of continuous variables
x2_mu <- mean(statDat$density_m2)
x2_sd <- sd(statDat$density_m2)
x3_mu <- mean(statDat$tideHTm)
x3_sd <- sd(statDat$tideHTm)
# Standardize continuous variables
statDat$x2z <- as.numeric(scale(statDat$density_m2))
statDat$x3z <- as.numeric(scale(statDat$tideHTm))
make_predict_vector <- function(my_vector, predict_length = 100){
my_min <- min(my_vector)
my_max <- max(my_vector)
my_vector_pred <- seq(my_min, my_max, length.out = predict_length)
return(my_vector_pred)
}
x2z_pred <- make_predict_vector(statDat$x2z, predict_length = 100)
x3z_pred <- make_predict_vector(statDat$x3z, predict_length = 100)
# For prediction
era_predict <- c(0,1)
pred_df <- expand.grid(x2z_pred, era_predict) %>%
rename(x2z = Var1, x1 = Var2) %>% as_tibble() # tbl_df() is deprecated
pred_df$x3z <- 0
# Get data
data = list(
N = nrow(statDat),
y = as.double(statDat$size_log),
x1 = as.double(statDat$era01),
x2 = as.double(statDat$x2z),
x3 = as.double(statDat$x3z),
x1_pred = as.double(pred_df$x1),
x2_pred = as.double(pred_df$x2z),
x3_pred = as.double(pred_df$x3z)
)
##### MODEL 1: ERA + DENSITY + TIDE ####
my_model <- "eraPdensityPtide"
output_location <- "3_analyse_data/bayes_output/by_species/"
# JAGS model
sink("3_analyse_data/bayes_models/modelJags.R")
cat("
model{
# priors
b0 ~ dnorm(0, 1/10^2)
b1 ~ dnorm(0, 1/10^2)
b2 ~ dnorm(0, 1/10^2)
b3 ~ dnorm(0, 1/10^2)
sigma ~ dunif(0, 5)
tau <- 1/sigma^2
# likelihood
for (i in 1:N){
mu[i] <- b0 + b1*x1[i] + b2*x2[i] + b3*x3[i]
y[i] ~ dnorm(mu[i], tau)
y.new[i] ~ dnorm(mu[i], tau)
sq.error.data[i] <- (y[i] - mu[i])^2
sq.error.new[i] <- (y.new[i] - mu[i])^2
}
# bayesian p-values
sd.data <- sd(y)
sd.new <- sd(y.new)
p.sd <- step(sd.new - sd.data)
mean.data <- mean(y)
mean.new <- mean(y.new)
p.mean <- step(mean.new - mean.data)
discrep.data <- sum(sq.error.data)
discrep.new <- sum(sq.error.new)
p.discrep <- step(discrep.new - discrep.data)
}
", fill = TRUE)
sink()
inits = list(
list(b0 = 1, b1 = 0, b2 = 0, b3 = 0, sigma = 4),
list(b0 = 0.5, b1 = 0.1, b2 = -0.1, b3 = 0.2, sigma = 2),
list(b0 = 2, b1 = -0.1, b2 = 0.1, b3 = -0.1, sigma = 1))
# Number of iterations
n.adapt <- 1000
n.update <- 1000
n.iter <- 1000
## Run model
jm <- jags.model("3_analyse_data/bayes_models/modelJags.R", data = data,
inits = inits, n.chains = length(inits),
n.adapt = n.adapt)
update(jm, n.iter = n.update)
zm = coda.samples(jm, variable.names = c("b0",
                                         "b1", "b2", "b3",
                                         "sigma"),
                  n.iter = n.iter, thin = 10) # rjags uses 'thin' (keep every 10th draw)
zj = jags.samples(jm, variable.names = c("b0", "b1","p.mean", "p.sd", "p.discrep"),
                  n.iter = n.iter, thin = 10)
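# Optional convergence check: effective sample size per monitored parameter
effectiveSize(zm)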
#Produce a summary table for the parameters.
summary(zm)
10^(summary(zm)$statistics[1]) # intercept back-transformed to the original scale (size is modeled on log10)
# Save trace plots
trace_file <- paste(output_location, my_species, my_data,
my_model, my_quantile, "trace", sep = "_")
png(filename = paste(trace_file, "png", sep = "."),
height = 5, width = 5, units = "in", res = 150)
par(mfrow = c(3,2))
traceplot(zm)
dev.off()
# Save density plots
density_file <- paste(output_location, my_species, my_data,
my_model, my_quantile, "dens", sep = "_")
png(filename = paste(density_file, "png", sep = "."),
height = 5, width = 5, units = "in", res = 150)
par(mfrow = c(3,2))
densplot(zm)
dev.off()
# Test for convergence using the Gelman diagnostic.
gd <- gelman.diag(zm, multivariate = F)[[1]]
# Check Bayesian pvals
pvals <- c(p.mean = mean(zj$p.mean), p.sd = mean(zj$p.sd),
p.discrep = mean(zj$p.discrep))
# Get proportional change [assumes all other variables are at their means]
str(zj)
zj_b0 <- zj$b0
head(zj_b0)
zj_b1 <- zj$b1
past_size <- 10^zj_b0
present_size <- 10^(zj_b0 + zj_b1)
prop_change <- (present_size - past_size)/past_size
prop_change_vec <- as.numeric(prop_change)
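# Sanity check (algebra): on the log10 scale the b0 terms cancel, so the
# proportional change reduces to 10^b1 - 1:
# all.equal(prop_change_vec, as.numeric(10^zj_b1 - 1))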
prop_change_quantile <- t(quantile(prop_change_vec,
probs = c(0.025, 0.25, 0.5, 0.75, 0.975)))
rownames(prop_change_quantile) <- "prop_change"
# Save coda summary
coda_summary <- summary(zm)
coda_quantile <- data.frame(rbind(coda_summary$quantile, prop_change_quantile))
params <- rownames(coda_quantile)
coda_quantile <- coda_quantile %>%
mutate(spp = my_species,
data = my_data,
model = my_model,
param = params)
##### SAVE OUTPUT #####
my_file <- paste(output_location, my_species, my_data, my_model, my_quantile, sep = "_")
## Coda quantile summary
write.csv(x = coda_quantile, file = paste(my_file, "csv", sep = "."))
## Pvals
write.csv(x = pvals, file = paste(my_file, "pvals", "csv", sep = "."))
## Gelman
write.csv(x = gd, file = paste(my_file, "gd", "csv", sep = "."))
|
/3_analyse_data/02_analyse_logsize_lodi.R
|
permissive
|
elahi/sbs_analysis
|
R
| false | false | 5,753 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/properties.R
\name{sol_set_property}
\alias{sol_set_property}
\alias{sol_get_property}
\title{Set or get the property name}
\usage{
sol_set_property(x, prop, with_units, ...)
sol_get_property(x)
}
\arguments{
\item{x}{vector: data}
\item{prop}{string: property name}
\item{with_units}{string: units of measurement to use. If missing, the default units for the property will be used}
\item{...}{extra arguments, currently ignored}
}
\value{
x with additional class set
}
\description{
Set or get the property name
}
\examples{
x <- data.frame(LRL=c(11.3,13.9),species=c("Architeuthis dux"),
stringsAsFactors=FALSE)
## it doesn't matter what the column names are, but we
## need to set the property types correctly
x$LRL <- sol_set_property(x$LRL,"lower rostral length")
## remove the property
x$LRL <- sol_set_property(x$LRL,NULL)
}
\seealso{
\code{\link{sol_properties}}
}
|
/man/sol_set_property.Rd
|
permissive
|
SCAR/solong
|
R
| false | true | 1,001 |
rd
|
# This file implements all the classifiers with feature selection
# This file will use the variables from the other 3 files to plot the graph
# Please run the other 3 files first
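# NOTE: rm(list = ls()) below clears the workspace, so the plotting section at
# the bottom (which expects knn_3_acc, knn_5_acc, knn_10_acc, svm_acc,
# forest_acc and sbc_acc from those files) requires re-running them afterwards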
# Setup ----
rm(list=ls())
# !!CHANGE WORKING DIR TO MATCH YOUR CASE!!
setwd("/Users/shuqishang/Documents/courses/Statistical_Learning/proj/data/")
require(RWeka) # C4.5
require(e1071) # SVM, Naive Bayes
require(class) # KNN
require(randomForest) # Random Forest
require(ipred) # Bagging
require(MASS) # lda, qda
# Different datasets require different preprocessing in the next several lines
# In the latter part, V9 should be changed to the dependent variable of other datasets
# The titles of the graphs should also be changed for different datasets
# Selected features should be changed for different datasets
cancer = read.table('ecoli.data.txt', sep = '', header = F)
row_to_delete = c()
for (row in 1:nrow(cancer)) {
if ('imS' %in% cancer[row, 'V9'] || 'imL' %in% cancer[row, 'V9'] || 'omL' %in% cancer[row, 'V9']) {
row_to_delete = union(row_to_delete, row)
}
}
cancer = cancer[-row_to_delete,]
cancer$V9 = factor(cancer$V9)
cancer_without_id = cancer[, !(names(cancer) %in% c('V1'))]
independ_var = cancer[, !(names(cancer) %in% c('V1', 'V9'))]
select_var = cancer[, (names(cancer) %in% c('V7', 'V2', 'V8', 'V3', 'V6'))]
n_samples = nrow(cancer_without_id)
sknn_3_acc = rep(0, 80)
sknn_5_acc = rep(0, 80)
sknn_10_acc = rep(0, 80)
ssvm_acc = rep(0, 80)
slda_acc = rep(0, 80)
sqda_acc = rep(0, 70)
# Run all the classifiers with feature selection
for (i in 11:90) {
for (j in 1:15) {
cat('i =', i, 'j =', j, '\n')
set.seed(j)
train = sample(n_samples, n_samples / 100 * i)
test = -train
X_train = cancer_without_id[train,]
X_test = cancer_without_id[test,]
sknn_X_train = select_var[train,]
sknn_X_test = select_var[test,]
sknn_3_pred = knn(train = sknn_X_train, test = sknn_X_test, cl = X_train$V9, k = 3)
sknn_3_acc[i-10] = sknn_3_acc[i-10] + sum(sknn_3_pred == X_test$V9) / length(sknn_3_pred)
sknn_5_pred = knn(train = sknn_X_train, test = sknn_X_test, cl = X_train$V9, k = 5)
sknn_5_acc[i-10] = sknn_5_acc[i-10] + sum(sknn_5_pred == X_test$V9) / length(sknn_5_pred)
sknn_10_pred = knn(train = sknn_X_train, test = sknn_X_test, cl = X_train$V9, k = 10)
sknn_10_acc[i-10] = sknn_10_acc[i-10] + sum(sknn_10_pred == X_test$V9) / length(sknn_10_pred)
ssvm_model = svm(V9 ~ V7 + V2 + V8 + V3 + V6, data = X_train)
ssvm_pred = predict(ssvm_model, X_test)
ssvm_acc[i-10] = ssvm_acc[i-10] + sum(ssvm_pred == X_test$V9) / length(ssvm_pred)
# slda_model = lda(V9 ~ V7 + V2 + V8 + V3 + V6, data = X_train)
# slda_pred = predict(slda_model, X_test)$class
# slda_acc[i-10] = slda_acc[i-10] + sum(slda_pred == X_test$V9) / length(slda_pred)
#
    # if (i > 20) { # if i <= 20, some group is too small for 'qda'
# sqda_model = qda(V9 ~ V7 + V2 + V8 + V3 + V6, data = X_train)
# sqda_pred = predict(sqda_model, X_test)$class
# sqda_acc[i-20] = sqda_acc[i-20] + sum(sqda_pred == X_test$V9) / length(sqda_pred)
# }
}
sknn_3_acc[i-10] = sknn_3_acc[i-10] / 15
sknn_5_acc[i-10] = sknn_5_acc[i-10] / 15
sknn_10_acc[i-10] = sknn_10_acc[i-10] / 15
ssvm_acc[i-10] = ssvm_acc[i-10] / 15
# slda_acc[i-10] = slda_acc[i-10] / 15
# sqda_acc[i-20] = sqda_acc[i-20] / 15
}
# KNN, SKNN
plot(knn_3_acc~c(11:90), col = 'red', cex = 0.3, main = "Ecoli",
xlab = 'Training Data (%)', ylab = 'Accuracy', ylim = c(0.73, 0.9))
points(c(11:90), knn_5_acc, col = 'blue', cex = 0.3)
points(c(11:90), knn_10_acc, col = 'darkgreen', cex = 0.3)
points(c(11:90), sknn_3_acc, col = 'darkred', cex = 0.3)
points(c(11:90), sknn_5_acc, col = 'darkblue', cex = 0.3)
points(c(11:90), sknn_10_acc, col = 'black', cex = 0.3)
lines(knn_3_acc~c(11:90), col = 'red')
lines(knn_5_acc~c(11:90), col = 'blue')
lines(knn_10_acc~c(11:90), col = 'darkgreen')
lines(sknn_3_acc~c(11:90), col = 'darkred')
lines(sknn_5_acc~c(11:90), col = 'darkblue')
lines(sknn_10_acc~c(11:90), col = 'black')
legend("bottomright", ncol = 2,
c("KNN(k=3)", "KNN(k=5)", "KNN(k=10)", "SKNN(k=3)", "SKNN(k=5)", "SKNN(k=10)"),
col = c("red", "blue", "darkgreen", "darkred", "darkblue", "black"),
text.col = c("red", "blue", "darkgreen", "darkred", "darkblue", "black"),
lty = c(1, 1, 1, 1, 1, 1))
# SVM, LDA, QDA, SSVM, SLDA, SQDA
plot(svm_acc~c(11:90), col = 'red', cex = 0.3, main = "Ecoli",
xlab = 'Training Data (%)', ylab = 'Accuracy', ylim = c(0.5, 0.88))
# points(c(11:90), lda_acc, col = 'blue', cex = 0.3)
# points(c(21:90), qda_acc, col = 'darkgreen', cex = 0.3)
points(c(11:90), ssvm_acc, col = 'darkred', cex = 0.3)
# points(c(11:90), slda_acc, col = 'darkblue', cex = 0.3)
# points(c(21:90), sqda_acc, col = 'black', cex = 0.3)
lines(svm_acc~c(11:90), col = 'red')
# lines(lda_acc~c(11:90), col = 'blue')
# lines(qda_acc~c(21:90), col = 'darkgreen')
lines(ssvm_acc~c(11:90), col = 'darkred')
# lines(slda_acc~c(11:90), col = 'darkblue')
# lines(sqda_acc~c(21:90), col = 'black')
legend("bottomright",
c("SVM", "SSVM"),
col = c("red", "darkred"),
text.col = c("red", "darkred"),
lty = c(1, 1))
# Best few methods comparison
plot(ssvm_acc~c(11:90), col = 'red', cex = 0.3, main = "Ecoli",
xlab = 'Training Data (%)', ylab = 'Accuracy', ylim = c(0.77, 0.91))
points(c(11:90), forest_acc, col = 'blue', cex = 0.3)
points(c(11:90), sbc_acc, col = 'darkgreen', cex = 0.3)
points(c(11:90), knn_5_acc, col = 'purple', cex = 0.3)
lines(ssvm_acc~c(11:90), col = 'red')
lines(forest_acc~c(11:90), col = 'blue')
lines(sbc_acc~c(11:90), col = 'darkgreen')
lines(knn_5_acc~c(11:90), col = 'purple')
legend("bottomright",
c("SSVM", "Random Forest", "SBC", "KNN(k=5)"),
col = c("red", "blue", "darkgreen", "purple"),
text.col = c("red", "blue", "darkgreen", "purple"),
lty = c(1, 1, 1, 1))
|
/Ecoli/selective_all_methods.R
|
no_license
|
sshang0309/Statistical-Learning-in-Biology-Information-Systems
|
R
| false | false | 5,955 |
r
|
\name{HMRF}
\alias{HMRF}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Image Segmentation using Hidden Markov Random Field with EM Algorithm
%% ~~function to do ... ~~
}
\description{This function can be used to obtain the segmented image using HMRF-EM Algorithm.
}
%% ~~ A concise (1-5 lines) description of what the function does. ~~
\usage{
HMRF(X, Y, Z, em_iter, map_iter, beta = 2,
epsilon_em = 0.00001, epsilon_map = 0.00001)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{an m by n binary matrix of the initial labels for an image, which can be obtained using initial segmentation methods, such as K-means or thresholding methods. Note that X could be any binary matrix, for example, its elements could be 0 & 1, or 1 & 2, or 2 & 3, ..., etc.
%% ~~Describe \code{x} here~~
}
\item{Y}{an m by n matrix of pixel intensity. For plant segmentation, we recommend to use relative green.
}
\item{Z}{ an m by n binary matrix, giving an estimate of the object edges in Y. We can obtain Z using the Canny edge detector:
Z = t(cannyEdges(Y)[ , , 1, 1]) from the package imager. See the example for details.
}
\item{em_iter}{a positive integer, which is the number of iteration steps of the EM Algorithm.
}
\item{map_iter}{a positive integer, which is the number of iteration steps of calculating MAP (the maximum a posterior estimation).
}
\item{beta}{The clique potential parameter for neighbourhood dependence. By default, beta = 2. See details in the supplementary file on the HMRF Model. This beta is equivalent to the Psi in the supplementary file (see pages 20-21).
}
\item{epsilon_em}{a small positive number, which is the convergence criterion of the EM Algorithm.
}
\item{epsilon_map}{a small positive number, which is the convergence criterion of MAP (maximum a posterior estimation).
}
}
\details{1. A more detailed explanation of this method can be found in the supplementary file:
https://github.com/rwang14/implant/blob/master/vignettes/HMRF_EM.pdf
2. The argument Z can be obtained with the Canny edge detector using the function cannyEdges( ) from the package imager. However,
since that package depends on Rcpp and other packages, which may increase the installation complexity of our package, we recommend that users install the package ``imager" themselves if needed.
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{image_matrix}{A matrix giving labels for the segmented image.
}
}
\references{Wang, Quan (2012), “HMRF-EM-image: Implementation of the Hidden Markov Random Field Model and its Expectation-Maximization Algorithm.” arXiv preprint arXiv:1207.3510.
}
\author{
%% ~~who you are~~
}
\note{ This function is modified based on the matlab code written by Quan Wang (see reference).
%% ~~further notes~~
}
\seealso{
\code{\link{image_kmeans}}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
library(implant)
library(png)
orig = readPNG(system.file("extdata", "reduced.png", package = "implant", mustWork = TRUE))
#Define the response as relative green.
Y = orig[ , , 2]/(orig[ , , 1] + orig[ , , 2] + orig[ , , 3])
#Z is a matrix obtained by the Canny edge detector
Z = readPNG(system.file("extdata", "Z.png",
package = "implant", mustWork = TRUE))
##Note: Users can obtain Z for other images using the package "imager" and the
#function cannyEdges( )
#Z = t(cannyEdges(orig)[ , , 1, 1])
#Take the initial label of EM algorithm using K-means
X = image_kmeans(Y, k = 2)$X
#Obtain the image produced by kmeans clustering
output = matrix(as.numeric(X), nrow = nrow(X), ncol = ncol(X)) - 1
writePNG(output,"~/kmeans.png")
#Run the HMRF Model. Note that it may take a lot of time ...
img = HMRF(X, Y, Z, em_iter = 20, map_iter = 20, beta = 2,
epsilon_em = 0.00001, epsilon_map = 0.00001)
#Obtain the matrix of the segmented image
image = img$image_matrix
#Morphological Operations
imageD = dilation(image)
imageDE = erosion(imageD)
imageDEE = erosion(imageDE)
imageDEED = dilation(imageDEE)
writePNG("~/HMRF.png")
}
|
/man/HMRF.Rd
|
no_license
|
rwang14/implant
|
R
| false | false | 4,133 |
rd
|
# evaluate posterior predictive checks
library(readr)
library(ggplot2)
library(tidyr)
library(dplyr)
source("R/martin.R")
# simulated based on posteriors
all_checks1 <- read_delim("output/model_evaluation/check5_postpred/sims_10000kbot500_post_pred_checks2.txt", delim = " ")
all_checks2 <- read_delim("output/model_evaluation/check5_postpred/sims_10000kbot500_post_pred_checks1.txt", delim = " ")
all_checks <- rbind(all_checks1, all_checks2)
head(all_checks)
# compare these summary statistics
sumstats <- c("num_alleles_mean",
"exp_het_mean",
"mratio_mean",
"prop_low_afs_mean",
"mean_allele_range")
# empirical summary stats
all_stats <- read_csv("data/processed/all_stats_30_modeling.csv") %>%
dplyr::select(species, common, sumstats, bot)
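# (passing the character vector `sumstats` to select() relies on tidyselect;
# with dplyr >= 1.0 the explicit all_of(sumstats) is preferred)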
# long format for plotting
all_checks_long <- all_checks %>%
left_join(all_stats[c("species", "common")], by = "species") %>%
dplyr::select(common, sumstats) %>%
gather(sumstat, value, -common)
# observed sumstats long format
all_sumstats_full_long <- all_stats %>%
gather(sumstat, value, -species, -common, -bot)
# lookup_table <- paste(all_stats$species, " = ", all_stats$common)
sumstat_names <- c(
  exp_het_mean = "Expected\nheterozygosity",
mean_allele_range = "Allelic range",
mratio_mean = "M-ratio",
num_alleles_mean = "Allelic richness",
prop_low_afs_mean = "Prop. of low\nfrequency alleles"
)
# bottlenecked or not bottlenecked
all_stats$mod <- ifelse(all_stats$bot > 0.5, "Bottleneck", "Non-bottleneck")
all_data <- all_checks_long %>%
left_join(all_stats[c("common", "mod")])
p <- ggplot(all_data, aes(value)) +
geom_histogram(aes(fill = mod)) +
geom_vline(aes(xintercept = value), all_sumstats_full_long) +
facet_grid(common ~ sumstat, scales = "free", labeller = labeller(
sumstat = sumstat_names
)) +
theme_martin() +
scale_fill_manual(values = c("#fc8d62","#8da0cb")) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.text.y = element_text(angle = 0),
axis.text= element_text(size = 9.5),
axis.text.y = element_blank(),
legend.position = "bottom",
legend.title=element_blank(),
axis.line.x = element_line(color="black", size = 0.5))
ggsave(filename = "post_pred_checks.jpg", plot = p, width = 10, height = 10)
|
/R/posterior_predictive_checks.R
|
no_license
|
mastoffel/pinniped_bottlenecks
|
R
| false | false | 2,388 |
r
|
# Title : TODO
# Objective : TODO
# Created by: yz
# Created on: 2018/9/19
library(tidyverse)
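# getSub: restrict the chromatogram to a peak's RT window; when the response
# is "height", keep only the single scan closest to the peak apex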
getSub <- function(data, filterPeak, response) {
subData = subset(data, SEC >= filterPeak$rtmin & SEC <= filterPeak$rtmax)
if (response == "height") {
index <- order(abs(subData$SEC - filterPeak$rt))[1]
subData <- subData[index,]
}
subData
}
getFirstPeak <- function(filterPeak, compoundRow) {
minDif = 0
for (i in 1:nrow(filterPeak)) {
dif = filterPeak[i, "rt"] - compoundRow$rtLeft
if (dif <= minDif) {
firstRow = filterPeak[i,]
minDif = dif
}else if (minDif == 0) {
minDif = dif
firstRow = filterPeak[i,]
}
}
firstRow
}
getLargestPeak <- function(filterPeak, data, compoundRow) {
maxArea = 0
response = dealStr(compoundRow$response)
for (i in 1:nrow(filterPeak)) {
subData = getSub(data, filterPeak[i,], response)
area = getArea(subData)
if (area >= maxArea || maxArea == 0) {
maxArea = area
firstRow = filterPeak[i,]
}
}
firstRow
}
getNearestPeak <- function(filterPeak, compoundRow) {
firstRow <- as_tibble(filterPeak) %>%
mutate(min = abs(rt - compoundRow$rt)) %>%
arrange(min) %>%
head(1) %>%
select(-"min") %>%
as.data.frame()
# for (i in 1 : nrow(filterPeak)) {
# dif = abs(filterPeak[i, "rt"] - compoundRow$rt)
# if (dif <= minDif || minDif == 0) {
# firstRow = filterPeak[i,]
# minDif = dif
# }
# }
firstRow
}
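# myRound: round down to the nearest integer, round up when the fraction
# exceeds .5, and keep exact .5 values as half-integers so axis ticks can
# land on 0.5-minute boundaries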
myRound <- function(value) {
int = floor(value)
double = value - int
if (double > 0.5) {
int = int + 1
}else if (double == 0.5) {
int = int + 0.5
}else {
int = int
}
int
}
plotSlightCorrect <- function(data, compoundRow) {
plot(data$SEC, data$INT, col = "red", cex = 0.5, xlab = "RT(m)",
main = paste("peak area | window size:", compoundRow$dfl, " BLine: ", compoundRow$bline, sep = ""),
ylab = "Intensity", xaxt = "n")
at <- seq(from = myRound(min(data$SEC)), to = myRound(max(data$SEC)), by = 0.5)
axis(side = 1, at = at)
lines(data$SEC, data$INT, col = "grey")
}
plotArea <- function(subData, response) {
if (response == "height") {
lines(x = c(subData$SEC, subData$SEC), y = c(0, subData$INT), col = "green", lwd = 1.5)
}else {
n = length(subData$SEC)
polygon(c(subData$SEC[1], subData$SEC, subData$SEC[n]), c(0, subData$INT, 0), col = "green")
}
}
getArea <- function(subData) {
area = 0
if (nrow(subData) == 1) {
area = subData$INT
}else {
for (i in 2:nrow(subData)) {
currentRow <- subData[i,]
beforeRow <- subData[i - 1,]
curArea = (currentRow$INT + beforeRow$INT) * (currentRow$SEC - beforeRow$SEC) / 2
if (curArea < 0) {
curArea = 0
}
area = curArea + area
}
}
area
}
plotAndReturnTotalInt <- function(filterPeak, data, compoundRow) {
totalInt = 0
intensityMethod = tolower(compoundRow$peakMethod)
intensityMethod <- as.character(intensityMethod)
response = dealStr(compoundRow$response)
if (intensityMethod == "all") {
for (i in 1:nrow(filterPeak)) {
row <- filterPeak[i,]
subData = getSub(data, row, response)
totalInt = getArea(subData) + totalInt
}
row <- filterPeak
}else if (intensityMethod == "first") {
row = getFirstPeak(filterPeak, compoundRow)
subData = getSub(data, row, response)
totalInt = getArea(subData) + totalInt
}else if (intensityMethod == "largest") {
row = getLargestPeak(filterPeak, data, compoundRow)
subData = getSub(data, row, response)
totalInt = getArea(subData) + totalInt
}else if (intensityMethod == "nearest") {
row <- getNearestPeak(filterPeak, compoundRow)
subData = getSub(data, row, response)
totalInt = getArea(subData) + totalInt
}
abline(v = row$rt)
abline(v = row$rtmin, col = "red")
abline(v = row$rtmax, col = "red")
print(row)
print(min(subData$SEC))
print(max(subData$SEC))
plotSlightCorrect(data, compoundRow)
if (intensityMethod == "all") {
for (i in 1:nrow(filterPeak)) {
row <- filterPeak[i,]
subData = getSub(data, row, response)
plotArea(subData, response)
}
}else {
plotArea(subData, response)
}
if (totalInt < 0) {
totalInt = 0
}
list(totalInt = totalInt, firstRow = row)
}
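# getMedian: rough noise estimate -- take the minimum of a random 10% subsample
# of the non-negative intensities `times` times and return the median of those
# minima (note that sample() truncates the non-integer subsample size)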
getMedian <- function(originalData, times) {
tmpVec = Filter(function(f) f >= 0, originalData)
vec = c()
for (i in 1:times) {
vec = c(vec, min(sample(tmpVec, length(tmpVec) / 10)))
}
median(vec)
}
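# getFilterPeak: convert the findpeaks() index columns to retention times and
# keep peaks inside the compound's RT window; note it reads `data` from the
# calling environment instead of taking it as an argument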
getFilterPeak <- function(peak, compoundRow) {
colnames(peak) = c("INT", "rt", "rtmin", "rtmax")
peak$rt = data$SEC[peak$rt]
peak$rtmin = data$SEC[peak$rtmin]
peak$rtmax = data$SEC[peak$rtmax]
filterPeak = subset(peak, rt >= compoundRow$rtLeft & rt <= compoundRow$rtRight)
filterPeak
}
createWhenNoExist <- function(f) {
!dir.exists(f) && dir.create(f)
}
getColorByRow <- function(firstRow, compoundRow) {
if (abs(compoundRow$rt - firstRow$rt) > 0.2) {
"red"
}else {
"NA"
}
}
getColor <- function(compoundRow, list) {
colorStr = "NA"
intensityMethod = tolower(compoundRow$peakMethod)
if (intensityMethod != "all") {
colorStr <- getColorByRow(list$firstRow, compoundRow)
}
colorStr
}
changeRtTime <- function(compoundRow) {
compoundRow$rtLeft <- (compoundRow$rt - compoundRow$rtLeft)
compoundRow$rtRight <- (compoundRow$rt + compoundRow$rtRight)
compoundRow
}
library(optparse)
library(pracma)
library(baseline)
source("base.R")
library(xlsx)
option_list <- list(
make_option("--ci", default = "is_0/compoundName.xlsx", type = "character", help = "compound name file"),
make_option("--si", default = "sample_config.xlsx", type = "character", help = "sample config input file"),
make_option("--co", default = "is_0/color.txt", type = "character", help = "color output file"),
make_option("--io", default = "is_0/intensity.txt", type = "character", help = "intensity output file")
)
opt <- parse_args(OptionParser(option_list = option_list))
sampleConfig <- read.xlsx(opt$si, 1, check.names = F)
sampleConfig <- setSampleConfigHeader(sampleConfig)
sampleConfig$fileName <- tolower(sampleConfig$fileName)
compoundConfig <- read.xlsx("compound_config.xlsx", 1, check.names = F)
compoundConfig <- setCompoundConfigHeader(compoundConfig)
compoundConfig <- changeRtTime(compoundConfig)
intensity = data.frame(sample = sampleConfig$fileName)
color = data.frame(sample = sampleConfig$fileName)
uniqBatch = unique(sampleConfig$batch)
compoundNameData <- read.xlsx(opt$ci, 1, check.names = F)
# for (compoundName in compoundConfig$compound) {
for (compoundName in compoundNameData$CompoundName) {
print(compoundName)
dirName = "plot_peaks"
createWhenNoExist(dirName)
compoundRow <- compoundConfig[which(tolower(compoundConfig$compound) == compoundName),]
pdf(file = paste(dirName, "/", compoundName, ".pdf", sep = ""), width = 15, height = 9)
for (bat in uniqBatch) {
config = subset(sampleConfig, batch == bat)
fileNames = config$fileName
for (fileName in fileNames) {
data <- read.table(quote = "", paste("dta/", compoundName, "/", fileName, ".dta", sep = ""), header = T, com = '', sep = "\t", check.names = F)
# print(fileName)
# print(paste("dta/", compoundName, "/", fileName, ".dta", sep = ""))
colnames(data) = c("SEC", "MZ", "INT")
par(mfrow = c(3, 1))
originalData = data$INT
slightSmoothData <- savgol(data$INT, compoundRow$dfl)
smoothData <- data$INT
for (i in 1:compoundRow$iteration) {
smoothData <- savgol(smoothData, compoundRow$fl)
}
if (compoundRow$bline == "no") {
data$INT <- slightSmoothData
slightCorrectValue <- slightSmoothData
correctValue <- smoothData
}else {
baseLineFrame <- data.frame(Date = data$SEC, Visits = slightSmoothData)
baseLineFrame <- t(baseLineFrame$Visits)
slightBaseLine <- baseline(baseLineFrame, method = 'irls')
slightCorrectValue = c(getCorrected(slightBaseLine))
data$INT = slightCorrectValue
baseLineFrame <- data.frame(Date = data$SEC, Visits = smoothData)
baseLineFrame <- t(baseLineFrame$Visits)
baseLine <- baseline(baseLineFrame, method = 'irls')
correctValue = c(getCorrected(baseLine))
}
median = getMedian(slightCorrectValue, 1000)
std <- as.character(compoundRow$std)
std <- tolower(std)
index <- dealStr(compoundRow$index)
if (myStartsWith(index, "is")) {
mic <- compoundRow$std
}else {
mic <- sampleConfig[which(sampleConfig$fileName == fileName), std]
}
plot(data$SEC, originalData, col = "red", cex = 0.5, main = paste("raw chromatogram | batch: ", bat, " sample: ",
fileName, " conc: ", mic, " function: ", compoundRow$fc, " mass: ", compoundRow$mz, sep = ""),
xlab = "RT(m)", ylab = "Intensity", xaxt = "n")
at <- seq(from = myRound(min(data$SEC)), to = myRound(max(data$SEC)), by = 0.5)
axis(side = 1, at = at)
lines(data$SEC, originalData, col = "grey")
noiseStr <- signif(median, 3)
    plot(data$SEC, correctValue, col = "red", cex = 0.5, xlab = "RT(m)",
main = paste("peak picking | window size: ", compoundRow$fl, " iteration: ", compoundRow$iteration, " lp: ", compoundRow$nups,
" rp: ", compoundRow$ndowns,
" snr: ", compoundRow$snr, " peak location: ",
compoundRow$peakMethod, " noise: ", noiseStr, " BLine: ", compoundRow$bline, sep = ""), ylab = "Intensity",
xaxt = "n")
at <- seq(from = myRound(min(data$SEC)), to = myRound(max(data$SEC)), by = 0.5)
axis(side = 1, at = at)
lines(data$SEC, correctValue, col = "grey")
    abline(h = median, col = "blue")
    abline(h = median * compoundRow$snr, col = "blue")
peak = findpeaks(correctValue, threshold = median * compoundRow$snr, nups = compoundRow$nups, ndowns = compoundRow$ndowns)
peak = as.data.frame(peak)
valid <- nrow(peak) != 0
filterPeak <- data.frame()
if (valid) {
filterPeak <- getFilterPeak(peak, compoundRow)
}
abline(v = c(compoundRow$rt, compoundRow$rtLeft, compoundRow$rtRight), col = "blue", lty = 3)
valid <- nrow(filterPeak) != 0
if (!valid) {
intensity[which(intensity$sample == fileName), "batch"] = bat
intensity[which(intensity$sample == fileName), compoundName] = 0
color[which(color$sample == fileName), "batch"] = bat
color[which(color$sample == fileName), compoundName] = "NA"
plotSlightCorrect(data, compoundRow)
next
}
list <- plotAndReturnTotalInt(filterPeak, data, compoundRow)
totalInt <- list$totalInt
colorStr <- getColor(compoundRow, list)
intensity[which(intensity$sample == fileName), "batch"] = bat
intensity[which(intensity$sample == fileName), compoundName] = totalInt
color[which(color$sample == fileName), "batch"] = bat
color[which(color$sample == fileName), compoundName] = colorStr
}
}
dev.off()
}
write.table(intensity, opt$io, quote = FALSE, sep = "\t", row.names = F)
write.table(color, opt$co, quote = FALSE, sep = "\t", row.names = F)
|
/server/rScripts/findPeak.R
|
no_license
|
yz8169/product_tmbq
|
R
| false | false | 11,502 |
r
|
library(mcp)
library(tidybayes)
library(changepoint)
library(dplyr)
library(tidyr)
library(lubridate)
library(readr)
library(hrbrthemes)
library(ggplot2)
library(stringr)
library(RcppRoll)
library(ragg)
plot_county <- function(the_state, the_county, counties=my_counties) {
state <- counties %>% filter(state == the_state)
one_county <- state %>% filter(county == the_county)
print(paste(the_state, the_county, nrow(one_county)))
one_county <- one_county %>%
mutate(sequence = as.numeric(date)) %>%
arrange(date) %>%
mutate(daily_deaths = deaths - lag(deaths),
daily_cases = cases - lag(cases)) %>%
replace_na(list(daily_deaths = 0,
daily_cases = 0)) %>%
mutate(mean_cases_7 = roll_mean(daily_cases, 7, fill=0, align="right"),
mean_deaths_7 = roll_mean(daily_deaths, 7, fill=0, align="right")) %>%
ungroup()
# use two-phase changepoint detection:
# 1) changepoint package for quick search
# 2) build model from quick search and use
# that model in mcp package for more
# thorough search
fit_changepoint = cpt.meanvar(one_county$mean_cases_7, method = "PELT",
minseglen = 30)
# plot(fit_changepoint)
#str(fit_changepoint)
cps <- cpts(fit_changepoint)
# maybe add the change point locations as priors, somehow? Right now,
# just the number of changepoints is a "prior" of sorts
#
# using 1 + number of changepoints above as an aggressive attempt
# to find changepoints. Not sure if sound...
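  # the mcp model below is one slope segment (mean_cases_7 ~ sequence)
  # followed by an intercept-only plateau (1 ~ 1) per expected changepoint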
model <- c(mean_cases_7 ~ sequence,
rep_len(c(1~1),
1 + ncpts(fit_changepoint))) %>%
as.list()
# model
fit <- mcp(model, data = one_county)
# plot(fit)
#fit
cp_df <- fit$mcmc_post %>%
tidy_draws() %>%
summarize_all(mean) %>%
select(starts_with("cp_")) %>%
pivot_longer(cols = starts_with("cp_"),
names_to = "changepoint",
values_to = "sequence") %>%
mutate(sequence = floor(sequence))
one_county <- one_county %>% left_join(cp_df) %>%
fill(changepoint, .direction="up") %>%
replace_na(list(changepoint = "cp_n")) %>%
group_by(changepoint) %>%
mutate(mean_7_mean = mean(mean_cases_7)) %>%
ungroup() %>%
mutate(cummax = cummax(daily_cases),
is_highpoint = cummax == daily_cases)
the_plot <- one_county %>%
pivot_longer(cols = c(mean_cases_7, daily_cases), names_to = "type", values_to = "count") %>%
ggplot() +
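    # extract_segments is passed as a function, so ggplot2 applies it to the
    # layer data to compute the shaded changepoint segments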
geom_rect(data = extract_segments, aes(xmin = xmin, xmax=xmax, fill = as.factor(shade)), ymin=0, ymax=Inf, color=NA, alpha=0.5) +
    scale_fill_manual(guide = "none", values = c("white", "#e0e0e0")) +
geom_line(aes(x=date, y=count, color=type)) +
scale_color_manual(NULL,values = c("gray", "black"), labels = c("Daily", "7-day average")) +
geom_point(aes(x=date, y=count, shape = is_highpoint, size = type), color="black", show.legend = FALSE) +
scale_size_manual(NULL, values = c(1.5,0)) + # zero-sized if on the average line
scale_shape_manual(NULL, values = c(NA,1)) + # no shape if not a highpoint
labs(
title = paste(min(one_county$county), "County", min(one_county$state)),
subtitle = max(one_county$date),
caption = str_wrap("Shading shows changes in data, circles are new highpoints",60),
y = "Cases",
x = "Date"
) + theme_ipsum_rc() +
theme(
#legend.position="none"
) + ylim(0,NA)
dpi=200
img_name <- paste(the_county, the_state, "case-wide.png", sep="-")
agg_png(here::here("county_plots", img_name), width = 1600, height = 900 , res=dpi)
print(the_plot)
invisible(dev.off())
return(the_plot)
}
extract_segments <- function(one_county) {
one_county %>%
group_by(changepoint) %>%
summarize(xmin = min(date)-days(1), xmax=max(date))%>%
mutate(changepoint = factor(changepoint)) %>%
mutate(shade = as.numeric(changepoint) %%2)
}
my_counties <- read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv")
tib <- tribble(
~state, ~county, ~note,
# "Washington", "Whitman","",
"Texas", "Travis", "",
"Texas", "Lubbock","",
"Montana", "Lewis and Clark","",
"Montana", "Yellowstone","",
"Montana", "Missoula","",
"Arizona", "Maricopa","",
"Oregon", "Multnomah","",
"Massachusetts", "Hampshire","" ,
"Texas","Tarrant", "",
"Texas","El Paso", "",
"Texas","Midland", "",
"Texas","Harris", "",
# "Georgia", "Bulloch", "",
# "Mississippi", "Lafayette","",
# "Missouri","Greene","",
# "Illinois", "Champaign","",
# "Colorado", "Summit","",
# "Florida", "Leon","",
"Oklahoma", "Garfield","",
"Oklahoma", "Payne","",
"Oklahoma", "Muskogee","",
# "Iowa", "Story","",
# "Iowa", "Johnson","",
# "North Carolina", "Orange","",
# "Indiana", "Delaware", "Ball State University"
) %>% arrange(state, county)
future::plan(future::multisession, workers = 4)
furrr::future_map2(tib$state, tib$county, ~plot_county(.x, .y, counties = my_counties))
|
/any-county.R
|
no_license
|
schnee/covid-19
|
R
| false | false | 5,073 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{plot_clustering}
\alias{plot_clustering}
\title{Graphic representation of the evaluation measures.}
\usage{
plot_clustering(df, metric)
}
\arguments{
\item{df}{data matrix or data frame with the result of running the clustering
algorithm.}
\item{metric}{a string with the name of the metric selected for evaluation.}
}
\description{
Graphical representation of the evaluation measures grouped by
cluster.
}
\details{
In certain cases, reviewing or filtering the data is necessary, and graphical
representations make this task much easier. With this method we can filter
the results by metric and inspect them graphically.
}
\examples{
result = clustering(
df = cluster::agriculture,
min = 4,
max = 5,
algorithm='gmm',
metrics=c("precision"),
attributes = TRUE
)
plot_clustering(result,c("precision"))
}
|
/man/plot_clustering.Rd
|
no_license
|
minghao2016/Clustering-1
|
R
| false | true | 1,063 |
rd
|
#' @import chromote
#' @import later
#' @import promises
#'
NULL
#' Take a screenshot of a URL
#'
#' @param url A vector of URLs to visit. If multiple URLs are provided, it will
#' load and take screenshots of those web pages in parallel.
#' @param file A vector of names of output files. Should end with \code{.png} or
#' \code{.pdf}. If several screenshots have to be taken and only one filename
#' is provided, then the function appends the index number of the screenshot
#' to the file name. For PDF output, it is just like printing the page to PDF
#' in a browser; \code{selector}, \code{cliprect}, \code{expand}, and
#' \code{zoom} will not be used for PDFs.
#' @param vwidth Viewport width. This is the width of the browser "window".
#' @param vheight Viewport height. This is the height of the browser "window".
#' @param selector One or more CSS selectors specifying a DOM element to set the
#' clipping rectangle to. The screenshot will contain these DOM elements. For
#' a given selector, if it has more than one match, all matching elements will
#'   be used. This option is not compatible with \code{cliprect}. When taking
#'   screenshots of multiple URLs, this parameter can also be a list with the
#'   same length as \code{url}, with each element of the list containing a
#'   vector of CSS selectors to use for the corresponding URL.
#' @param cliprect Clipping rectangle. If \code{cliprect} and \code{selector}
#' are both unspecified, the clipping rectangle will contain the entire page.
#' This can be the string \code{"viewport"}, in which case the clipping
#' rectangle matches the viewport size, or it can be a four-element numeric
#'   vector specifying the top, left, width, and height. When taking screenshots
#'   of multiple URLs, this parameter can also be a list with the same length as
#'   \code{url}, with each element of the list being "viewport" or a
#'   four-element numeric vector. This option is not compatible with
#'   \code{selector}.
#' @param delay Time to wait before taking screenshot, in seconds. Sometimes a
#' longer delay is needed for all assets to display properly.
#' @param expand A numeric vector specifying how many pixels to expand the
#' clipping rectangle by. If one number, the rectangle will be expanded by
#' that many pixels on all sides. If four numbers, they specify the top,
#'   right, bottom, and left, in that order. When taking screenshots of multiple
#'   URLs, this parameter can also be a list with the same length as \code{url},
#'   with each element of the list containing a single number or four numbers to
#'   use for the corresponding URL.
#' @param zoom A number specifying the zoom factor. A zoom factor of 2 will
#' result in twice as many pixels vertically and horizontally. Note that using
#' 2 is not exactly the same as taking a screenshot on a HiDPI (Retina)
#' device: it is like increasing the zoom to 200% in a desktop browser and
#' doubling the height and width of the browser window. This differs from
#' using a HiDPI device because some web pages load different,
#' higher-resolution images when they know they will be displayed on a HiDPI
#' device (but using zoom will not report that there is a HiDPI device).
#' @param useragent The User-Agent header used to request the URL.
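#' @param max_concurrent Maximum number of concurrent screenshot sessions
#'   (assumed from the argument name; it is accepted but not referenced in the
#'   body of this version of the function).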
#'
#' @examples
#' if (interactive()) {
#'
#' # Whole web page
#' webshot("https://github.com/rstudio/shiny")
#'
#' # Might need a delay for all assets to display
#' webshot("http://rstudio.github.io/leaflet", delay = 0.5)
#'
#' # One can also take screenshots of several URLs with only one command.
#' # This is more efficient than calling 'webshot' multiple times.
#' webshot(c("https://github.com/rstudio/shiny",
#' "http://rstudio.github.io/leaflet"),
#' delay = 0.5)
#'
#' # Clip to the viewport
#' webshot("http://rstudio.github.io/leaflet", "leaflet-viewport.png",
#' cliprect = "viewport")
#'
#' # Specific size
#' webshot("https://www.r-project.org", vwidth = 1600, vheight = 900,
#' cliprect = "viewport")
#'
#' # Manual clipping rectangle
#' webshot("http://rstudio.github.io/leaflet", "leaflet-clip.png",
#' cliprect = c(200, 5, 400, 300))
#'
#' # Using CSS selectors to pick out regions
#' webshot("http://rstudio.github.io/leaflet", "leaflet-menu.png", selector = ".list-group")
#' # With multiple selectors, the screenshot will contain all selected elements
#' webshot("http://reddit.com/", "reddit-top.png",
#' selector = c("[aria-label='Home']", "input[type='search']"))
#'
#' # Expand selection region
#' webshot("http://rstudio.github.io/leaflet", "leaflet-boxes.png",
#' selector = "#installation", expand = c(10, 50, 0, 50))
#'
#' # If multiple matches for a given selector, it will take a screenshot that
#' # contains all matching elements.
#' webshot("http://rstudio.github.io/leaflet", "leaflet-p.png", selector = "p")
#' webshot("https://github.com/rstudio/shiny/", "shiny-stats.png",
#' selector = "ul.numbers-summary")
#'
#' # Result can be piped to other commands like resize() and shrink()
#' webshot("https://www.r-project.org/", "r-small.png") %>%
#' resize("75%") %>%
#' shrink()
#'
#' }
#'
#' @export
webshot <- function(
url = NULL,
file = "webshot.png",
vwidth = 992,
vheight = 744,
selector = NULL,
cliprect = NULL,
expand = NULL,
delay = 0.2,
zoom = 1,
useragent = NULL,
max_concurrent = getOption("webshot.concurrent", default = 6)
) {
if (length(url) == 0) {
stop("Need url.")
}
# Ensure urls are either web URLs or local file URLs.
url <- vapply(url,
function(x) {
if (!is_url(x)) {
# `url` is a filename, not an actual URL. Convert to file:// format.
file_url(x)
} else {
x
}
},
character(1)
)
# Convert params cliprect, selector and expand to list if necessary, because
# they can be vectors.
if(!is.null(cliprect) && !is.list(cliprect)) cliprect <- list(cliprect)
if(!is.null(selector) && !is.list(selector)) selector <- list(selector)
if(!is.null(expand) && !is.list(expand)) expand <- list(expand)
if (is.null(selector)) {
selector <- "html"
}
# If user provides only one file name but wants several screenshots, then the
# below code generates as many file names as URLs following the pattern
# "filename001.png", "filename002.png", ... (or whatever extension it is)
if (length(url) > 1 && length(file) == 1) {
file <- vapply(1:length(url), FUN.VALUE = character(1), function(i) {
replacement <- sprintf("%03d.\\1", i)
gsub("\\.(.{3,4})$", replacement, file)
})
}
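  # For example, file = "shot.png" with three URLs becomes
  # "shot001.png", "shot002.png", "shot003.png".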
# Check length of arguments and replicate if necessary
args_all <- list(
url = url,
file = file,
vwidth = vwidth,
vheight = vheight,
selector = selector,
cliprect = cliprect,
expand = expand,
delay = delay,
zoom = zoom,
useragent = useragent
)
n_urls <- length(url)
args_all <- mapply(args_all, names(args_all),
FUN = function(arg, name) {
if (length(arg) == 0) {
return(vector(mode = "list", n_urls))
} else if (length(arg) == 1) {
return(rep(arg, n_urls))
} else if (length(arg) == n_urls) {
return(arg)
} else {
stop("Argument `", name, "` should be NULL, length 1, or same length as `url`.")
}
},
SIMPLIFY = FALSE
)
args_all <- long_to_wide(args_all)
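  # long_to_wide() (assumed to be an internal helper defined elsewhere in this
  # package) turns the list of per-argument vectors into one argument set per URL.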
cm <- default_chromote_object()
# A list of promises for the screenshots
res <- lapply(args_all,
function(args) {
new_session_screenshot(cm,
args$url, args$file, args$vwidth, args$vheight, args$selector,
args$cliprect, args$expand, args$delay, args$zoom, args$useragent
)
}
)
p <- promise_all(.list = res)
res <- cm$wait_for(p)
res <- structure(unlist(res), class = "webshot")
res
}
new_session_screenshot <- function(
chromote,
url,
file,
vwidth,
vheight,
selector,
cliprect,
expand,
delay,
zoom,
useragent
) {
filetype <- tolower(tools::file_ext(file))
if (filetype != "png" && filetype != "pdf") {
stop("File extension must be 'png' or 'pdf'")
}
if (is.null(selector)) {
selector <- "html"
}
if (is.character(cliprect)) {
if (cliprect == "viewport") {
cliprect <- c(0, 0, vwidth, vheight)
} else {
stop("Invalid value for cliprect: ", cliprect)
}
} else {
if (!is.null(cliprect) && !(is.numeric(cliprect) && length(cliprect) == 4)) {
stop("`cliprect` must be a vector with four numbers, or a list of such vectors")
}
}
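  # For example, cliprect = "viewport" with the default vwidth = 992 and
  # vheight = 744 becomes c(0, 0, 992, 744).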
s <- NULL
p <- chromote$new_session(wait_ = FALSE,
width = vwidth,
height = vheight
)$
then(function(session) {
s <<- session
if (!is.null(useragent)) {
s$Network$setUserAgentOverride(userAgent = useragent)
}
s$Page$navigate(url, wait_ = FALSE)
s$Page$loadEventFired(wait_ = FALSE)
})$
then(function(value) {
if (delay > 0) {
promise(function(resolve, reject) {
later(
function() {
resolve(value)
},
delay
)
})
} else {
value
}
})$
then(function(value) {
if (filetype == "png") {
s$screenshot(
filename = file, selector = selector, cliprect = cliprect,
expand = expand, scale = zoom,
show = FALSE, wait_ = FALSE
)
} else if (filetype == "pdf") {
s$screenshot_pdf(filename = file, wait_ = FALSE)
}
})$
then(function(value) {
message(url, " screenshot completed")
normalizePath(value)
})$
finally(function() {
s$close()
})
p
}
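# Compatibility stub for code written against the original webshot package:
# this package drives Chrome via {chromote}, so PhantomJS is never needed and
# the check can always report success.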
#' @export
find_phantom <- function() TRUE
knit_print.webshot <- function(x, ...) {
lapply(x, function(filename) {
res <- readBin(filename, "raw", file.size(filename))
ext <- gsub(".*[.]", "", basename(filename))
structure(list(image = res, extension = ext), class = "html_screenshot")
})
}
#' @export
print.webshot <- function(x, ...) {
invisible(x)
}
|
/R/webshot.R
|
no_license
|
LouisStAmour/webshot2
|
R
| false | false | 10,127 |
r
|
|
# The script below estimates selection coefficients of L1 insertions from the
# 1000 Genomes data, using insertion calls obtained with MELT
#
##########################################
# #
# Load packages #
# #
##########################################
# Source start script
source('D:/L1polymORFgit/Scripts/_Start_L1polymORF.R')
# Load packages
library(GenomicRanges)
library(pracma)
library(rtracklayer)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
##########################################
# #
# Set parameters #
# #
##########################################
# Specify file paths
DataPath <- 'D:/L1polymORF/Data/'
MeltInsPath <- "D:/L1polymORF/Data/nstd144.GRCh37.variant_call.vcf"
MeltDelPath <- "D:/L1polymORF/Data/DEL.final_comp.vcf"
ChrLPath <- 'D:/L1polymORF/Data/ChromLengthsHg19.Rdata'
InputPath <- 'D:/L1polymORF/Data/SingletonAnalysis_unphased.RData'
L1RefPath <- 'D:/L1polymORF/Data/L1HS_repeat_table_Hg19.csv'
L1RefRangePath <- 'D:/L1polymORF/Data/L1RefRanges_hg19.Rdata'
RegrOutputPath <- "D:/L1polymORF/Data/L1RegressionResults.RData"
SelectTabOutPath <- "D:/L1polymORF/Data/L1SelectionResults_MELT.csv"
SelectGenTabOutPath <- "D:/L1polymORF/Data/L1SelectionGeneResults_MELT.csv"
SelectResultOutPath <- "D:/L1polymORF/Data/L1SelectionResults_MELT.RData"
SelectWithinGenTabOutPath <- "D:/L1polymORF/Data/L1SelectionWithinGeneResults_MELT.csv"
SelectSingletonTabOutPath <- "D:/L1polymORF/Data/L1SelectionSingletonResults_MELT.csv"
# False discovery rate for selected L1
FDR <- 0.1
# Specify range width for DNAse analysis
RangeWidth <- 10^6
# Human effective population size
PopSize <- 10^5
# Minimum length for a full L1
MinLengthFullL1 <- 6000
# Sample size for ME insertion calls
MEInsSamplesize <- 2453
##########################################
# #
# Load and process data #
# #
##########################################
cat("\n\nLoading and processing data ...")
# Read in vcf file with MELT insertion calls
MEInsCall <- read.table(MeltInsPath,
as.is = T,
col.names = c("Chrom", "Pos", "ID", "Alt", "Type", "V6",
"V7", "Info"))
MEInsCall <- MEInsCall[MEInsCall$Type == "<INS:ME:LINE1>",]
# Extract allele frequency from info column
GetAF <- function(x){
xSplit <- strsplit(x, ";")[[1]]
AFch <- strsplit(xSplit[length(xSplit)], "=")[[1]][2]
as.numeric(AFch)
}
GetLength <- function(x){
xSplit <- strsplit(x, ";")[[1]]
LengthCh <- strsplit(xSplit[grep("SVLEN=", xSplit)], "=")[[1]][2]
as.numeric(LengthCh)
}
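# For example, with an illustrative Info string
# "TSD=null;ASSESS=5;SVLEN=6019;AF=0.0002" (AF assumed to be the final field,
# as in these MELT calls), GetLength() returns 6019 and GetAF() returns 2e-04.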
# Add columns necessary for analysis
MEInsCall$AF <- sapply(MEInsCall$Info, GetAF)
MEInsCall <- MEInsCall[!is.na(MEInsCall$AF), ]
MEInsCall$L1width <- sapply(MEInsCall$Info, GetLength)
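# Infer the (haploid) sample size from the smallest allele frequency, assuming
# the rarest insertion is present as a single allele copy (AF = 1/SampleSize).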
MEInsCall$SampleSize <- 1/min(MEInsCall$AF)
# MEInsCall$SampleSize <- 2 * MEInsSamplesize
MEInsCall$Freq <- MEInsCall$SampleSize * MEInsCall$AF
MEInsCall$blnFull <- MEInsCall$L1width >= MinLengthFullL1
# Create GRanges object for MEInsCall
MEInsCall$ChromName <- paste("chr", MEInsCall$Chrom, sep = "")
MEIns_GR <- makeGRangesFromDataFrame(df = MEInsCall,
seqnames.field = "ChromName",
start.field = "Pos",
end.field = "Pos")
# Read in vcf file with MELT deletion calls
MEDelCall <- ReadVCF(MeltDelPath)
MEDelCall$chromosome <- paste("chr", MEDelCall$X.CHROM, sep = "")
MEDel_GR <- makeGRangesFromDataFrame(df = MEDelCall,
start.field = "POS",
end.field = "POS")
colnames(MEDelCall)
# function to get numeric genotype
GetNumericGenotype <- function(x){
Split1 <- strsplit(x, ":")[[1]][1]
Split2 <- strsplit(Split1, "/")[[1]]
sum(as.numeric(Split2))
}
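# e.g. GetNumericGenotype("0/1:...") returns 1, the number of alternate alleles.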
# Get numeric genotype of all reference L1 deletions
GTCols <- grep("L1Filtered", colnames(MEDelCall))
L1RefNumGen <- 2 - sapply(GTCols, function(x){
sapply(1:nrow(MEDelCall), function(y) GetNumericGenotype(MEDelCall[y,x]))
})
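# L1RefNumGen counts reference-L1 alleles per sample (2 minus the number of
# called deletion alleles), so Freq below is the total reference-allele count.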
# Add columns for frequency and sample size
MEDelCall$Freq <- rowSums(L1RefNumGen, na.rm = T)
MEDelCall$SampleSize <- apply(L1RefNumGen, 1, function(x) 2*sum(!is.na(x)))
# Load previously generated objects
load(InputPath)
load(L1GRPath)
load(ChrLPath)
load(L1RefRangePath)
load(RegrOutputPath)
load("D:/L1polymORF/Data/DelVsL1Length.RData")
# Create genomic ranges of reference L1 with 100 bp added on each side
L1NeighborRanges <- GRanges(seqnames = seqnames(L1GRanges),
IRanges(start = start(L1GRanges) - 100,
end = end(L1GRanges) + 100))
# Create a data frame of reference L1
RefL1Data <- data.frame(L1width = width(L1GRanges),
Freq = 30, SampleSize = 30)
OL_MEDelRefL1 <- findOverlaps(L1NeighborRanges, MEDel_GR)
RefL1Data$Freq[OL_MEDelRefL1@from] <- MEDelCall$Freq[OL_MEDelRefL1@to]
RefL1Data$SampleSize[OL_MEDelRefL1@from] <- MEDelCall$SampleSize[OL_MEDelRefL1@to]
RefL1Data$blnFull <- RefL1Data$L1width >= MinLengthFullL1
# Number of L1 that are fixed at proportion 1
N1 <- length(L1GRanges) - length(OL_MEDelRefL1@from)
RefL1Data <- RefL1Data[OL_MEDelRefL1@from, ]
L1GRanges <- L1GRanges[OL_MEDelRefL1@from]
# Put data of non-reference L1 (insertions) and reference L1 (deletions)
# together
L1TotData <- rbind(MEInsCall[ ,c("L1width", "Freq", "SampleSize", "blnFull")],
RefL1Data)
L1TotData$blnIns <- c(rep(T, nrow(MEInsCall)), rep(F, nrow(RefL1Data)))
L1TotData$L1Freq <- NA
L1TotData$L1Freq[L1TotData$blnIns] <- L1TotData$Freq[L1TotData$blnIns] /
L1TotData$SampleSize[L1TotData$blnIns]
L1TotData$L1Freq[!L1TotData$blnIns] <- 1 - L1TotData$Freq[!L1TotData$blnIns] /
L1TotData$SampleSize[!L1TotData$blnIns]
L1TotData$DetectProb <- 0.85
L1TotData$DetectProb[L1TotData$blnIns] <- 0.9
# Perform logistic regression for the probability of reference L1 as function
# of L1 frequency
L1TotData$blnRef <- !L1TotData$blnIns
LogRegL1Ref <- glm(blnRef ~ L1Freq, family = binomial, data = L1TotData)
LogRegL1Ref$coefficients
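# Sketch: the fitted model gives the probability that an L1 at a given
# frequency is in the reference genome; e.g., for frequency 0.5:
# plogis(sum(LogRegL1Ref$coefficients * c(1, 0.5)))
# These coefficients are passed as LogRegCoeff to the likelihood fits below.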
# Combine genomic ranges
L1TotGR <- c(MEIns_GR, L1GRanges)
# Create a predictor variable for involvement in ectopic recombination
# L1Width <- width(L1TotGR)
# hist(L1Width, breaks = seq(0, 6500, 100))
# idxWidth <- which(!is.na(L1TotData$L1width))
# L1TotData$RecPredict <- NA
# L1TotData$RecPredict[idxWidth] <- 150000*sapply(L1TotData$L1width[idxWidth], function(x){
# idxMatch <- which.min(abs(x - DelVsL1Length$x))
# DelVsL1Length$y[idxMatch]
# })
#
# L1Width <- L1TotData$L1width[idxWidth]
# L1Width[L1Width >= 4500] <- 4500
# L1WidthOrder <- order(L1Width, decreasing = T)
# OrderMatch <- match(1:length(idxWidth), L1WidthOrder)
# Deltas <- c(L1Width[L1WidthOrder[-length(L1Width)]] - L1Width[L1WidthOrder[-1]],
# L1Width[L1WidthOrder[length(L1Width)]])
# DeltasSqProd <- 10^-7*Deltas^2 * (L1WidthOrder - 1)
# Rev <- length(idxWidth):1
# L1WidthProd <- cumsum(DeltasSqProd[Rev])[Rev]
# L1TotData$RecPredict[idxWidth] <- L1WidthProd[OrderMatch]
# max(L1TotData$RecPredict[idxWidth])
# plot(L1TotData$L1width, L1TotData$RecPredict)
# Number of L1 that are not fixed
Nnf <- nrow(L1TotData)
# Make genomic ranges for L1SingletonCoeffs
L1SingletonCoeffs$chromosome <- paste("chr", L1SingletonCoeffs$Chrom, sep = "")
L1SingletonCoeffs_GR <- makeGRangesFromDataFrame(L1SingletonCoeffs,
seqnames.field = "chromosome",
start.field = "Pos",
end.field = "Pos")
# Read information about 1000 genome samples
SampleInfo <- read.table(G1000SamplePath, header = T)
SampleMatch <- match(SampleColumns, SampleInfo$sample)
Pops <- SampleInfo$super_pop[SampleMatch]
NrS <- length(SampleColumns)
# Define more genomic ranges
GeneGR <- genes(TxDb.Hsapiens.UCSC.hg19.knownGene)
ExonGR <- exons(TxDb.Hsapiens.UCSC.hg19.knownGene)
PromGR <- promoters(TxDb.Hsapiens.UCSC.hg19.knownGene, upstream = 10000)
CDSGR <- cds(TxDb.Hsapiens.UCSC.hg19.knownGene)
IntronGRList <- intronsByTranscript(TxDb.Hsapiens.UCSC.hg19.knownGene,
use.names = T)
FiveUTRGRList <- fiveUTRsByTranscript(TxDb.Hsapiens.UCSC.hg19.knownGene,
use.names = T)
ThreeUTRGRList <- threeUTRsByTranscript(TxDb.Hsapiens.UCSC.hg19.knownGene,
use.names = T)
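# Fraction of the genome covered by gene ranges (computed before overlapping
# ranges are collapsed below):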
sum(width(GeneGR)/10^6) / sum(ChromLengthsHg19/10^6)
# Among overlapping genomic ranges, retain the longest
GeneGR <- UniqueGRanges(GeneGR)
cat("done!\n")
##########################################
# #
# Add columns to L1SingletonCoeffs #
# #
##########################################
cat("Add columns to L1SingletonCoeffs ...")
# Turn factors into numeric values
L1SingletonCoeffs$L1Start <- as.numeric(as.character(L1SingletonCoeffs$L1Start))
L1SingletonCoeffs$L1End <- as.numeric(as.character(L1SingletonCoeffs$L1End))
# Indicator for full-length
L1SingletonCoeffs$blnFull <- L1SingletonCoeffs$L1Start <= 3 &
L1SingletonCoeffs$L1End >= MinLengthFullL1
sum(L1SingletonCoeffs$InsLength <= 100)
# Indicator for significant effect
L1SingletonCoeffs$blnSig <- p.adjust(L1SingletonCoeffs$Pr...z..) < FDR
hist(L1SingletonCoeffs$Pr...z.., breaks = seq(0, 1, 0.005))
# Indicator for positive selection
L1SingletonCoeffs$blnSelect <- L1SingletonCoeffs$blnSig &
L1SingletonCoeffs$coef < 0
# Indicator for negative selection
L1SingletonCoeffs$blnNegSelect <- L1SingletonCoeffs$blnSig &
L1SingletonCoeffs$coef > 0
sum(L1SingletonCoeffs$blnNegSelect)
sum(L1SingletonCoeffs$blnSelect)
# Indicator for selection (+1 = positive, -1 = negative, 0 = neutral)
L1SingletonCoeffs$SelectInd <- 0
L1SingletonCoeffs$SelectInd[L1SingletonCoeffs$blnSelect] <- 1
L1SingletonCoeffs$SelectInd[L1SingletonCoeffs$blnNegSelect] <- -1
# Calculate distance to genes
L1SingletonCoeffs$Dist2Gene <- Dist2Closest(L1SingletonCoeffs_GR, GeneGR)
L1SingletonCoeffs$blnOLGene <- L1SingletonCoeffs$Dist2Gene == 0
# Calculate logarithm of distance to genes
L1SingletonCoeffs$LogDist2Gene <- log(L1SingletonCoeffs$Dist2Gene + 0.1)
# Standardize SE ratio to one
L1SingletonCoeffs$SE_RatioSt <- 1/L1SingletonCoeffs$se.coef./
mean(1/L1SingletonCoeffs$se.coef.)
# Add boolean indicators for overlap
L1SingletonCoeffs$blnOLGene <- overlapsAny(L1SingletonCoeffs_GR, GeneGR, ignore.strand = T)
L1SingletonCoeffs$blnOLProm <- overlapsAny(L1SingletonCoeffs_GR, PromGR, ignore.strand = T)
L1SingletonCoeffs$blnOLExon <- overlapsAny(L1SingletonCoeffs_GR, ExonGR, ignore.strand = T)
L1SingletonCoeffs$blnOLIntron <- L1SingletonCoeffs$blnOLGene & (!L1SingletonCoeffs$blnOLExon)
L1SingletonCoeffs$blnOLIntergen <- !(L1SingletonCoeffs$blnOLGene | L1SingletonCoeffs$blnOLProm)
L1SingletonCoeffs$L1StartNum <- as.numeric(as.character(L1SingletonCoeffs$L1Start))
L1SingletonCoeffs$L1EndNum <- as.numeric(as.character(L1SingletonCoeffs$L1End))
L1SingletonCoeffs$blnFull <- L1SingletonCoeffs$L1StartNum <= 1 & L1SingletonCoeffs$L1EndNum >= 6000
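# (Note: this redefines blnFull from above with a stricter start cutoff of 1
# instead of 3.)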
# Add info about overlapping genes
L1coeff_Gene_OL <- findOverlaps(L1SingletonCoeffs_GR, GeneGR, ignore.strand = T)
L1SingletonCoeffs$idxGene <- NA
L1SingletonCoeffs$GeneWidth <- NA
L1SingletonCoeffs$GeneID <- NA
L1SingletonCoeffs$blnOLGeneSameStrand <- NA
L1SingletonCoeffs$idxGene[L1coeff_Gene_OL@from] <- L1coeff_Gene_OL@to
L1SingletonCoeffs$GeneWidth[L1coeff_Gene_OL@from] <- width(GeneGR)[L1coeff_Gene_OL@to]
L1SingletonCoeffs$GeneID[L1coeff_Gene_OL@from] <-
GeneGR@elementMetadata@listData$gene_id[L1coeff_Gene_OL@to]
L1SingletonCoeffs$blnOLGeneSameStrand[L1coeff_Gene_OL@from] <-
L1SingletonCoeffs$L1Strand[L1coeff_Gene_OL@from] == as.vector(strand(GeneGR))[L1coeff_Gene_OL@to]
# Check out properties of L1 with signal of positive selection
L1SingletonCoeffs[L1SingletonCoeffs$blnSelect,]
fisher.test(L1SingletonCoeffs$blnSelect, (L1SingletonCoeffs$blnOLIntron &
L1SingletonCoeffs$blnOLGeneSameStrand))
mean((L1SingletonCoeffs$blnOLIntron &
L1SingletonCoeffs$blnOLGeneSameStrand))
# Standardize selection coefficients
CoeffAggMean <- aggregate(coef ~ Freq, data = L1SingletonCoeffs, FUN = mean)
CoeffAggVar <- aggregate(coef ~ Freq, data = L1SingletonCoeffs, FUN = var)
CoeffAggN <- aggregate(coef ~ Freq, data = L1SingletonCoeffs, FUN = length)
CoeffAggMerge <- merge(CoeffAggMean, CoeffAggVar, by = 'Freq')
CoeffAggMerge <- merge(CoeffAggMerge, CoeffAggN, by = 'Freq')
colnames(CoeffAggMerge)[2:4] <- c("Mean", "Var", "N")
CoeffAggMerge$StDev <- sqrt(CoeffAggMerge$Var)
FreqMatch <- match(L1SingletonCoeffs$Freq, CoeffAggMerge$Freq)
L1SingletonCoeffs$MeanCof <- CoeffAggMerge$Mean[FreqMatch]
L1SingletonCoeffs$StDevCof <- CoeffAggMerge$StDev[FreqMatch]
L1SingletonCoeffs$CoefSt <- (L1SingletonCoeffs$coef - L1SingletonCoeffs$MeanCof) /
L1SingletonCoeffs$StDevCof
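# CoefSt standardizes each singleton coefficient within its frequency class
# (a z-score relative to L1s observed at the same frequency).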
cat("done!\n")
####################################################
# #
# Overview of L1 intersection with features #
# #
####################################################
# Indicator variable for intersection with various GRanges
L1TotData$blnOLGene <- overlapsAny(L1TotGR, GeneGR, ignore.strand = T)
L1TotData$blnOLGeneSameStrand <- overlapsAny(L1TotGR, GeneGR)
L1TotData$blnOLProm <- overlapsAny(L1TotGR, PromGR, ignore.strand = T)
L1TotData$blnOLExon <- overlapsAny(L1TotGR, ExonGR, ignore.strand = T)
L1TotData$blnOLIntron <- L1TotData$blnOLGene & (!L1TotData$blnOLExon)
L1TotData$blnOLIntergen <- !(L1TotData$blnOLGene | L1TotData$blnOLProm)
# Create a variable indicating insertion type
L1TotData$InsType <- "Intergenic"
L1TotData$InsType[L1TotData$blnOLProm] <- "Promoter"
L1TotData$InsType[L1TotData$blnOLExon] <- "Exon"
L1TotData$InsType[L1TotData$blnOLIntron] <- "Intron"
# Perform pairwise Wilcoxon test for differences in L1 frequencies
pairwise.wilcox.test(L1TotData$L1Freq, L1TotData$InsType,
p.adjust.method = "BH")
# Average mean frequency
MeanFreqAgg <- aggregate(L1Freq ~ InsType, data = L1TotData, FUN = mean)
VarFreqAgg <- aggregate(L1Freq ~ InsType, data = L1TotData, FUN = var)
L1TotData$Dummy <- 1
NAgg <- aggregate(Dummy ~ InsType, data = L1TotData, FUN = sum)
StErr <- sqrt(VarFreqAgg$L1Freq / NAgg$Dummy)  # standard error of mean frequency
# Indicator variable for intersection with reference L1
blnOLGene_RefL1 <- overlapsAny(L1GRanges, GeneGR, ignore.strand = T)
blnOLGeneSameStrand_RefL1 <- overlapsAny(L1GRanges, GeneGR)
blnOLProm_RefL1 <- overlapsAny(L1GRanges, PromGR, ignore.strand = T)
blnOLExon_RefL1 <- overlapsAny(L1GRanges, ExonGR, ignore.strand = T)
blnOLIntron_RefL1 <- blnOLGene_RefL1 & (!blnOLExon_RefL1)
# Get number of insertions per bp
GeneTot <- sum(width(GeneGR))
ExonTot <- sum(width(ExonGR))
IntronTot <- GeneTot - ExonTot
PromTot <- sum(width(PromGR))
IntergenTot <- sum(as.numeric(ChromLengthsHg19)) - GeneTot - PromTot #- EnhancerTot
# Get mean frequency of L1 in different functional regions
MeanFreqs <- c(
Promoter = mean(L1TotData$L1Freq[L1TotData$blnOLProm], na.rm = T),
Exon = mean(L1TotData$L1Freq[L1TotData$blnOLExon], na.rm = T),
Intron = mean(L1TotData$L1Freq[L1TotData$blnOLIntron], na.rm = T),
Intergenic = mean(L1TotData$L1Freq[L1TotData$blnOLIntergen], na.rm = T)
)
# Plot distn of frequency of L1 in different functional regions
par(mfrow = c(1, 1))
hist(L1TotData$L1Freq[L1TotData$blnOLProm], breaks = seq(0, 1, 0.01))
hist(L1TotData$L1Freq[L1TotData$blnOLExon], breaks = seq(0, 1, 0.01))
hist(L1TotData$L1Freq[L1TotData$blnOLIntron], breaks = seq(0, 1, 0.01))
hist(L1TotData$L1Freq[L1TotData$blnOLIntergen], breaks = seq(0, 1, 0.01))
hist(sqrt(-log10(L1TotData$L1Freq[L1TotData$blnOLProm])))
hist(-log10(L1TotData$L1Freq[L1TotData$blnOLExon]))
hist(log10(L1TotData$L1Freq[L1TotData$blnOLIntron]))
hist(log10(L1TotData$L1Freq[L1TotData$blnOLIntergen]))
# Get number of L1 per Mb in different functional regions
InsPerbp <- 10^6 * rbind(
c(
Promoter = sum(blnOLProm_RefL1) / PromTot,
Exon = sum(blnOLExon_RefL1) / ExonTot,
Intron = sum(blnOLIntron_RefL1) / IntronTot,
Intergenic = sum(!(blnOLGene_RefL1 | blnOLProm_RefL1)) / IntergenTot
),
c(
Promoter = sum(L1TotData$blnOLProm) / PromTot,
Exon = sum(L1TotData$blnOLExon) / ExonTot,
Intron = sum(L1TotData$blnOLIntron) / IntronTot,
Intergenic = sum(!(L1TotData$blnOLGene | L1TotData$blnOLProm)) / IntergenTot
)
)
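# Ratio of reference-L1 density to the density of all L1 analyzed here, per
# region type: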
InsPerbp[1,] / InsPerbp[2,]
###################################################
# #
# Fit effect of insertion length on selection #
# #
###################################################
cat("\n******** Estimating effect of insertion length **********\n")
# Match summary ranges to L1 ranges of 1000 genome data
L1SummaryOL <- findOverlaps(L1TotGR, SummaryGR)
all(L1SummaryOL@from %in% 1:nrow(L1TotData))
blnNoDupl <- !duplicated(L1SummaryOL@from)
L1TotData$L1Count <- NA
L1TotData$L1Count[L1SummaryOL@from[blnNoDupl]] <-
DataPerSummaryGR$L1Count[L1SummaryOL@to[blnNoDupl]]
# Get distance to nearest other L1
Dist2Nearest <- distanceToNearest(L1TotGR)
L1TotData$Dist2Nearest <- Dist2Nearest@elementMetadata@listData$distance
max(L1TotData$Dist2Nearest)
# Create a matrix of predictor variables (L1 count, L1 width, and the
# full-length indicator, plus bookkeeping columns). Note that "RecPredict" is
# only computed in the commented-out block above; uncomment that block before
# running the RecPredict-based model below.
PredictMat <- L1TotData[, c("L1Count", "L1width", "blnFull", "RecPredict",
                            "Freq", "SampleSize", "blnIns")]
blnNA <- sapply(1:nrow(L1TotData), function(x) any(is.na(PredictMat[x,])))
sum(!blnNA)
max(L1TotData$Freq / L1TotData$SampleSize, na.rm = T)
max(PredictMat$RecPredict)
# Plot log-likelihood for different selection coefficients
# aVals <- seq(-0.0021, 0.003, 0.0001)
# LikVals <- sapply(aVals, function(x) {
# print(x)
# LL_FPrime = AlleleFreqLogLik_4Par(
# Freqs = round(L1TotData$Freq[!blnNA], 0),
# Counts = rep(1, sum(!blnNA)),
# Predict = PredictMat[!blnNA, 1:3],
# a = x, b = 0, c = 0, d = 0, N = PopSize,
# SampleSize = L1TotData$SampleSize[!blnNA],
# blnIns = L1TotData$blnIns[!blnNA],
# DetectProb = 0.9)
# })
# par(mfrow = c(1, 1))
# plot(aVals, LikVals, type = "l", col = "red")
# plot(aVals, LikVals, type = "l", col = "red",
# xlim = c(-0.0005, 0), ylim)
# Estimate maximum likelihood for a single selection coefficient
cat("Estimate maximum likelihood for a single selection coefficient\n")
ML_1Par <- constrOptim(theta = c(a = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = 0, c = 0, d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(1,-1),
ci = c(a = -0.03, a = -0.03),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate for effect of L1 width on selection
cat("Estimate effect of L1 width on selections ...")
ML_L1width <- constrOptim(theta = c(a = ML_1Par$par, c = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = 0, c = x[2], d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.02, c = -10^(-6),
a = -0.02, c = -10^(-6)),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate for effect of full-length L1 on selection
cat("Estimate effect of L1 full-length on selections ...")
ML_L1full <- constrOptim(theta = c(a = ML_1Par$par, d = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = 0, c = 0, d = x[2], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.02, d = -10^(-3),
a = -0.02, d = -10^(-3)),
method = "Nelder-Mead")
cat("done!\n")
# Determine maximum likelihood with 3 parameters (selection coefficient as
# function of L1 width and indicator for full-length)
cat("Maximizing likelihood for three parameters ...")
ML_L1widthL1full <- constrOptim(theta = c(a = ML_L1width$par[1], b = ML_L1width$par[2],
c = ML_L1full$par[2]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = 0, c = x[2], d = x[3], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1),
c(-1, 0, 0), c(0, -1, 0), c(0, 0, -1)),
ci = c(a = -0.01, b = -10^(-6), d = -10^(-3),
a = -0.02, b = -10^(-6), d = -10^(-3)),
method = "Nelder-Mead")
cat("done!\n")
# Determine maximum likelihood with 3 parameters (selection coefficient as
# function of Recombination predictor and indicator for full-length)
cat("Maximizing likelihood for three parameters ...")
ML_L1RecL1full <- constrOptim(theta = c(a = ML_L1widthL1full$par[1],
c = ML_L1widthL1full$par[3], d = ML_L1widthL1full$par[2]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 2:4],
a = x[1], b = 0, c = x[2], d = x[3], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1),
c(-1, 0, 0), c(0, -1, 0), c(0, 0, -1)),
ci = c(a = -0.01, b = -10^(-3), d = -10^(-6),
a = -0.02, b = -10^(-3), d = -10^(-6)),
method = "Nelder-Mead")
cat("done!\n")
###################################################
# #
# Fit effect of L1 density on selection #
# #
###################################################
# Determine maximum likelihood with 2 parameters (selection coefficient as
# function of L1 count)
cat("Maximizing likelihood for L1 count ...")
ML_2Pars_L1count <- constrOptim(
theta = c(a = ML_1Par$par, b = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = x[2], c = 0, d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
# ci = c(a = -0.01, b = -10^(-3),
# a = -0.01, b = -2*10^(-3)),
ci = c(a = -0.01, b = -10^(-9),
a = -0.01, b = -10^(-9)),
method = "Nelder-Mead")
# Maximum likelihood estimate for effect of L1 density and full-length L1
ML_3Pars_L1countL1full <- constrOptim(
theta = c(a = ML_2Pars_L1count$par[1], ML_2Pars_L1count$par[2],
d = ML_L1full$par[2]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = x[2], c = 0, d = x[3], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1),
c(-1, 0, 0), c(0, -1, 0), c(0, 0, -1)),
ci = c(a = -0.02, b = -5*10^(-3), d = -10^(-3),
a = -0.02, b = -5*10^(-3), d = -10^(-3)),
method = "Nelder-Mead")
# Maximum likelihood estimate for effect of L1 density and L1 width
ML_3Pars_L1countL1width <- constrOptim(
theta = c(a = ML_2Pars_L1count$par[1], ML_2Pars_L1count$par[2],
c = ML_L1width$par[2]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = x[2], c = x[3], d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1),
c(-1, 0, 0), c(0, -1, 0), c(0, 0, -1)),
ci = c(a = -0.02, b = -5*10^(-3), c = -10^(-6),
a = -0.02, b = -5*10^(-3), c = -10^(-6)),
method = "Nelder-Mead")
# Maximum likelihood estimate for effect of L1 density, L1 width, and
# full-length L1
ML_4Pars_L1countL1widthL1full <- constrOptim(
theta = c(a = ML_2Pars_L1count$par[1], b = 0,
c = ML_L1widthL1full$par[2], d = ML_L1widthL1full$par[3]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA, 1:3],
a = x[1], b = x[2], c = x[3], d = x[4], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0, 0), c(0, 1, 0, 0), c(0, 0, 1, 0), c(0, 0, 0, 1),
c(-1, 0, 0, 0), c(0, -1, 0, 0), c(0, 0, -1, 0), c(0, 0, 0, -1)),
ci = c(a = -0.01, b = -2*10^(-3), c = -10^(-6), d = -10^(-3),
a = -0.01, b = -2*10^(-3), c = -10^(-6), d = -10^(-3)),
method = "Nelder-Mead")
###################################################
# #
# Compare estimated and observed frequencies #
# #
###################################################
# LogProbs <- AlleleFreqSampleProb(s = 0, N = PopSize, SampleSize = 2*2504)
# sum(is.infinite(LogProbs))
# length(LogProbs)
# min(LogProbs[!is.infinite(LogProbs)])
# idxFinite <- which(!is.infinite(LogProbs))
# plot(idxFinite, LogProbs[idxFinite])
# plot(idxFinite, LogProbs[idxFinite], xlim = c(4800, 5000))
# lchoose(5008, 1000)
# k <- 200
# SampleSize = 5008
# integrate(function(x) AlleleFreqTime(x, s = 0, N = PopSize) * x^(k) *
# (1 - x)^(SampleSize - k) , 0, 1)$value
# integrate(function(x) log(AlleleFreqTime(x, s = 0, N = PopSize)) +
# k * log(x) + (SampleSize - k) * log(1 - x), 0, 1)$value
#
###################################################
# #
# Summarize results #
# #
###################################################
# Function to extract AIC from optim results
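# (OptimResults$value is the minimized negative log-likelihood, so this equals
# the usual AIC = 2k + 2 * NLL.)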
GetAIC <- function(OptimResults){
round(2 * (length(OptimResults$par) + OptimResults$value), 2)
}
GetParVals <- function(OptimResults){
  paste(names(OptimResults$par),
        format(OptimResults$par, digits = 2), sep = " = ",
        collapse = ", ")
}
GetNPar <- function(OptimResults){
length(OptimResults$par)
}
# Get columns of AIC and parameter values
Cols2Append <- t(sapply(list(ML_1Par,
ML_L1width,
ML_L1full,
# ML_2Pars_L1count,
                             ML_L1widthL1full
# ML_3Pars_L1countL1width,
# ML_3Pars_L1countL1full
# ML_4Pars_L1countL1widthL1full
), function(x){
c(AIC = GetAIC(x), Pars = GetParVals(x))
}))
# Combine AIC values into one vector
AICTab <- cbind(data.frame(
NrParameters = c(1,
2,
2,
# 2,
                   3
# 3,
# 3
# 4
),
Predictor = c("none",
"L1 width",
"L1 full-length",
# "L1count",
"L1 width and full-length",
# "L1 count and L1 start",
# "L1 count and L1 full"
# "L1 start, L1 full-length, L1count"
),
stringsAsFactors = F),
Cols2Append)
# Save table with AIC
write.csv(AICTab, SelectTabOutPath)
save.image(SelectResultOutPath)
###################################################
# #
# Fit effect of genic insertion on selection #
# #
###################################################
# Create matrices of predictor variables (overlap indicators for exon, intron,
# promoter, and gene)
PredictMatGeneOL <- L1TotData[, c("blnOLExon", "blnOLIntron", "blnOLProm")]
PredictMatGeneOL2 <- L1TotData[, c("blnOLGene", "blnOLIntron", "blnOLProm")]
blnNA <- sapply(1:nrow(PredictMatGeneOL), function(x) any(is.na(PredictMatGeneOL[x,]))) |
sapply(1:nrow(L1TotData), function(x) any(is.na(PredictMat[x,])))
# Get maximum likelihood estimate for effect of exonic L1 on selection
cat("Estimate effect of exon overlap on selections ...")
ML_L1Exon <- constrOptim(theta = c(a = ML_1Par$par, b = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMatGeneOL[!blnNA,],
a = x[1], b = x[2], c = 0, d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.001, b = -10^(-2),
a = -0.001, b = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate for effect of intronic L1 on selection
cat("Estimate effect of intron overlap on selections ...")
ML_L1Intron <- constrOptim(theta = c(a = ML_1Par$par, c = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMatGeneOL[!blnNA,],
a = x[1], b = 0, c = x[2], d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.01, c = -10^(-2),
a = -0.01, c = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate for effect of promoter L1 on selection
cat("Estimate effect of promoter overlap on selections ...")
ML_L1Prom <- constrOptim(theta = c(a = ML_1Par$par, d = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMatGeneOL[!blnNA,],
a = x[1], b = 0, c = 0, d = x[2], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.01, c = -10^(-2),
a = -0.01, c = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate for joint effect of exonic and intronic L1
# on selection
cat("Estimate effect of exon and intron overlap on selections ...")
ML_L1ExonIntron <- constrOptim(
theta = c(a = ML_1Par$par,
b = ML_L1Exon$par[2],
c = ML_L1Intron$par[2]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMatGeneOL[!blnNA,],
a = x[1], b = x[2], c = x[3], d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1),
c(-1, 0, 0), c(0, -1, 0) , c(0, 0, -1)),
ci = c(a = -0.01, b = -10^(-2), c = -10^(-2),
a = -0.01, b = -10^(-2), c = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate for joint effect of exonic, intronic, and
# promoter L1 on selection
cat("Estimate effect of exon, intron, and promoter overlap on selections ...")
ML_L1ExonIntronProm <- constrOptim(
theta = c(a = ML_L1ExonIntron$par[1],
b = ML_L1ExonIntron$par[2],
c = ML_L1ExonIntron$par[3],
d = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMatGeneOL[!blnNA,],
a = x[1], b = x[2], c = x[3], d = x[4], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0, 0), c(0, 1, 0, 0), c(0, 0, 1, 0), c(0, 0, 0, 1),
c(-1, 0, 0, 0), c(0, -1, 0, 0) , c(0, 0, -1, 0), c(0, 0, 0, -1)),
ci = c(a = -0.01, b = -10^(-2), c = -10^(-2), d = -10^(-2),
a = -0.01, b = -10^(-2), c = -10^(-2), d = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get maximum likelihood estimate with an exon effect and a single shared
# coefficient for intron and promoter overlap
cat("Estimate effect of exon and shared intron/promoter overlap on selections ...")
ML_L1PromOrIntron <- constrOptim(theta = c(a = ML_1Par$par,
b = ML_L1Exon$par[2],
c = ML_L1Intron$par[2]),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[!blnNA], 0),
Counts = rep(1, sum(!blnNA)),
Predict = PredictMatGeneOL[!blnNA,],
a = x[1], b = x[2], c = x[3], d = x[3], N = PopSize,
SampleSize = L1TotData$SampleSize[!blnNA],
blnIns = L1TotData$blnIns[!blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[!blnNA]),
grad = NULL,
ui = rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1),
c(-1, 0, 0), c(0, -1, 0) , c(0, 0, -1)),
ci = c(a = -0.01, b = -10^(-2), c = -10^(-2),
a = -0.01, b = -10^(-2), c = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get columns of AIC and parameter values
Cols2Append <- t(sapply(list(ML_1Par, ML_L1Exon, ML_L1Intron, ML_L1Prom,
ML_L1ExonIntron,
ML_L1ExonIntronProm,
ML_L1PromOrIntron),
function(x){
c(NrParameters = GetNPar(x), AIC = GetAIC(x),
Pars = GetParVals(x))
}))
# Combine AIC values into one vector
AICTabGene <- cbind(data.frame(
Predictor = c("none", "Exon", "Intron", "Promoter",
"Exon and intron",
"Exon, intron, and promoter",
"Exon, intron or promoter"),
stringsAsFactors = F),
Cols2Append)
# Save table with AIC
write.csv(AICTabGene, SelectGenTabOutPath)
###################################################
# #
# Plot density vs. selection coefficient #
# #
###################################################
# Create a vector of selection coefficients
SCoeffVect <- c(Promoter = ML_L1ExonIntron$par[1],
Exon = sum(ML_L1ExonIntron$par[c(1, 2)]),
Intron = sum(ML_L1ExonIntron$par[c(1, 3)]),
Intergenic = ML_L1ExonIntron$par[1])
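# (Promoter and intergenic L1 share the baseline coefficient here because
# ML_L1ExonIntron contains no separate promoter term.)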
names(SCoeffVect) <- sapply(names(SCoeffVect),
function(x) strsplit(x, "\\.")[[1]][1])
# Plot selection coefficient against
if (!all(names(SCoeffVect) == colnames(InsPerbp))){
stop("Selection coefficients and L1 densities are not in same order!")
}
if (!all(names(SCoeffVect) == names(MeanFreqs))){
stop("Selection coefficients and L1 frequencies are not in same order!")
}
# Get sample size and create a range of s-values
SSize <- 2 * MEInsSamplesize
SVals <- seq(-0.0025, -0.00001, 0.00001)
# Plot probability for inclusion versus number of LINE-1 per Mb
ProbL1 <- sapply(SVals, function(x) ProbAlleleIncluded(x,N = PopSize, SampleSize = 2*2504))
par(oma = c(7, 1, 0, 2), mfrow = c(2, 1), mai = c(0.5, 1, 0.5, 1))
plot(SCoeffVect, InsPerbp[2,], ylab = "LINE-1s per Mb",
xlab = "", ylim = c(0, 3), xlim = c(-0.0025, 0), main = "A")
text(SCoeffVect, InsPerbp[2,] + 2*10^(-1), names(SCoeffVect))
par(new = TRUE)
plot(SVals, ProbL1, type = "l", axes = FALSE, bty = "n", xlab = "", ylab = "")
axis(side = 4)
mtext("Inclusion probability", 4, line = 3)
# Plot expected frequency versus observed mean frequency
ExpL1 <- sapply(SVals, function(x) ExpAlleleFreq(x, N = PopSize, SampleSize = 2*2504))
plot(SCoeffVect, MeanFreqs*SSize, ylab = "Mean LINE-1 frequency",
xlab = "", xlim = c(-0.0025, 0.0001), main = "B")
text(SCoeffVect + c(0.0002, 0, -0.0001, -0.0002), MeanFreqs*SSize + 10, names(SCoeffVect))
lines(SVals, ExpL1)
mtext("Selection coefficient", 1, line = 3)
CreateDisplayPdf('D:/L1polymORF/Figures/SelectionPerRegion_MELT.pdf',
PdfProgramPath = '"C:\\Program Files (x86)\\Adobe\\Reader 11.0\\Reader\\AcroRd32"',
height = 7, width = 7)
###################################################
# #
# Plot frequency vs. insertion length #
# #
###################################################
# # Create a vector of L1 start classes
# L1TotData$L1widthClass <- cut(L1TotData$L1width, breaks =
# seq(0, 7000, 1000))
# MEInsCall$L1widthClass <- cut(MEInsCall$L1width, breaks =
# seq(0, 7000, 1000))
#
# MEInsCall$Freq
# # Get mean L1 frequency per start
# L1widthAggregated <- aggregate(L1TotData[,c("L1width", "L1Freq")],
# by = list(L1TotData$L1widthClass),
# FUN = function(x) mean(x, na.rm = T))
# L1widthAggregated_Ins <- aggregate(MEInsCall[,c("L1width", "AF")],
# by = list(MEInsCall$L1widthClass),
# FUN = function(x) mean(x, na.rm = T))
# plot(L1widthAggregated_Ins$L1width, L1widthAggregated_Ins$AF)
#
# # Get sample size and create a range of s-values
# SSize <- 2 * MEInsSamplesize
# StartVals <- seq(0, 6000, 100)
# Full <- StartVals == 6000
# SVals <- ML_L1widthL1full$par[1] + ML_L1widthL1full$par[2]*StartVals +
# ML_L1widthL1full$par[3]*Full
#
# # Plot expected frequency versus observed mean frequency
# ExpL1width <- sapply(SVals, function(x) ExpAlleleFreq(x, N = PopSize,
# SampleSize = 2*MEInsSamplesize))
# par( mfrow = c(1, 1))
# plot(L1widthAggregated$L1width,
# L1widthAggregated$L1Freq, xlab = "LINE-1 length",
# ylab = "Mean LINE-1 frequency")
# lines(StartVals, ExpL1width )
# mtext("Selection coefficient", 1, line = 3)
# CreateDisplayPdf('D:/L1polymORF/Figures/FreqVsL1width_MELT.pdf',
# PdfProgramPath = '"C:\\Program Files (x86)\\Adobe\\Reader 11.0\\Reader\\AcroRd32"',
# height = 7, width = 7)
###################################################
# #
# Fit effect of strandedness on selection #
# #
###################################################
# Create a matrix of predictor variables (same-strand and gene overlap
# indicators) for L1 inside genes
PredictMatWithinGene <- L1TotData[L1TotData$blnOLGene & !blnNA ,
c( "blnOLGeneSameStrand", "blnOLGene", "blnOLGene")]
# Estimate maximum likelihood for a single selection coefficient
sum(L1TotData$blnOLGene)
colSums(PredictMatWithinGene)
ML_1Par_gene <- constrOptim(theta = c(a = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[L1TotData$blnOLGene & !blnNA], 0),
Counts = rep(1, sum(L1TotData$blnOLGene & !blnNA)),
Predict = PredictMatWithinGene,
a = x[1], b = 0, c = 0, d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[L1TotData$blnOLGene & !blnNA ],
blnIns = L1TotData$blnIns[L1TotData$blnOLGene & !blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[L1TotData$blnOLGene & !blnNA]),
grad = NULL,
ui = rbind(1,-1),
ci = c(a = -0.001, a = -0.001),
method = "Nelder-Mead")
# Get maximum likelihood estimate for effect of same-strand gene overlap on
# selection
cat("Estimate effect of same strand overlap on selections ...")
ML_L1SameStrand <- constrOptim(theta = c(a = ML_1Par_gene$par, b = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = round(L1TotData$Freq[L1TotData$blnOLGene & !blnNA], 0),
Counts = rep(1, sum(L1TotData$blnOLGene & !blnNA)),
Predict = PredictMatWithinGene,
a = x[1], b = x[2], c = 0, d = 0, N = PopSize,
SampleSize = L1TotData$SampleSize[L1TotData$blnOLGene & !blnNA ],
blnIns = L1TotData$blnIns[L1TotData$blnOLGene & !blnNA],
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = L1TotData$DetectProb[L1TotData$blnOLGene & !blnNA]),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.01, b = -10^(-2),
a = -0.01, b = -10^(-2)),
method = "Nelder-Mead")
cat("done!\n")
# Get columns of AIC and parameter values
Cols2Append <- t(sapply(list(ML_1Par_gene, ML_L1SameStrand),
function(x){
c(NrParameters = GetNPar(x), AIC = GetAIC(x),
Pars = GetParVals(x))
}))
# Combine AIC values into one vector
AICTabWithinGene <- cbind(data.frame(
Predictor = c("none", "SameStrand"),
stringsAsFactors = F),
Cols2Append)
# Save table with AIC
write.csv(AICTabWithinGene, SelectWithinGenTabOutPath)
###################################################
# #
# Fit effect of singleton coef. on selection #
# #
###################################################
# Create a matrix of predictor variables
PredictMat <- L1SingletonCoeffs[, c("coef", "coef", "coef")]
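# (Again, three identical columns are supplied only to satisfy the expected
# predictor-matrix shape; at most one coefficient is freed per fit below.)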
blnNA <- sapply(1:nrow(L1SingletonCoeffs), function(x) any(is.na(PredictMat[x,])))
# Determine maximum likelihood with one parameter (selection coefficient)
cat("Maximizing likelihood for one parameter (selection coefficient) ...")
ML_1Par_coef <- constrOptim(
theta = c(a = ML_1Par$par),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = (L1SingletonCoeffs$Freq * 2*2504)[!blnNA],
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA,],
a = x[1], b = 0, c = 0, d = 0, N = PopSize,
SampleSize = rep(2*2504, sum(!blnNA)),
blnIns = rep(T, sum(!blnNA)),
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = rep(0.9, sum(!blnNA))),
grad = NULL,
ui = rbind(1,-1),
ci = c(a = -0.03, a = -0.03),
method = "Nelder-Mead")
# Determine maximum likelihood with an intercept and one parameter for the
# singleton coefficient
ML_2Pars_L1coef <- constrOptim(
theta = c(a = ML_1Par_coef$par, b = 0),
f = function(x) -AlleleFreqLogLik_4Par(
Freqs = (L1SingletonCoeffs$Freq * 2*2504)[!blnNA],
Counts = rep(1, sum(!blnNA)),
Predict = PredictMat[!blnNA,],
a = x[1], b = x[2], c = 0, d = 0, N = PopSize,
SampleSize = rep(2*2504, sum(!blnNA)),
blnIns = rep(T, sum(!blnNA)),
LogRegCoeff = LogRegL1Ref$coefficients,
DetectProb = rep(0.9, sum(!blnNA))),
grad = NULL,
ui = rbind(c(1, 0), c(0, 1),
c(-1, 0), c(0, -1)),
ci = c(a = -0.01, b = -2*10^(-3),
a = -0.01, b = -2*10^(-3)),
method = "Nelder-Mead")
cat("done!\n")
# Get columns of AIC and parameter values
Cols2Append <- t(sapply(list(ML_1Par_coef, ML_2Pars_L1coef),
function(x){
c(NrParameters = GetNPar(x), AIC = GetAIC(x),
Pars = GetParVals(x))
}))
# Combine AIC values into one vector
AICTabSingleton <- cbind(data.frame(
Predictor = c("none", "Signleton coefficient"),
stringsAsFactors = F),
Cols2Append)
# Save table with AIC
write.csv(AICTabSingleton, SelectSingletonTabOutPath)
# Save everything
save.image(SelectResultOutPath)
/Scripts/EstimateL1SelectionPars_MELT.R | no_license | hdohna/L1polymORFgit | R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateRandom.R
\name{CreateOneRandomMutSigProfile}
\alias{CreateOneRandomMutSigProfile}
\title{Create one "random" artificial signature profile.}
\usage{
CreateOneRandomMutSigProfile(row.names)
}
\arguments{
\item{row.names}{One of the \code{\link{ICAMS}} package variables, such as
\code{catalog.row.order[["SBS96"]]}.}
}
\value{
A single column matrix with \code{rownames} \code{row.names} and
\code{colnames} \code{"RandSig"}.
}
\description{
Create one "random" artificial signature profile.
}
\keyword{internal}
/man/CreateOneRandomMutSigProfile.Rd | no_license | steverozen/SynSigGen | R
## Merging files
# Import Libraries
library(plyr)
library(tidyverse)
library(lubridate) # work with dates
mydir = "Stack_2" # Change this to direct to your data directory
# List files
A_K2 = list.files(path = mydir, pattern = "*A_K2.csv", full.names = TRUE)
B_K2 = list.files(path = mydir, pattern = "*B_K2.csv", full.names = TRUE)
Power = list.files(path = mydir, pattern = "*POWER.csv", full.names = TRUE)
P_demand = list.files(path = mydir, pattern = "*.CSV", full.names = TRUE)
# Read csv
A_df = A_K2 %>% ldply(read.csv) %>% unique() %>% mutate(Time = as.POSIXct(Time, format="%d/%m/%Y %H:%M:%S"))
B_df = B_K2 %>% ldply(read.csv) %>% unique() %>% mutate(Time = as.POSIXct(Time, format="%d/%m/%Y %H:%M:%S"))
Power_df = Power %>% ldply(read.csv) %>% unique() %>% mutate(Time = as.POSIXct(Time, format="%d/%m/%Y %H:%M:%S"))
# Combine the Date and Time columns into a timestamp column
P_demand = P_demand %>% ldply(read.csv) %>% mutate(timestamp= paste(Date,Time)) %>% mutate(timestamp= as.POSIXct(timestamp, format="%Y/%m/%d %H:%M:%S"))
# Removing columns that are not necessary or inaccurate according to "data_dictionary"
# A_df
A_df=A_df[,-1]
# B_df
B_df=B_df[,c(2:14,27:32)]
# Power_df
Power_df=Power_df[,c(2:5,12,14)]
# Remove columns with legacy data from P_demand
P_demand = P_demand[,c(3:12,21)]
# Reorder Columns so "timestamp" is now first
P_demand = P_demand[ , c(ncol(P_demand), 1:(ncol(P_demand)-1))]
# rename "timestamp" to "Time"
names(P_demand)[1] <- "Time"
# Ordering dataframes by time
A_df = arrange(A_df, Time)
B_df = arrange(B_df, Time)
Power_df = arrange(Power_df, Time)
P_demand = arrange(P_demand, Time)
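# The repeated read-parse-sort steps above could be factored into a helper;
# a sketch (read_stack is a hypothetical name, not part of this pipeline):
# read_stack <- function(files, fmt = "%d/%m/%Y %H:%M:%S") {
#   files %>% ldply(read.csv) %>% unique() %>%
#     mutate(Time = as.POSIXct(Time, format = fmt)) %>%
#     arrange(Time)
# }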
# Save combined data as new csv files
write.csv(A_df, "A.csv", row.names = FALSE)
write.csv(B_df, "B.csv", row.names = FALSE)
write.csv(Power_df, "Pow.csv", row.names = FALSE)
write.csv(P_demand, "P_demand.csv", row.names = FALSE)
/data-combining.R | no_license | nshyam97/Group-Project-CSC8633 | R
plot3 <- function() {
# step 1--Reading data
hpcdata <- read.csv("./data/household_power_consumption.txt",TRUE,sep=";",na.strings="?",as.is = c(2) )
# step 2 - Subsetting 01-02/02/2007 dates
hpcdatafilt <- hpcdata[hpcdata$Date %in% c("1/2/2007","2/2/2007"),]
# adding datetime column
hpcdatafilt$datetime <- strptime( paste(hpcdatafilt$Date,hpcdatafilt$Time), format="%d/%m/%Y %H:%M:%S")
  # step 3 -- construct plot3
  # initialize the plotting area: open a new device if none is active
  if (dev.cur() == 1) dev.new()
par(oma=c(1,1,1,1),bg="white")
with(hpcdatafilt, {
plot(datetime,Sub_metering_1, type="l" ,xlab="", ylab="Energy sub metering", col="black")
lines(datetime, Sub_metering_2, col = "red")
lines(datetime, Sub_metering_3, col = "blue")
legend("topright", lty=1, col = c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
}
)
#copy it to PNG file
dev.copy(png, file = "./plot3.png",width=480,height=480,pointsize=8)
#close the png device
dev.off()
#close the window device
dev.off()
}
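# Hypothetical usage (assumes ./data/household_power_consumption.txt exists):
# plot3()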
/plot3.R | no_license | paolobudroni/ExData_Plotting1 | R
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818251789663e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
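# Minimal reproduction harness: it calls the internal multivariance:::match_rows
# on a fixed 5x1 / 1x1 input and prints the structure of the result; judging by
# the file path, it was generated for AFL/valgrind testing.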
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613112922-test.R | no_license | akhikolla/updatedatatype-list3 | R
fitted.JMbayes <-
function (object, process = c("Longitudinal", "longitudinal", "Event", "event"),
type = c("Marginal", "marginal", "Subject", "subject"), nullY = FALSE, ...) {
if (!inherits(object, "JMbayes"))
stop("Use only with 'JMbayes' objects.\n")
process <- match.arg(process)
type <- match.arg(type)
if (process == "Longitudinal" || process == "longitudinal") {
fitY <- c(object$x$X %*% object$postMeans$betas)
names(fitY) <- row.names(object$Data$data)
if (type == "Subject" || type == "subject")
fitY <- fitY + rowSums(object$x$Z * ranef(object)[object$y$id, ])
fitY
} else {
Data <- object$Data
Funs <- object$Funs
Forms <- object$Forms
timeVar <- object$timeVar
param <- object$param
indFixed <- Forms$extraForm$indFixed
indRandom <- Forms$extraForm$indRandom
lag <- object$y$lag
TermsX <- object$Terms$termsYx
TermsZ <- object$Terms$termsYz
TermsX.extra <- object$Terms$termsYx.extra
TermsZ.extra <- object$Terms$termsYz.extra
formYx <- reformulate(attr(delete.response(TermsX), "term.labels"))
formYz <- Forms$formYz
times <- Data$data[[timeVar]]
GQsurv <- if (object$control$GQsurv == "GaussKronrod")
gaussKronrod() else gaussLegendre(object$control$GQsurv.k)
wk <- GQsurv$wk
sk <- GQsurv$sk
K <- length(sk)
anyLeftTrunc <- object$y$anyLeftTrunc
if (length(anyLeftTrunc)) {
ni <- tapply(object$y$id, object$y$id, length)
TimeL <- rep(object$y$TimeL, ni)
P <- (times - TimeL) / 2
st <- outer(P, sk) + c(times + TimeL) / 2
} else {
P <- times / 2
st <- outer(P, sk + 1)
}
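        # st holds the quadrature nodes mapped from the standard interval onto
        # each subject's integration interval (from 0, or TimeL under left
        # truncation, up to the observed time); P is half the interval length,
        # i.e. the Jacobian of that change of variables.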
id.GK <- rep(seq_along(times), each = K)
indBetas <- object$y$indBetas
data.id2 <- Data$data.id[rep(object$y$id, each = K), ]
data.id2[[timeVar]] <- pmax(c(t(st)) - lag, 0)
if (param %in% c("td-value", "td-both")) {
mfX <- model.frame(TermsX, data = data.id2)
mfZ <- model.frame(TermsZ, data = data.id2)
Xs <- model.matrix(formYx, mfX)
Zs <- model.matrix(formYz, mfZ)
}
if (param %in% c("td-extra", "td-both")) {
mfX.extra <- model.frame(TermsX.extra, data = data.id2)
mfZ.extra<- model.frame(TermsZ.extra, data = data.id2)
Xs.extra <- model.matrix(Forms$extraForm$fixed, mfX.extra)
Zs.extra <- model.matrix(Forms$extraForm$random, mfZ.extra)
}
betas <- object$postMeans$betas
sigma <- object$postMeans$sigma
D <- object$postMeans$D
gammas <- object$postMeans$gammas
alphas <- object$postMeans$alphas
Dalphas <- object$postMeans$Dalphas
if (nullY) {
alphas <- rep(0, length.out = length(alphas))
Dalphas <- rep(0, length.out = length(Dalphas))
}
Bs.gammas <- object$postMeans$Bs.gammas
b <- ranef(object)
idK <- rep(object$y$id, each = K)
b <- b[idK, ]
if (param %in% c("td-value", "td-both")) {
Ys <- Funs$transFun.value(as.vector(Xs %*% betas + rowSums(Zs * b)), data.id2)
}
if (param %in% c("td-extra", "td-both")) {
Ys.extra <- Funs$transFun.extra(as.vector(Xs.extra %*% betas[indFixed]) +
rowSums(Zs.extra * b[, indRandom, drop = FALSE]), data.id2)
}
tt <- c(switch(param,
"td-value" = as.matrix(Ys) %*% alphas,
"td-extra" = as.matrix(Ys.extra) %*% Dalphas,
"td-both" = as.matrix(Ys) %*% alphas + as.matrix(Ys.extra) %*% Dalphas,
"shared-betasRE" = (rep(betas[indBetas], each = nrow(b)) + b) %*% alphas,
"shared-RE" = b %*% alphas))
W <- object$x$W[object$y$id, seq_along(gammas), drop = FALSE]
eta.tw <- if (!is.null(W)) c(W %*% gammas) else rep(0, length(object$y$id))
kn <- object$control$knots
W2s <- splineDesign(unlist(kn, use.names = FALSE), c(t(st)),
ord = object$control$ordSpline, outer.ok = TRUE)
Vi <- exp(c(W2s %*% Bs.gammas) + tt)
cumHaz <- exp(eta.tw) * P * tapply(rep(wk, length.out = length(Vi)) * Vi, id.GK, sum)
names(cumHaz) <- row.names(Data$data)
cumHaz
}
}
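# Hypothetical usage (assuming a fitted JMbayes object `jointFit`):
# fitted(jointFit, process = "Event") # cumulative hazard per row
# fitted(jointFit, process = "Longitudinal", type = "subject") # subject-specific fits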
/R/fitted.JMbayes.R | no_license | guptashilpa/JMbayes | R
#!/usr/bin/Rscript
library(rEDM)
library(zoo)
source( "helpers/helper.r" )
source( "helpers/mve.r" )
set.seed( 19 )
## filename = paste0("http://science.sciencemag.org/highwire/filestream/683325/",
## "field_highwire_adjunct_files/1/aag0863_SupportingFile_Suppl._Excel_seq1_v2.xlsx")
save_predictions <- function(dirname = stop("Directory name must be provided!"),
target_column = 1,
E = 3, ## Embedding dimension of the system.
max_lag = E, ## 0,-1, ..., -max_lag
n_samp = 100, ## Number of random libraries, should be in the hundreds
lib = c(501,2000), ## Library set.
                             pred = c(2500,2999), ## Prediction set.
                             lib_sizes = (1:25)*5, ## Library sizes; each library is also randomly placed.
method = "mve",
num_neighbors = E+1 )
{
print( paste0( "Method: ", method, ", target: ", target_column) )
## Load data
raw_df <- read.csv(paste0( dirname, "/original.csv" ),
header = TRUE,
sep = "," )
raw_df$time <- NULL
## raw_df <- raw_df[1:3000, 1:3 ]
if( dirname == "huisman" )
rel_err <- 0.1
else
rel_err <- 0.01
    ## Rescale the data frame and keep track of the normalizing
    ## factors.
mus <- get_mus(raw_df)
sigs <- get_sigs(raw_df)
df <- data.frame(scale(raw_df))
noise <- rnorm( prod(dim(df)), mean=0, sd=sqrt(0.1))
df <- noise + df ## As long as first_column_time == FALSE
filename <- paste0(dirname, "/runs/", method, "_", target_column, "_", num_neighbors,".csv")
empty_file( filename = filename )
pred_func <- mve
if( method == "uwe" )
pred_func <- uwe
if( method == "hao" )
{
pred_func <- multiview
target_column <- which( names(raw_df) == target_column )
}
## Preallocate
rhos <- numeric( n_samp )
for (lib_size in lib_sizes)
{
## For every random library starting point
for( smp in 1:n_samp )
{
rand_lib <- random_lib(lib, lib_size)
## Find the MVE prediction
output <- pred_func(df,
lib = rand_lib,
pred = pred,
norm_type = c("L2 norm", "L1 norm", "P norm"),
P = 0.5,
E = E,
tau = 1,
tp = 1,
max_lag = max_lag,
num_neighbors = num_neighbors,
k = "sqrt",
na.rm = FALSE,
target_column = target_column,
stats_only = TRUE,
first_column_time = FALSE,
exclusion_radius = NULL,
silent = FALSE)
rhos[smp] <- output$rho
if( smp %% 5 == 0 )
{
avg <- mean(rhos[1:smp])
err <- sd( rhos[1:smp] )/ sqrt( smp )
print( paste0("Sample ", smp, "/", n_samp, ", lib size ", lib_size, " mean ", avg, " +/- ", err ) )
if (err < rel_err*avg )
break
}
} ## Closes for( smp in 1:n_samp )
## Order agrees with order set in empty_file
vec <- matrix( c( lib_size, mean(rhos), quantile(rhos, probs = c(0.25,0.5,0.75)) ), nrow = 1)
write.table(vec,
file = filename,
sep = ",",
append = TRUE,
quote = FALSE,
col.names = FALSE,
row.names = FALSE)
} ## Closes for (lib_size in lib_sizes)
} ## Closes function save_predictions
args <- commandArgs( trailingOnly = TRUE )
if( args[1] == "huisman" ) {
dirname <-"huisman"
max_lag <- 3
E <- 5
} else if( args[1] == "hp" ) {
dirname <- "hp"
max_lag <- 3
E <- 3
} else {
stop( "Unknown model" )
}
if( args[2] == "mve" ) {
method <- "mve"
} else if( args[2] == "uwe" ) {
method <- "uwe"
} else {
stop( "Unknown method" )
}
target_column <- args[3]
num_neighbors <- as.numeric(args[4] )
if( args[length(args)] == "test" ) {
print( "Testing..." )
n_samp <- 5
lib_sizes <- c(25)
E <- 2
max_lag <- 2
} else if( args[3] == "long" ) {
n_samp <- 50
lib_sizes <- c(25,50)
} else {
n_samp <- 150
lib_sizes <- (1:10)*10
}
save_predictions(dirname = dirname,
target_column = target_column,
n_samp = n_samp,
method = method,
num_neighbors = num_neighbors,
lib_sizes = lib_sizes,
max_lag = max_lag,
E = E
)
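## Example invocations (a sketch; assumes the data directories described
## above exist, each with an original.csv and a runs/ subfolder):
##   Rscript runnable.r huisman mve 1 4
##   Rscript runnable.r hp uwe 2 5 test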
|
/runnable.r
|
no_license
|
yairdaon/weight-pred
|
R
| false | false | 5,202 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unsupervised.R
\name{mcoa}
\alias{mcoa}
\title{Multiple Co-Inertia Analysis - MCOA}
\usage{
mcoa(X, ncomp = 2, scale = FALSE, verbose = FALSE, ...)
}
\arguments{
\item{X}{\code{list} of input blocks.}
\item{ncomp}{\code{integer} number of components to extract.}
\item{scale}{\code{logical} indicating if variables should be scaled.}
\item{verbose}{\code{logical} indicating if diagnostic information should be printed.}
\item{...}{additional arguments for RGCCA.}
}
\value{
\code{multiblock} object including relevant scores and loadings. Relevant plotting functions: \code{\link{multiblock_plots}}
and result functions: \code{\link{multiblock_results}}.
}
\description{
This is a wrapper for the \code{RGCCA::rgcca} function for computing MCOA.
}
\details{
MCOA resembles GCA and MFA in that it creates a set of reference scores to which each
block's individual scores should correlate maximally, while also taking the variance
within each block into account. A single-component solution is equivalent to a
PCA on concatenated blocks scaled by the so-called inverse inertia.
}
\examples{
data(potato)
potList <- as.list(potato[c(1,2,9)])
pot.mcoa <- mcoa(potList)
plot(scores(pot.mcoa), labels="names")
}
\references{
\itemize{
\item Le Roux; B. and H. Rouanet (2004). Geometric Data Analysis, From Correspondence Analysis to Structured Data Analysis. Dordrecht. Kluwer: p.180.
\item Greenacre, Michael and Blasius, Jörg (editors) (2006). Multiple Correspondence Analysis and Related Methods. London: Chapman & Hall/CRC.
}
}
\seealso{
Overviews of available methods, \code{\link{multiblock}}, and methods organised by main structure: \code{\link{basic}}, \code{\link{unsupervised}}, \code{\link{asca}}, \code{\link{supervised}} and \code{\link{complex}}.
Common functions for computation and extraction of results and plotting are found in \code{\link{multiblock_results}} and \code{\link{multiblock_plots}}, respectively.
}
|
/man/mcoa.Rd
|
no_license
|
minghao2016/multiblock
|
R
| false | true | 2,026 |
rd
|
## Wynand van Staden
## Least-Squares Calibration method using the peak prevalence data
## Copyright 2019
samSize <- 1000
# SIR model returning infectious prevalences at 2 time points + the peak prevalence time point
sirModelPeakPrev <- function(gamma, beta = 0.2, N = 10000, inf = 0.1, sampleSize = samSize){
library(SimInf)
u0 <- data.frame(S=N*(1-inf), I=N*inf, R=0)
model <- SIR(u0, tspan = seq(0,100,by=1), beta= beta, gamma=gamma)
result <- run(model)
peakIPrev <- which.max(result@U[2,])
individualsPeak <- c(rep("S", result@U[1, peakIPrev]), rep("I", result@U[2, peakIPrev]), rep("R", result@U[3, peakIPrev]))
individuals50 <- c(rep("S", result@U[1, 50]), rep("I", result@U[2, 50]), rep("R", result@U[3, 50]))
individuals65 <- c(rep("S", result@U[1, 65]), rep("I", result@U[2, 65]), rep("R", result@U[3, 65]))
samplePop <- c(summary(as.factor(sample(individualsPeak, size = sampleSize))), summary(as.factor(sample(individuals50, size = sampleSize))), summary(as.factor(sample(individuals65, size = sampleSize))))
pop <- samplePop[names(samplePop) == "I"] #let it return zero instead of numeric(0)
for(i in 1:3){
if(is.na(pop[i])){
pop[i] <- 0
}
}
return(pop/sampleSize)
}
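# Quick sanity check (a minimal sketch; assumes SimInf is installed): a single
# run returns three sampled infectious prevalences -- at the peak and at days
# 50 and 65.
# sirModelPeakPrev(gamma = 0.02)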
#####################################################################
## Model Calibration
#####################################################################
trueGamma <- 0.02
calibModelRuns <- 1000
infPopPeak <- matrix(c(0, 0, 0), calibModelRuns, 3)
resultPeak <- list()
# to store just the calibrated parameter (gamma) from each result
LS.resultPeak <- c()
#This step takes quite a long time
print("Data Simulation Run Counter")
for (i in 1:calibModelRuns) {
infPopPeak[i,] <- sirModelPeakPrev(trueGamma)
f.optPeak <- function(params) sum((sirModelPeakPrev(params)- infPopPeak[i,])^2 )
  # Note: with lower/upper supplied, optim() warns and switches from
  # "Nelder-Mead" to "L-BFGS-B" (bounds are only supported by L-BFGS-B/Brent).
  resultPeak[[i]] <- optim(c(runif(1, 0.01, 0.1)), f.optPeak, method = "Nelder-Mead",lower = 0.01, upper = 0.1, control=list(maxit=1000), hessian = T)
LS.resultPeak[i] <- resultPeak[[i]]$par
print(i) #to check number of runs
}
#####################################################################
## Measuring performance
#####################################################################
# Calculating the average calibrated parameters
avgParPeak <- round(mean(LS.resultPeak), 3)
print("Average of the parameter estimates of gamma:")
print(paste0('Model with 2 + Peak target features: gamma = ', avgParPeak))
#Calculating the Bias
gBiasPeak <- avgParPeak - trueGamma
print("The Percentage Bias of the parameter estimates of gamma:")
print(paste0('Bias for Model with 2 + Peak target features: gamma Bias = ', gBiasPeak/trueGamma *100, '%' ))
#Calculating the accuracy using the Root Mean Square Error
gAccuPeak <- sqrt((sum((LS.resultPeak - trueGamma)^2)/calibModelRuns))
print("The accuracy of the parameter estimates for gamma using RMSE:")
print(paste0('RMSE for Model with 2 + Peak target features: gamma = ', round(gAccuPeak, 3)))
#Calculating the coverage using confidence intervals
standErrorPeak <- c()
for(i in 1:calibModelRuns){
standErrorPeak[i] <- sqrt(abs(diag(solve(resultPeak[[i]]$hessian))))
}
# Now confidence intervals of the parameter estimates
CI_Peak_g <- matrix(c(0, 0), calibModelRuns, 2)
for(i in 1:calibModelRuns){
CI_Peak_g[i,] <- c(LS.resultPeak[i] - 1.96*standErrorPeak[i], LS.resultPeak[i] + 1.96*standErrorPeak[i])
}
# Now to calculate coverage of the true estimate given the confidence intervals of the parameter estimates
LSPeak_gcov <- sum((trueGamma >= CI_Peak_g[,1]) == TRUE & (trueGamma <= CI_Peak_g[,2]) == TRUE)/calibModelRuns * 100
print("The coverage of each parameter estimates of gamma given the CI's:")
print(paste0('Coverage for Model with 2 + Peak target features: gamma = ', LSPeak_gcov, '%'))
|
/Wynand Masters Thesis R code/1 Parameter with Peak Prev/1 One parameter Peak Prev with LS.R
|
no_license
|
Wynand93/Wynand_Masters_R_Code
|
R
| false | false | 3,945 |
r
|
require(dplyr)
data <- read.table("household_power_consumption.txt", sep = ";", dec =".", stringsAsFactors = F, header = T)
samp <- data %>% filter(Date %in% c("1/2/2007","2/2/2007"))
datetime <- strptime(paste(samp$Date,samp$Time,sep = " "), format = "%d/%m/%Y %H:%M:%S")
# columns 7-9 (the sub-metering series) are plotted below, so convert through column 9
for (i in 3:9) {
  samp[, i] <- as.numeric(samp[, i])
}
png("plot3.png", width = 480, height = 480)
plot(datetime, samp[, 7], type = "l", xlab = " ", ylab = "Energy Sub Metering")
lines(datetime, samp[, 8], type = "l", col = "red")
lines(datetime, samp[, 9], type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
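# Quick check (a hypothetical verification step): confirm the PNG was written.
# file.exists("plot3.png")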
|
/plot3.R
|
no_license
|
kimlongngo/ExData_Plotting1
|
R
| false | false | 718 |
r
|
install.packages(c("TTR", "forecast", "tseries"))
# call libraries
library(TTR)
library(forecast)
library(tseries)
# read data
wine <- read.csv("./dataset/timedata/AustralianWines.csv")
head(wine)
# We can confirm that the data start in January 1980.
#======================================================
# Problem 1. Create time variables for the sales data of the Fortified, Red,
# Rose, Sparkling, Sweet white, and Dry white wines, and draw their time series plots.
# make time series variables
fortified <- wine$Fortified
fortified.ts <- ts(fortified, frequency=12, start=c(1980, 1))
red <- wine$Red
red.ts <- ts(red, frequency=12, start=c(1980, 1))
rose <- wine$Rose
rose.ts <- ts(rose, frequency=12, start=c(1980, 1))
sparkling <- wine$sparkling
sparkling.ts <- ts(sparkling, frequency=12, start=c(1980, 1))
sweetwhite <- wine$Sweet.white
sweetwhite.ts <- ts(sweetwhite, frequency=12, start=c(1980, 1))
# timeseries plots
layout(1:5)
plot.ts(fortified.ts, main="Fortified wine")
plot.ts(red.ts, main="Red wine")
plot.ts(rose.ts, main="Rose wine")
plot.ts(sparkling.ts, main="Sparkling wine")
plot.ts(sweetwhite.ts, main="Sweet white wine")
#======================================================
# Problem 2. Using the Red wine data, apply simple exponential smoothing and
# Holt-Winters exponential smoothing to forecast the next year of sales.
# Also draw the time series plots.
# Red wine: simple exponential smoothing
red.ts.simple <- HoltWinters(red.ts, beta=FALSE, gamma=FALSE)
red.ts.simple.forecasts <- forecast(red.ts.simple, h=12)
red.ts.simple.forecasts # forecast sales for 1995
# Red wine: Holt-Winters exponential smoothing
red.ts.hw <- HoltWinters(red.ts, gamma=FALSE)
red.ts.hw.forecasts <- forecast(red.ts.hw, h=12)
red.ts.hw.forecasts # forecast sales for 1995
# plots
layout(1:2)
plot(red.ts.simple.forecasts, main="Simple exponential smoothing")
plot(red.ts.hw.forecasts, main="Holt-Winters exponential smoothing")
#======================================================
# Problem 3. Check the stationarity of Sweet.white from its time series plot.
# Also check stationarity after a log transformation.
layout(1)
plot.ts(sweetwhite.ts)
adf.test(sweetwhite.ts)
# p-value = 0.094: non-stationary at the 0.05 significance level.
plot.ts(log(sweetwhite.ts))
adf.test(log(sweetwhite.ts))
# p-value = 0.081: non-stationary at the 0.05 significance level.
#======================================================
# Problem 4. Take the first difference of the Sweet.white data and run a unit root test.
sweetwhite.diff <- diff(sweetwhite.ts)
plot.ts(sweetwhite.diff)
adf.test(sweetwhite.diff)
# p-value < 0.01: the series has been transformed into stationary data.
#======================================================
# Problem 5. Fit an ARIMA model to the Sweet.white data.
# ARIMA(p, d, q) fitting
# As confirmed in Problems 3-4, we set d=1 to ensure stationarity.
# Sample correlograms
layout(1:2)
acf(sweetwhite.diff)
pacf(sweetwhite.diff)
# Both the SACF and the PACF start at the first lag and die out in a
# sine-wave pattern, so we can take p=q.
# The first two lags are significant, so we set p=q=2 and fit an
# ARIMA(2, 1, 2) model. Fit on the ts object so forecasts keep the time axis.
sweetwhite.arima <- arima(sweetwhite.ts, order=c(2, 1, 2))
sweetwhite.arima
sweetwhite.arima.forecasts <- forecast(sweetwhite.arima, h=12)
layout(1)
plot(sweetwhite.arima.forecasts)
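# Optional residual diagnostics (a minimal sketch): a Ljung-Box test on the
# ARIMA residuals (fitdf = p + q = 4); a large p-value is consistent with
# white-noise residuals.
# Box.test(residuals(sweetwhite.arima), lag = 12, type = "Ljung-Box", fitdf = 4)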
|
/on_site_training/3. 고급 데이터 분석 - 정성규 교수님/Demo_Exercises/TimeSeries-exercise_sol.R
|
no_license
|
soykim-snail/Begas-BigDataTraining
|
R
| false | false | 3,445 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/registry.R
\name{add_credential_function}
\alias{add_credential_function}
\title{Add a new credential fetching function.}
\usage{
add_credential_function(f)
}
\arguments{
\item{f}{A function with the right signature.}
}
\description{
Note that this implicitly adds \code{f} to the \emph{end} of the list.
}
\seealso{
Other registration: \code{\link{all_credential_functions}},
\code{\link{set_credential_functions}}
}
|
/man/add_credential_function.Rd
|
permissive
|
sunivazquez/gauth
|
R
| false | true | 499 |
rd
|
library(rvest)
washington <- read_html("https://www.washingtonian.com/2016/05/05/best-cheap-restaurants-in-washington-dc/")
names <-
washington %>%
html_nodes(".styled-list a") %>%
html_text()
# build data frame
numberToLoad <- 100
restaurants <- data.frame(matrix(nrow = length(names[1:numberToLoad])))
restaurants$Names <- names[1:numberToLoad]
restaurants[1] <- NULL
restaurants$Stars <- NA
restaurants$Ratings <- NA
restaurants$Addresses <- NA
pb <- txtProgressBar(min = 0, max = length(restaurants$Names), style=3)
for (i in 1:length(restaurants$Names)) {
search <- read_html(paste("http://www.yelp.com/search?find_desc=", URLencode(restaurants$Names[i]), "&find_loc=Washington%2C+DC&ns=1", sep = ""))
# record number of stars
starSet <-
search %>%
html_nodes(".natural-search-result .star-img") %>%
html_attr("class")
stars <- as.numeric(gsub("_half", ".5",
gsub("star-img stars_", "", starSet[1])
)
)
restaurants$Stars[i] <- stars
# record number of ratings
ratings <-
search %>%
html_node(".yloca-search-result+ .regular-search-result .rating-qualifier") %>%
html_text()
ratings <- gsub("\n ","",as.character(ratings))
ratings <- gsub("reviews","",as.character(ratings))
ratings <- as.numeric(ratings)
restaurants$Ratings[i] <- ratings
# record addresses
addresses <-
search %>%
html_nodes(".yloca-search-result+ .regular-search-result address")
addresses <- gsub("\n", "", addresses)
addresses <- gsub("<address>", "", addresses)
addresses <- gsub("</address>", "", addresses)
addresses <- gsub("<br/>", " ", addresses)
restaurants$Addresses[i] <- addresses
# update progress bar
setTxtProgressBar(pb, i)
}
# bayesian estimator
populationRatings <- sum(restaurants$Ratings) / length(restaurants$Ratings)
populationMean <- sum(restaurants$Stars) / length(restaurants$Stars)
# squared bayesian removes errors but seems to give less of a spread
# restaurants$BayesianSquared <- (restaurants$Ratings ^ 2) / (restaurants$Ratings ^ 2 + populationRatings ^ 2) * restaurants$Stars +
# (populationRatings ^ 2) / (restaurants$Ratings ^ 2 + populationRatings ^ 2) * populationMean
restaurants$Bayesian <- (restaurants$Ratings) / (restaurants$Ratings + populationRatings) * restaurants$Stars +
(populationRatings) / (restaurants$Ratings + populationRatings) * populationMean
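# Worked example (hypothetical numbers): with populationRatings = 50 and
# populationMean = 4, a restaurant with 10 ratings averaging 5 stars shrinks to
# (10/60)*5 + (50/60)*4 = 4.17, so sparsely rated places pull toward the mean.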
restaurants$NormalizedBayes <- (restaurants$Bayesian - populationMean) / var(restaurants$Stars)
library(ggmap)
boozAddresses <- c("901 15th Street Northwest, Washington, DC 20005", "8283 Greensboro Drive, McLean, VA 22102", "1550 Crystal Drive, Arlington, VA 22202", "20 M Street Southeast 1000, Washington, DC 20003")
boozPositions <- data.frame(matrix(nrow = 4))
for (i in 1:length(boozAddresses)) {
position <- geocode(boozAddresses[i]);
boozPositions$Latitudes[i] <- position$lat;
boozPositions$Longitudes[i] <- position$lon;
}
boozPositions[1] <- NULL
for (i in 1:length(restaurants$Addresses)) {
position <- geocode(restaurants$Addresses[i]);
restaurants$Latitudes[i] <- position$lat
restaurants$Longitudes[i] <- position$lon
restaurants$Distances[i] <- mapdist(boozAddresses[1], restaurants$Addresses[i], mode = "driving")$minutes
}
meanDistance <- sum(restaurants$Distances) / length(restaurants$Distances)
restaurants$NormalizedDistances <- (restaurants$Distances - meanDistance) / var(restaurants$Distances)
restaurants$WeightedRating <- restaurants$NormalizedBayes - restaurants$NormalizedDistances * 10
library(ggplot2)
library(ggmap)
# getting the map
map <- get_map(location = c(lon = mean(restaurants$Longitudes), lat = mean(restaurants$Latitudes)), zoom = 12,
maptype = "roadmap", scale = 2)
# plotting the map with some points on it
ggmap(map) +
geom_point(data = restaurants, aes(x = Longitudes, y = Latitudes, fill = "red", alpha = 0.8), size = 5, shape = 21) +
geom_point(data = boozPositions, aes(x = Longitudes, y = Latitudes, fill = "blue", alpha = 0.8), size = 5, shape = 21) +
guides(fill=FALSE, alpha=FALSE, size=FALSE)
library(leaflet)
library(rgdal)
pal <- colorNumeric(
palette = "Blues",
domain = restaurants$WeightedRating
)
m <- leaflet() %>%
addTiles() %>% # Add default OpenStreetMap map tiles
  # the pipe already supplies the map object, so no extra 'm' argument is needed
  addCircleMarkers(lng = restaurants$Longitudes, lat = restaurants$Latitudes, popup = restaurants$Names, color = pal(restaurants$WeightedRating)) %>%
  addCircleMarkers(lng = boozPositions$Longitudes, lat = boozPositions$Latitudes, popup = "Booz Allen Office", color = "red", fillColor = "red")
m # Print the map
|
/restaurants.R
|
no_license
|
danielgwilson/RestaurantDC
|
R
| false | false | 4,700 |
r
|
###########################################################################/**
# @RdocFunction withCapture
# @alias evalCapture
#
# @title "Evaluates an expression and captures the code and/or the output"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{expr}{The R expression to be evaluated.}
# \item{substitute}{An optional named @list used for substituting
# symbols with other strings.}
# \item{code}{If @TRUE, the deparsed code of the expression is echoed.}
# \item{output}{If @TRUE, the output of each evaluated subexpression
# is echoed.}
# \item{...}{Additional arguments passed to @see "R.utils::sourceTo"
# which in turn passes arguments to @see "base::source".}
# \item{max.deparse.length}{A positive @integer specifying the maximum
# length of a deparsed expression, before truncating it.}
# \item{trim}{If @TRUE, the captured rows are trimmed.}
# \item{newline}{If @TRUE and \code{collapse} is non-@NULL, a newline
# is appended at the end.}
# \item{collapse}{A @character string used for collapsing the captured
# rows. If @NULL, the rows are not collapsed.}
# \item{envir}{The @environment in which the expression is evaluated.}
# }
#
# \value{
# Returns a @character string class 'CapturedEvaluation'.
# }
#
# @examples "../incl/withCapture.Rex"
#
# @author
#
# \seealso{
# Internally, @see "base::eval" is used to evaluate the expression.
# }
#
# @keyword utilities
#*/###########################################################################
withCapture <- function(expr, substitute=getOption("withCapture/substitute", ".x."), code=TRUE, output=code, ..., max.deparse.length=getOption("max.deparse.length", 10e3), trim=TRUE, newline=getOption("withCapture/newline", TRUE), collapse="\n", envir=parent.frame()) {
# Get code/expression without evaluating it
expr2 <- substitute(expr);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Substitute?
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# (a) Substitute by "constant" symbols?
if (is.list(substitute) && (length(substitute) > 0L)) {
names <- names(substitute);
if (is.null(names)) throw("Argument 'substitute' must be named.");
expr2 <- do.call(base::substitute, args=list(expr2, substitute))
}
# (b) Substitute code by regular expressions?
if (is.character(substitute) && (length(substitute) > 0L)) {
patterns <- names(substitute);
replacements <- substitute;
# Predefined rules?
if (is.null(patterns)) {
patterns <- rep(NA_character_, times=length(replacements));
for (kk in seq_along(replacements)) {
replacement <- replacements[kk];
if (identical(replacement, ".x.")) {
patterns[kk] <- "^[.]([a-zA-Z0-9_.]+)[.]$"
replacements[kk] <- "\\1";
} else if (identical(replacement, "..x..")) {
patterns[kk] <- "^[.][.]([a-zA-Z0-9_.]+)[.][.]$"
replacements[kk] <- "\\1";
}
}
unknown <- replacements[is.na(patterns)];
if (length(unknown) > 0L) {
throw("Unknown substitution rules: ", paste(sQuote(unknown), collapse=", "));
}
}
if (is.null(patterns)) throw("Argument 'substitute' must be named.");
# (b) Substitute via regular expression
for (kk in seq_along(replacements)) {
pattern <- patterns[kk];
replacement <- replacements[kk];
expr2 <- egsub(pattern, replacement, expr2, envir=envir);
}
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Deparse
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# WAS:
## sourceCode <- capture.output(print(expr2));
sourceCode <- deparse(expr2, width.cutoff=getOption("deparse.cutoff", 60L));
  # Nothing to do?
if (length(sourceCode) == 0L) {
## Can this ever happen? /HB 2015-05-27
return(structure(character(0L), class=c("CapturedEvaluation", "character")));
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Trim code
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Trim off the surrounding { ... }
if (sourceCode[1L] == "{") {
sourceCode <- sourceCode[-c(1L, length(sourceCode))];
    # Nothing to do?
if (length(sourceCode) == 0L) {
return(structure(character(0L), class=c("CapturedEvaluation", "character")));
}
# Drop shortest white space prefix
prefix <- gsub("^([ \t]*).*", "\\1", sourceCode);
minPrefix <- min(nchar(prefix), na.rm=TRUE);
if (minPrefix > 0L) {
sourceCode <- substring(sourceCode, first=minPrefix+1);
}
# WORKAROUND: Put standalone 'else':s together with previous statement.
# This solves the problem described in R help thread "deparse() and the
# 'else' statement" by Yihui Xie on 2009-11-09
# [http://tolstoy.newcastle.edu.au/R/e8/help/09/11/4204.html], where
# deparse puts 'else' on a new line iff if-else statement is enclosed
# in an { ... } expression, e.g.
# cat(deparse(substitute({if (T) 1 else 2})), sep="\n") gives:
# {
# if (T)
# 1
# else 2
# }
# whereas deparse(substitute(if (T) 1 else 2)) gives:
# if (T) 1 else 2
# /HB 2014-08-12
idxs <- grep("^[ ]*else[ ]*", sourceCode);
if (length(idxs) > 0L) {
if (any(idxs == 1L)) {
stop("INTERNAL ERROR: Detected 'else' statement at the very beginning: ", paste(sourceCode, collapse="\n"));
}
sourceCode[idxs-1L] <- paste(sourceCode[idxs-1L], sourceCode[idxs], sep=" ");
sourceCode <- sourceCode[-idxs];
}
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Evaluate the code expression
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# WORKAROUND: The following will *not* evaluate in environment
# 'envir' due to capture.output() *unless* we evaluate 'envir'
# before. This sanity check will do that. /HB 2011-11-23
stopifnot(is.environment(envir));
# Evaluate the sourceCode via source()
con <- textConnection(sourceCode, open="r");
res <- captureOutput({
sourceTo(file=con, echo=code, print.eval=output, max.deparse.length=max.deparse.length, ..., envir=envir);
});
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Cleanup captured output?
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Drop empty lines?
if (trim) {
res <- res[nchar(res) > 0L];
}
if (!is.null(collapse)) {
if (newline) res <- c(res, "");
res <- paste(res, collapse=collapse);
}
class(res) <- c("CapturedEvaluation", class(res));
res;
} # withCapture()
# BACKWARD COMPATIBILITY
evalCapture <- withCapture
setMethodS3("print", "CapturedEvaluation", function(x, ...) {
cat(x);
})
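## Minimal usage sketch (assuming this file's package is attached): echoes
## both the deparsed code and each subexpression's printed output.
## withCapture({
##   x <- 1:3
##   mean(x)
## })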
##############################################################################
# HISTORY:
# 2014-12-02
# o withCapture({}) no longer generates a warning.
# 2014-08-12
# o BUG FIX: withCapture({ if (T) 1 else 2 }) would give a parse error on
# "unexpected 'else'", because the internal deparsing puts the 'else'
# statement on a new line whenever an if-else statement is enclosed
# in an { ... } expression. This problem is also described in R help
# thread "deparse() and the 'else' statement" by Yihui Xie on 2009-11-09
# [http://tolstoy.newcastle.edu.au/R/e8/help/09/11/4204.html]. The
# workaround is to detect standalone 'else' statements and merge them
# with the previous line.
# 2014-05-06
# o Added support for expression substitution via regular expressions.
# The default is now to substitute any '.x.' with gstring("${x}").
# 2014-05-01
# o Renamed evalCapture() to withCapture(). Old name kept for backward
# compatibility, but will eventually be deprecated.
# 2014-04-26
# o Added option "evalCapture/newline".
# 2014-04-24
# o Added argument 'newline' to evalCapture().
# 2014-04-22
# o Added argument 'substitute' to evalCapture() for substituting symbols
# "on the fly" in the expression before it is evaluated.
# 2014-04-09
# o Added argument 'max.deparse.length' to evalCapture().
# 2014-04-06
# o Now evalCapture() utilizes deparse() to get the source code and
# acknowledges options 'deparse.cutoff' to control the code wrapping.
# Previously capture.output(print()) was used.
# 2011-11-23
# o BUG FIX: evalCapture() with argument 'envir' defaulting to parent.frame()
# would not be evaluated in the parent frame as it should. It appears
# that the internal capture.output() prevents this from happening, unless
# argument 'envir' is explicitly evaluated within evalCapture().
# 2011-11-05
# o Added evalCapture(..., code=TRUE, output=TRUE), which is adopted from
# evalWithEcho() in R.rsp v0.6.5.
#
# HISTORY of evalWithEcho() in R.rsp:
# 2011-03-28
# o Rewrote evalWithEcho() so that it utilizes source(..., echo=TRUE).
# o BUG FIX: evalWithEcho() would only add the prompt to the first line.
# 2011-03-15
# o Added evalWithEcho().
# o Created.
##############################################################################
|
/R.utils/R/withCapture.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 9,176 |
r
|
pairs(NOISE ~., filter)
# Plot data
par(mfrow = c(1,2))
boxplot(NOISE ~ size, df, xlab = "Size", ylab = "NOISE", col = c(2,3))
boxplot(Distance ~ Grade, df, xlab = "Grade", ylab = "Distance")
boxplot(Distance ~ Gender, df, xlab = "Gender", ylab = "Distance", col = c(2,3))
plot(as.numeric(df$Gender), df$Distance, col = as.numeric(df$Gender)+1,
pch = as.numeric(df$Grade), xlab = "Gender", ylab = "Distance")
legend("center", legend = c("Boys 7th", "Boys 8th", "Girls 7th", "Girls 8th"), col = c(2,2,3,3), pch = c(1,1,2,2))
|
/Exercises_AppliedStatisticsCourse/fertilizer.R
|
no_license
|
rebekabato/DTU-archive
|
R
| false | false | 536 |
r
|
\name{mUnits}
\alias{mUnits}
\title{Metric system}
\description{This function converts values between metric units.}
\usage{mUnits(x, from = "mm", to = "mm")}
\arguments{
\item{x}{\code{numeric} vector.}
\item{from}{\code{character}. Initial metric unit.}
\item{to}{\code{character}. Final metric unit.}
}
\details{Characters in \code{from} and \code{to}
arguments have the form 'p_', where 'p' is the
metric prefix and '_' is a base unit. Sixteen
metric prefixes are supported: atto 'a', femto
'f', pico 'p', nano 'n', micro 'mm', milli 'm',
centi 'c', deci 'd', deca 'da', hecto 'h', kilo
'k', mega 'M', giga 'G', tera 'T', peta 'P', and
exa 'E'.}
\value{\code{numeric} vector.}
\author{Wilson Lara <wilarhen@gmail.com>, Felipe Bravo <fbravo@pvs.uva.es>}
\examples{
## Simulation of TRW data
set.seed(1)
w <- abs(rnorm(12,1,1))
trw <- ts(w,start = 1970)
## transforming the metric units of the trw vector from millimeters to meters
sr <- mUnits(trw, from = 'mm', to = 'm')
attributes(sr)
}
|
/man/mUnits.Rd
|
no_license
|
cran/BIOdry
|
R
| false | false | 1,012 |
rd
|
# Get the current working directory
currentWD <- getwd()
# Get the project directory
dirProyect <- dirname(rstudioapi::getSourceEditorContext()$path)
# Check whether we are already in the project directory
if (dirProyect != currentWD)
{
  setwd(dirProyect) # Switch to the project directory
}
# Get the directory where the downloaded files will be stored
dirDataFrame <- paste(dirProyect, "/data-postwork3", sep = "")
# Set the working directory to the folder where our files will be stored
setwd(dirDataFrame)
# Data URLs
u1718 <- "https://www.football-data.co.uk/mmz4281/1718/SP1.csv"
u1819 <- "https://www.football-data.co.uk/mmz4281/1819/SP1.csv"
u1920 <- "https://www.football-data.co.uk/mmz4281/1920/SP1.csv"
# Download the data and store it locally.
download.file(url = u1718, destfile = "sp1-esp-1718.csv", mode = "wb")
download.file(url = u1819, destfile = "sp1-esp-1819.csv", mode = "wb")
download.file(url = u1920, destfile = "sp1-esp-1920.csv", mode = "wb")
# List the files in the directory
dir()
# Load the files into a list
files.sp1.esp <- lapply(dir(), read.csv)
# Inspect their characteristics
# str shows the type and name of every element of the data frame
str(files.sp1.esp[1])
# head returns the first 6 rows of the data frame.
head(files.sp1.esp[1])
# View displays the data in table form so we can inspect the contents
View(files.sp1.esp[1])
# summary gives a summary of each column of the data frame.
summary(files.sp1.esp[1])
# Load the dplyr library
library(dplyr)
# Select our 6 variables of interest.
files.sp1.esp <- lapply(files.sp1.esp, select, Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
files.sp1.esp <- lapply(files.sp1.esp, select, Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
str(files.sp1.esp) # Verificamos que nuestra salida sea la correcta
# Cambiamos el tipo de nuestras variables de cada archivo en la lista
files.sp1.esp <- lapply(files.sp1.esp, mutate, Date = as.Date(Date, "%Y-%m-%d"))
str(files.sp1.esp) # Verificamos que nuestra salida sea correcta
# Combinamos nuestros documentos en un solo data frame.
sp1.esp.1720 <- do.call(rbind, files.sp1.esp)
# Ordenamos nuestro dataframe de forma creciente.
sp1.esp.1720 <- sp1.esp.1720[order(sp1.esp.1720$Date), ]
str(sp1.esp.1720) # Verificamos nuestra salida
dim(sp1.esp.1720) # Obtenemos las dimensiones
head(sp1.esp.1720) # Vemos los primeros 6 elementos.
tail(sp1.esp.1720) # Vemos los ultimos 6 elementos.
# Guardamos nuestro nuestro nuevo data frame como csv.
write.csv(sp1.esp.1720, "sp1-esp-1720.csv", row.names = F)
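# Optional sanity check (a sketch): re-read the file just written and
# confirm the row count matches the combined data frame.
check <- read.csv("sp1-esp-1720.csv", stringsAsFactors = FALSE)
stopifnot(nrow(check) == nrow(sp1.esp.1720))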
|
/Sesion 2/postwork/postwork-s2.R
|
no_license
|
Deltarios/bedu-postwork-e2p1
|
R
| false | false | 2,603 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customvision_imgs.R
\name{browse_images}
\alias{browse_images}
\title{View images uploaded to a Custom Vision project}
\usage{
browse_images(project, img_ids, which = c("resized", "original",
"thumbnail"), max_images = 20, iteration = NULL)
}
\arguments{
\item{project}{A Custom Vision project.}
\item{img_ids}{The IDs of the images to view. You can use \code{\link{list_images}} to get the image IDs for this project.}
\item{which}{Which image to view: the resized version used for training (the default), the original uploaded image, or the thumbnail.}
\item{max_images}{The maximum number of images to display.}
\item{iteration}{The iteration ID (roughly, which model generation to use). Defaults to the latest iteration.}
}
\description{
View images uploaded to a Custom Vision project
}
\details{
Images in a Custom Vision project are stored in Azure Storage. This function gets the URLs for the uploaded images and displays them in your browser.
}
\seealso{
\code{\link{list_images}}
}
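\examples{
\dontrun{
## a sketch, assuming 'myproj' is an existing Custom Vision project object
ids <- list_images(myproj)
browse_images(myproj, ids[1:5], which = "original")
}
}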
|
/man/browse_images.Rd
|
permissive
|
isabella232/AzureVision
|
R
| false | true | 1,076 |
rd
|
vwc.data.50cm <- read.csv('C:/Users/smdevine/Desktop/rangeland project/soilmoisture/all.sites.50cmVWC.csv', stringsAsFactors = FALSE)
lapply(vwc.data.50cm, class)
time.cols <- grepl('time', colnames(vwc.data.50cm))
vwc.cols <- grepl('VWC', colnames(vwc.data.50cm))
vwc.data.50cm[,time.cols] <- lapply(vwc.data.50cm[,time.cols], strptime, format="%m/%d/%Y %H:%M")
head(vwc.data.50cm$Adelaida.time)
lapply(vwc.data.50cm[,vwc.cols], summary)
vwc.data.50cm[,vwc.cols] <- lapply(vwc.data.50cm[,vwc.cols], function(x) ifelse(x < 0, NA, x))
lapply(vwc.data.50cm[,vwc.cols], summary)
plot(vwc.data.50cm$Adelaida.time, vwc.data.50cm$Adelaida.VWC.50cm) #full wet-up almost every winter
plot(vwc.data.50cm$Bitterwater.time, vwc.data.50cm$Bitterwater.VWC.50cm) #no wet-up until 2017
plot(vwc.data.50cm$Cambria.time, vwc.data.50cm$Cambria.VWC.50cm) #no wet-up until WY2016
plot(vwc.data.50cm$MorroBayN.time, vwc.data.50cm$MorroBayN.VWC.50cm) #may be erroneous during summer/fall 2016; wet-ups in 2015, 2016, and 2017
plot(vwc.data.50cm$Shandon.time, vwc.data.50cm$Shandon.VWC.50cm) #no wet-up until 2017
plot(vwc.data.50cm$SodaLake.time, vwc.data.50cm$SodaLake.VWC.50cm) #no wet-up until 2017
sum(vwc.data.50cm$Cambria.VWC.50cm < 0.05, na.rm = TRUE) #150
sum(vwc.data.50cm$Cambria.VWC.50cm < 0.1, na.rm = TRUE) #150
sum(vwc.data.50cm$Cambria.VWC.50cm < 0.13, na.rm = TRUE) #150
sum(vwc.data.50cm$Cambria.VWC.50cm < 0.15, na.rm = TRUE) #3010
# so, the threshold for flagging bad readings should stay below 0.13 (the counts jump between 0.13 and 0.15)
vwc.data.50cm$Cambria.time[which(vwc.data.50cm$Cambria.VWC.50cm < 0.05)] #2/16 to 2/26 in 2014 and 5/7 to 5/8 in 2015
vwc.data.50cm$Cambria.VWC.50cm[5000:5300]
vwc.data.50cm$Cambria.VWC.50cm[4500:5000]
vwc.data.50cm$Cambria.VWC.50cm[5300:6000]#clearly there were problems
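# a sketch of the implied cleanup (assumption: treat Cambria readings below
# 0.05 as sensor errors and set them to NA, mirroring the negative-value fix above):
# bad.cambria <- which(vwc.data.50cm$Cambria.VWC.50cm < 0.05)
# vwc.data.50cm$Cambria.VWC.50cm[bad.cambria] <- NA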
sum(vwc.data.50cm$MorroBayN.VWC.50cm < 0.05, na.rm = TRUE) #284
hist(vwc.data.50cm$MorroBayN.VWC.50cm)
which(vwc.data.50cm$MorroBayN.VWC.50cm < 0.05)
vwc.data.50cm$MorroBayN.time[18000:18500]
vwc.data.50cm$MorroBayN.VWC.50cm[18000:18500]
plot(vwc.data.50cm$MorroBayN.time, vwc.data.50cm$MorroBayN.VWC.50cm)
# leftover from a separate loop script ('soil_moisture_dfs' and 'i' are not defined here):
# soil_moisture_dfs[[i]]$date <- format(soil_moisture_dfs[[i]]$Measurement.Time, "%Y%m%d")
|
/analysis_dissertation/50cm.VWC.allsites.R
|
no_license
|
smdevine/RangelandTheta
|
R
| false | false | 2,122 |
r
|
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
if(!file.exists("data")){
dir.create("data")
}
download.file(fileUrl, destfile="./data/cameras.csv", method="curl")
dateDownloaded <- date()
save(dateDownloaded, file="./data/downloadDate.rda")
|
/Workshop1/downloadData.R
|
no_license
|
lindsayrutter/SISBID
|
R
| false | false | 290 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processData.R
\name{correlationOmics}
\alias{correlationOmics}
\title{Correlation between omics data}
\usage{
correlationOmics(dataOmics1, dataOmics2)
}
\arguments{
\item{dataOmics1}{first omics dataset.}
\item{dataOmics2}{second omics dataset.}
}
\value{Correlation of genes between the two omics datasets.}
\description{
Get correlation of genes between different omics data
}
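\examples{
## a sketch with hypothetical inputs; both arguments are assumed to be
## omics matrices with matching gene rows:
# omics1 <- matrix(rnorm(50), nrow = 10)
# omics2 <- matrix(rnorm(50), nrow = 10)
# correlationOmics(omics1, omics2)
}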
|
/man/correlationOmics.Rd
|
no_license
|
miccec/CPTACBiolinks
|
R
| false | true | 368 |
rd
|
marfissci.simple.map<-function(rds,
agg.by = "SPECIES_CODE",
colour.by = "SUM_RND_WEIGHT_KGS",
crs.out="+proj=utm +zone=20 +datum=WGS84",
xlim=c(-68,-53),
ylim=c(40,49),
valid.only = T,
show.legend = T,
save.plot = T,
plot.title="",
name.det = NULL,
nclasses=5,
add.OCMD = c("St_Ann","Gully","Vazella_Emerald","Vazella_Sambro","Lophelia", "NE_Channel")
){
proj.metric = '+proj=aea +lat_1=20 +lat_2=60 +lat_0=23 +lon_0=-96
+x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m'
library(classInt)
library(rgdal)
limits = data.frame(X = xlim, Y = ylim)
coordinates(limits) = c("X", "Y")
proj4string(limits) = CRS("+proj=longlat +datum=WGS84")
boundbox = SpatialPolygons(list(Polygons(list(Polygon(cbind(
mm= c(limits$X[2],
seq(limits$X[2],limits$X[1],length=200),
seq(limits$X[1],limits$X[2],length=200)),
nn = c(limits$Y[2],
seq(limits$Y[1],limits$Y[1],length=200),
seq(limits$Y[2],limits$Y[2],length=200))))),
ID = "bb")),
proj4str = CRS("+proj=longlat +datum=WGS84"))
boundbox.pr = spTransform(boundbox,crs.out)
#make slightly bigger bbox to reserve space in final
boundbox2 = SpatialPolygons(list(Polygons(list(Polygon(cbind(
mm= c(limits$X[2]+1,
seq(limits$X[2]+1,limits$X[1]-1,length=200),
seq(limits$X[1]-1,limits$X[2]+1,length=200)),
nn = c(limits$Y[2]+1,
seq(limits$Y[1]-1,limits$Y[1]-1,length=200),
seq(limits$Y[2]+1,limits$Y[2]+1,length=200))))),
ID = "bb2")),
proj4str = CRS("+proj=longlat +datum=WGS84"))
boundbox2.pr = spTransform(boundbox2,crs.out)
#should clip the data to desired frame here so that classIntervals only works
#on visible data!
#Do nrow check AFTER that
rds.clipped = rds[boundbox,] #clip data to bbox
if (valid.only) rds.clipped = rds.clipped[rds.clipped@data$VALIDITY == 'VALID',]
ncheck=length(unique(rds.clipped@data[,c(colour.by)])) # number of distinct values available for classing
if (ncheck<2) return(NULL) # can't classify on a single value
if (nclasses>ncheck) nclasses=ncheck # cap the class count when there isn't enough data for the requested number
rds.clipped@data$ORD = seq.int(nrow(rds.clipped))
classes = classIntervals(rds.clipped@data[,c(colour.by)], n=nclasses, style= "quantile", dataPrecision=0)
colcode = findColours(classes,
c("#c7e9b4","#41b6c4","#225ea8","#081d58")) #colorblind-friendly yellow-blue
#c("#deebf7", "#9ecae1","#3182bd")) #colorblind-friendly blues
#c("#fee6ce","#fdae6b","#e6550d")) #colorblind-friendly oranges
colour.df = as.data.frame(cbind(varname=classes$var,colcode))
names(colour.df)[names(colour.df)=="varname"] <- colour.by
rds.clipped@data = merge( rds.clipped@data,unique(colour.df), all.x = T)
#rds.clipped@data = rds.clipped@data[order(rds.clipped@data$ORD),]
rds.clipped.pr = spTransform(rds.clipped, CRS(crs.out), match.ID=F)
rds.clipped.pr@data = rds.clipped.pr@data[order(rds.clipped.pr@data$ORD),]
if (!exists("coast.aea") ||
!exists("coast.clipped.aea") ||
!exists("coast.clipped.pr") ||
!exists("the.grid") ||
!exists("grid.pr") ||
!exists("these.gridlines") ||
!exists("these.gridlines.pr") )
{
loadfunctions("coastline")
writeLines("Building the coastline...")
coast.aea <<- coastline.db( DS="gshhg coastline highres",
crs=proj.metric,
p=NULL, level=1, xlim=NULL, ylim=NULL )
library(rgeos)
writeLines("Trimming the data to match the selected bounding box (so that data can be projected)")
coast.clipped.aea <<- gIntersection(gBuffer(coast.aea, byid=TRUE, width=0), spTransform(boundbox,proj.metric))
coast.clipped.pr <<- spTransform(coast.clipped.aea,crs.out)
#'using the clipped data (pre-projection), capture information for the grid,
#'including information about the gridlines, as well as their labels
the.grid <<- gridat(boundbox, easts=seq(boundbox@"bbox"[1],boundbox@"bbox"[3],by=2),
norths=seq(boundbox@"bbox"[2],boundbox@"bbox"[4],by=2))
grid.pr <<- spTransform(the.grid, CRS(crs.out))
these.gridlines <<- gridlines(boundbox, easts=seq(boundbox@"bbox"[1],boundbox@"bbox"[3],by=1),
norths=seq(boundbox@"bbox"[2],boundbox@"bbox"[4],by=1))
these.gridlines.pr <<- spTransform(these.gridlines, CRS(crs.out))
}
if (!exists("clip.100") ||
!exists("clip.200") ||
!exists("clip.300") ||
!exists("clip.400") ||
!exists("clip.500") ||
!exists("clip.600") ||
!exists("clip.700") ||
!exists("clip.800") ||
!exists("clip.900"))
{
#bathy data
writeLines("Generating contours")
p = list( project.name = "bathymetry" )
p$project.root = project.datadirectory( p$project.name )
p$init.files = loadfunctions( c( "spacetime", "utility", "parallel", "bathymetry", "polygons" ) )
p$libs = RLibrary( "rgdal", "maps", "mapdata", "maptools", "lattice", "geosphere", "sp", "raster", "colorspace" )
p = spatial.parameters( type="canada.east.highres", p=p )
depths = c(100, 200, 300, 400, 500, 600, 700, 800, 900) #, 2000, 5000 )
plygn = isobath.db( p=p, DS="isobath", depths=depths )
#data must be clipped so it doesn't extend beyond the bounding box
clip.100 <<- gIntersection(spTransform(plygn["100"], CRS(crs.out)), boundbox.pr)
clip.200 <<- gIntersection(spTransform(plygn["200"], CRS(crs.out)), boundbox.pr)
clip.300 <<- gIntersection(spTransform(plygn["300"], CRS(crs.out)), boundbox.pr)
clip.400 <<- gIntersection(spTransform(plygn["400"], CRS(crs.out)), boundbox.pr)
clip.500 <<- gIntersection(spTransform(plygn["500"], CRS(crs.out)), boundbox.pr)
clip.600 <<- gIntersection(spTransform(plygn["600"], CRS(crs.out)), boundbox.pr)
clip.700 <<- gIntersection(spTransform(plygn["700"], CRS(crs.out)), boundbox.pr)
clip.800 <<- gIntersection(spTransform(plygn["800"], CRS(crs.out)), boundbox.pr)
clip.900 <<- gIntersection(spTransform(plygn["900"], CRS(crs.out)), boundbox.pr)
#clip.1000 <<- gIntersection(spTransform(plygn["1000"], CRS(crs.out)), boundbox.pr)
}
#get the desired OCMD areas
if (length(add.OCMD)>0) OCMD.areas=get.ocmd.areas(add.OCMD)
if (save.plot){
if (!is.null(name.det)){
name.detail=paste0(name.det,"_")
}else{
name.detail=""
}
if (range(rds.clipped.pr@data[agg.by])[1] == range(rds.clipped.pr@data[agg.by])[2]) {
the.filename = range(rds.clipped.pr@data[agg.by])[1]
}else{
the.filename = paste(range(rds.clipped.pr@data[agg.by]),collapse = "_")
}
# plot.title.clean=gsub("(\\(|\\)|\\s|\\/|,)","_",plot.title)
# plot.title.clean=gsub("__","_", plot.title.clean)
# plot.title.clean=substr(plot.title.clean,1,15)
# plot.title.clean=paste0(sub('_$', '', plot.title.clean),"_")
agg.type=paste0(substr(agg.by, 1, 4),"_")
the.filename=paste0(name.detail,agg.type,the.filename,".png")
png(filename=paste0(project.datadirectory("mpa"),"/figures/",the.filename),
width = 6, height = 4, units = "in", res= 300, pointsize = 4,
bg = "white", family = "", restoreConsole = TRUE,
type = c("windows", "cairo", "cairo-png"))
}
par(mar=c(2,2,1,1),xaxs = "i",yaxs = "i",cex.axis=1.3,cex.lab=1.4)
plot(boundbox2.pr, border="transparent", add=F, lwd=1) #add transparent boundbox first to ensure all data shown
plot(coast.clipped.pr, col="navajowhite2", border="navajowhite4", lwd=0.5, axes=F, add=T ) #add coastline
# lines(clip.1000, col="#666666", lwd=0.5)
lines(clip.900, col="#717171", lwd=0.5)
lines(clip.800, col="#7C7C7C", lwd=0.5)
lines(clip.700, col="#888888", lwd=0.5)
lines(clip.600, col="#939393", lwd=0.5)
lines(clip.500, col="#9E9E9E", lwd=0.5)
lines(clip.400, col="#AAAAAA", lwd=0.5)
lines(clip.300, col="#B5B5B5", lwd=0.5)
lines(clip.200, col="#C0C0C0", lwd=0.5)
lines(clip.100, col="#CCCCCC", lwd=0.5)
if (length(add.OCMD)>0) { # guard: OCMD.areas exists only when areas were requested
  for (o in seq_along(OCMD.areas)){
    plot(spTransform(OCMD.areas[[o]], CRS(crs.out)), border="olivedrab4", lwd=0.5, add=T)
  }
}
plot(these.gridlines.pr, col="grey77", lty=2, lwd=0.5, add=T) #gridlines
points(rds.clipped.pr, col = rds.clipped.pr@data$colcode, pch = 15, cex = 0.5)
text(coordinates(grid.pr), pos=grid.pr$pos, labels=parse(text=as.character(the.grid$labels)),
offset=0.2, col="black", cex=1)
plot(boundbox.pr, border="black", add=T, lwd=1) #add actual boundbox
if (show.legend){
legend(title = plot.title ,min(boundbox.pr@bbox[1,])+(0.075*(max(boundbox.pr@bbox[1,])-min(boundbox.pr@bbox[1,]))),
min(boundbox.pr@bbox[2,])+(0.95*(max(boundbox.pr@bbox[2,])-min(boundbox.pr@bbox[2,]))),
cex=1, y.intersp=0.8,
legend = c(gsub(",","-",names(attr(colcode, "table"))),"no data"),
fill = c(attr(colcode, "palette"),"white"))
}
if (save.plot) dev.off()
}
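# Hypothetical usage sketch (assumes 'catch.rds' is a SpatialPointsDataFrame of
# MARFIS records carrying SPECIES_CODE, SUM_RND_WEIGHT_KGS and VALIDITY columns):
# marfissci.simple.map(catch.rds, agg.by = "SPECIES_CODE",
#                      colour.by = "SUM_RND_WEIGHT_KGS",
#                      save.plot = FALSE, plot.title = "Round weight (kg)")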
|
/mpa/src/_Rfunctions/marfissci.simple.map.r
|
no_license
|
surfcao/ecomod
|
R
| false | false | 9,209 |
r
|
# package umap
#
# UMAP stands for "Uniform Manifold Approximation and Projection"
# UMAP is a method proposed by Leland McInnes and John Healy.
#
# The original implementation was written in python by Leland McInnes.
# The original implementation is available at https://github.com/lmcinnes/umap
#
# This package is an interface to using the UMAP algorithm in R. This file
# is the entrypoint to the package. It defines a configuration object
# and a umap() function.
#
# These three lines required to use Rcpp; do not remove
#' @useDynLib umap
#' @importFrom Rcpp sourceCpp
NULL
# For interfacing with python and "umap-learn"
#' @importFrom reticulate py_module_available import
NULL
# This implements a "soft" requirement for python and the umap module
# i.e. the package should work when those components are absent
# but gain additional functionality when those components are present
#' interface to umap-learn via reticulate
#'
#' @keywords internal
#' @noRd
python.umap <- NULL
.onLoad <- function(libname, pkgname) {
# this "try" block is necessary because:
# a system that has python but not umap-learn stops during the test suite
# with the following sequence of commands (devtools)
# document(); test(); test()
# note that test() only fails at second round
try({
python.umap <<- reticulate::import("umap", delay_load=TRUE)
}, silent=TRUE)
}
#' Default configuration for umap
#'
#' A list with parameters customizing a UMAP embedding. Each component of the
#' list is an effective argument for umap().
#'
#' n_neighbors: integer; number of nearest neighbors
#'
#' n_components: integer; dimension of target (output) space
#'
#' metric: character or function; determines how distances between
#' data points are computed. When using a string, available metrics are:
#' euclidean, manhattan. Other available generalized metrics are: cosine,
#' pearson, pearson2. Note the triangle inequality may not be satisfied by
#' some generalized metrics, hence knn search may not be optimal.
#' When using metric.function as a function, the signature must be
#' function(matrix, origin, target) and should compute a distance between
#' the origin column and the target columns
#'
#' n_epochs: integer; number of iterations performed during
#' layout optimization
#'
#' input: character, use either "data" or "dist"; determines whether the
#' primary input argument to umap() is treated as a data matrix or as a
#' distance matrix
#'
#' init: character or matrix. The default string "spectral" computes an initial
#' embedding using eigenvectors of the connectivity graph matrix. An
#' alternative is the string "random", which creates an initial layout based on
#' random coordinates. This setting can also be set to a matrix, in which case
#' layout optimization begins from the provided coordinates.
#'
#' min_dist: numeric; determines how close points appear in the final layout
#'
#' set_op_mix_ratio: numeric in range [0,1]; determines how the knn-graph
#' is used to create a fuzzy simplicial graph
#'
#' local_connectivity: numeric; used during construction of fuzzy simplicial
#' set
#'
#' bandwidth: numeric; used during construction of fuzzy simplicial set
#'
#' alpha: numeric; initial value of "learning rate" of layout optimization
#'
#' gamma: numeric; determines, together with alpha, the learning rate of
#' layout optimization
#'
#' negative_sample_rate: integer; determines how many non-neighbor points are
#' used per point and per iteration during layout optimization
#'
#' a: numeric; contributes to gradient calculations during layout optimization.
#' When left at NA, a suitable value will be estimated automatically.
#'
#' b: numeric; contributes to gradient calculations during layout optimization.
#' When left at NA, a suitable value will be estimated automatically.
#'
#' spread: numeric; used during automatic estimation of a/b parameters.
#'
#' random_state: integer; seed for random number generation used during umap()
#'
#' transform_state: integer; seed for random number generation used during
#' predict()
#'
#' knn: object of class umap.knn; precomputed nearest neighbors
#'
#' knn_repeats: integer; number of times to restart the knn search
#'
#' verbose: logical or integer; determines whether to show progress messages
#'
#' umap_learn_args: vector of arguments to python package umap-learn
#'
#' @examples
#' # display all default settings
#' umap.defaults
#'
#' # create a new settings object with n_neighbors set to 5
#' custom.settings = umap.defaults
#' custom.settings$n_neighbors = 5
#' custom.settings
#'
#' @export
umap.defaults <- list(
n_neighbors=15,
n_components=2,
metric="euclidean",
n_epochs=200,
input="data",
init="spectral",
min_dist=0.1,
set_op_mix_ratio=1,
local_connectivity=1,
bandwidth=1.0,
alpha=1,
gamma=1.0,
negative_sample_rate=5,
a=NA,
b=NA,
spread=1,
random_state=NA,
transform_state=NA,
knn=NA,
knn_repeats=1,
verbose=FALSE,
umap_learn_args = NA
)
class(umap.defaults) <- "umap.config"
#' Computes a manifold approximation and projection
#'
#' @export
#' @param d matrix, input data
#' @param config object of class umap.config
#' @param method character, implementation. Available methods are 'naive'
#' (an implementation written in pure R) and 'umap-learn' (requires python
#' package 'umap-learn')
#' @param preserve.seed logical, leave TRUE to insulate external code from
#' randomness within the umap algorithms; set FALSE to allow randomness used
#' in umap algorithms to alter the external random-number generator
#' @param ... list of settings; values overwrite defaults from config;
#' see documentation of umap.defaults for details about available settings
#'
#' @return object of class umap, containing at least a component
#' with an embedding and a component with configuration settings
#'
#' @examples
#' # embed the iris dataset using default settings
#' iris.umap = umap(iris[,1:4])
#'
#' # display object summary
#' iris.umap
#'
#' # display embedding coordinates
#' head(iris.umap$layout)
#'
umap <- function(d, config=umap.defaults,
method=c("naive", "umap-learn"),
preserve.seed=TRUE,
...) {
# save existing RNG seed, will use "internal" seed later
old.seed <- get.global.seed()
# prep - check inputs, configuration settings
method <- config$method <- match.arg(method)
config <- umap.prep.config(config, ...)
d <- umap.prep.input(d, config)
set.seed(config$random_state)
# perform the actual work with a specific umap implementation
if (nrow(d)<=2) {
result <- umap.small(d, config)
} else {
implementations <- c(naive=umap.naive,
"umap-learn"=umap.learn)
if (method %in% names(implementations)) {
result <- implementations[[method]](d, config)
}
}
class(result) <- "umap"
# restore state and finish
if (preserve.seed) {
set.global.seed(old.seed)
}
result
}
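# a minimal sketch of inline configuration: settings passed through '...'
# override the corresponding umap.defaults entries, per the roxygen docs above
# iris.custom <- umap(iris[, 1:4], n_neighbors = 5, random_state = 123)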
#' project data points onto an existing umap embedding
#'
#' @export
#' @param object trained object of class umap
#' @param data matrix with data
#' @param ... additional arguments (not used)
#'
#' @return new matrix
#'
#' @examples
#' # embed the iris dataset using default settings
#' iris.umap = umap(iris[,1:4])
#'
#' # create a dataset with structure like iris, but with perturbation
#' iris.perturbed = iris[,1:4] + matrix(rnorm(nrow(iris)*4, 0, 0.1), ncol=4)
#'
#' # project perturbed dataset
#' perturbed.embedding = predict(iris.umap, iris.perturbed)
#'
#' # output is a matrix with embedding coordinates
#' head(perturbed.embedding)
#'
predict.umap <- function(object, data, ...) {
umap.check.config.class(object$config)
if (object$config$input == "dist") {
umap.error("predict cannot work from object fitted by input='dist'")
}
if (nrow(object$layout)<=2) {
umap.error("predict cannot work when too-small initial training set")
}
old.seed <- get.global.seed()
if (!is.na(object$config$transform_state)) {
set.seed(object$config$transform_state)
}
# extract method from the umap object
method <- object$config$method
implementations <- c(naive=umap.naive.predict,
"umap-learn"=umap.learn.predict)
if (!method %in% names(implementations)) {
umap.error("unknown prediction method")
}
# carry out the predictions
result <- implementations[[method]](object, data)
# restore state and finish
set.global.seed(old.seed)
result
}
|
/R/umap.R
|
permissive
|
tkonopka/umap
|
R
| false | false | 8,472 |
r
|
#### download file ####
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "project",method = "curl")
#### unzip folder ####
unzip("project")
#### view files in folders ####
list.files(".")
list.files("UCI HAR Dataset")
list.files("UCI HAR Dataset/test")
#### Read activity Data ####
activities_labels <- read.table("UCI HAR Dataset/activity_labels.txt",stringsAsFactors = F)
activities_labels
str(activities_labels)
features<-read.table("UCI HAR Dataset/features.txt",stringsAsFactors = F)
str(features)
#### which features are mean and standard deviation ####
features_mean<-grep("mean()", features[,2],fixed = T) ### index of rows that are mean()
features[features_mean,]
features_std <- grep("std()",features[,2],fixed = T)### index of rows that are std()
features[features_std,]
length(features_mean) ## checking the length of rows
length(features_std)
################################################################
#### read in training data, keeping only the mean and std features ####
train <- read.table("UCI HAR Dataset/train/X_train.txt",col.names = features[,2])
str(train)
names(train)
#### filter mean and std ####
train <- train[,c(features_mean,features_std)]
head(train)
dim(train)
#### Modifying column names ####
names(train) <- gsub(".","",names(train),fixed = T)
names(train) <- sub("mean","Mean",names(train))
names(train)<-sub("std","Std",names(train))
#### Read in activities and subject for training data ####
Subject_training <- read.table("UCI HAR Dataset/train/subject_train.txt",col.names = "Subject")
dim(Subject_training)
head(Subject_training)
activities_training<-read.table("UCI HAR Dataset/train/y_train.txt",col.names = "activities")
dim(activities_training)
head(activities_training)
##### combine subjects,activities, and features, for training data ####
train <- cbind(activities_training,Subject_training,train)
dim(train)
names(train)
##########################################################
#### read in testing data, keeping only the mean and std features ####
test <- read.table("UCI HAR Dataset/test/X_test.txt",col.names = features[,2])
str(test)
names(test)
#### filter mean and std ####
test <- test[,c(features_mean,features_std)]
head(test)
dim(test)
#### Modifying column names ####
names(test) <- gsub(".","",names(test),fixed = T)
names(test) <- sub("mean","Mean",names(test))
names(test)<-sub("std","Std",names(test))
#### Read in activities and subject for testing data ####
Subject_testing <- read.table("UCI HAR Dataset/test/subject_test.txt",col.names = "Subject")
dim(Subject_testing)
head(Subject_testing)
activities_testing<-read.table("UCI HAR Dataset/test/y_test.txt",col.names = "activities")
dim(activities_testing)
head(activities_testing)
##### combine subjects,activities, and features, for testing data ####
test <- cbind(activities_testing,Subject_testing,test)
dim(test)
names(test)
#### Make a data table ####
#################################
#### combine train and test data ########
df<-rbind(train,test)
dim(df)
names(df)
######## Changing activities names ####
df$activities<-as.factor(df$activities) #### convert to factor
levels(df$activities)<-activities_labels$V2 #### change level names
df$activities
##################################
#### create per-subject, per-activity means of every feature ####
require(dplyr)
df_tidy<-as_tibble(df) # tbl_df() is defunct in current dplyr
df_tidy_summary<-df_tidy %>%
  group_by(Subject,activities) %>%
  summarise_all(mean) # funs() is defunct; pass the function directly
write.table(df_tidy_summary,file = "Tidy.txt",row.names = FALSE,quote = F)
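#### Optional check (a sketch): re-read the tidy file written above ####
tidy_check <- read.table("Tidy.txt", header = TRUE)
dim(tidy_check) # expected: 180 rows (30 subjects x 6 activities)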
|
/run_analysis.R
|
no_license
|
Rlopez3013/Tidy-Data
|
R
| false | false | 3,512 |
r
|
# R_code_exam.r
# Copernicus data: https://land.copernicus.vgt.vito.be/PDF/portal/Application.html
# 1. 01_R_code_first.r
# 2. 02_R_code_spatial.r
# 3. 03_R_code_point_patterns.r
# 4. 04_R_code_TeleRil.r
# 5. 05_R_code_multitemp.r
# 6. 06_R_code_multitemp_NO2.r
# 7. 07_R_code_snow.r
# 8. 08_R_code_patches.r
# 9. 09_R_code_crop.r
# 10. 10_R_code_species_distribution_modeling.r
# 11. 11_R_code_examproject.r
#############################################################################
#############################################################################
#############################################################################
### 1. 01_R_code_first.r - First R code for Landscape Ecology
# GZ packages: "install.packages()" -> download packages (afterwards loaded with "library()" [or "require()"])
install.packages("sp")
library(sp)
# GZ dataset and associated functions
data("meuse") # GZ load the "meuse" dataset (heavy-metal concentrations in soil), shipped with the "sp" library
meuse # GZ view the data
head(meuse) # GZ first 6 rows of the dataset
names(meuse) # GZ variable names (dataset columns)
summary(meuse) # GZ basic statistics for the dataset variables
# GZ plots: "pairs()" draws pairwise scatterplots between the variables of a dataset
pairs(meuse) # GZ pairwise plots across all variables
pairs(~cadmium + copper + lead, data = meuse) # GZ pairwise plots for the listed variables only
# GZ exercise: pairs() with four variables [cadmium, copper, lead, zinc]
pairs(~cadmium+copper+lead+zinc,data=meuse)
# GZ [,x:y] selects a subset made of the chosen columns (3, 4, 5, 6 -> cadmium, copper, lead, zinc)
pairs(meuse[,3:6])
# GZ display: choose colours ["col="], symbols ["pch="] and sizes ["cex="] => for symbols, "pch=n" with 1<=n<=25 (each number maps to a different symbol)
pairs(meuse[,3:6],col="blue",pch=18,cex=3)
# GZ "main=" sets the plot title
pairs(meuse[,3:6],col="blue",pch=18,cex=3,main="Primo pairs")
# GZ external helper functions => "panel.correlations" shows the correlation coefficient between variables
panel.correlations<-function(x,y,digits=1,prefix="",cex.cor)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r1=cor(x,y,use="pairwise.complete.obs")
r <- abs(cor(x, y,use="pairwise.complete.obs"))
txt <- format(c(r1, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.9/strwidth(txt)
text(0.5, 0.5, txt, cex = cex * r)
}
# GZ "panel.smoothing" -> fa una specie di regressione tra variabili
panel.smoothing <- function (x, y, col = par("col"), bg = NA, pch = par("pch"),
cex = 1, col.smooth = "red", span = 2/3, iter = 3, ...)
{
points(x, y, pch = pch, col = col, bg = bg, cex = cex)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
col = 1, ...)
}
# GZ "panel.histograms" -> crea istogramma di una variabile
panel.histograms <- function(x, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col="white", ...)
}
# GZ use the functions defined above to build pairwise plots of the four selected variables, also showing the correlation coefficients between them
# GZ lower.panel -> panels below the diagonal
# GZ upper.panel -> panels above the diagonal
# GZ diag.panel -> the diagonal itself
pairs(meuse[,3:6],lower.panel=panel.correlations,upper.panel=panel.smoothing,diag.panel=panel.histograms)
# GZ exercise: swap correlation and smoothing with respect to the diagonal
pairs(meuse[,3:6],lower.panel=panel.smoothing,upper.panel=panel.correlations,diag.panel=panel.histograms)
#############################################################################
#############################################################################
#############################################################################
### 2. 02_R_code_spatial.r - Spatial functions in Landscape Ecology [24/03/2020]
# GZ load packages and data
library(sp)
data(meuse)
head(meuse)
# GZ attach the dataframe -> attach()
attach(meuse)
# GZ plot cadmium and lead choosing colours ["col"], symbols ["pch"] and sizes ["cex"]
plot(cadmium,lead,col="red",pch=19,cex=1)
# GZ exercise: plot copper and zinc with triangle symbol (17) and green colour
plot(copper,zinc,col="green",pch=17)
# GZ change the axis labels => "xlab","ylab" ("rame"/"zinco" are Italian for copper/zinc)
plot(copper,zinc,col="green",pch=17,xlab="rame",ylab="zinco")
# GZ multiframe or multipanel => "par(mfrow=c(number of rows,number of columns))"; the plots to combine in a single window follow
par(mfrow=c(1,2)) # GZ "par(mfrow)" -> controls the plot layout (multi-panel figure); (1,2) means one row and two columns
plot(cadmium,lead,col="red",pch=19,cex=1)
plot(copper,zinc,col="green",pch=17,xlab="rame",ylab="zinco")
# GZ swap the row/column layout [(2,1) instead of (1,2)]
par(mfrow=c(2,1))
plot(cadmium,lead,col="red",pch=19,cex=1)
plot(copper,zinc,col="green",pch=17,xlab="rame",ylab="zinco")
# GZ automatic multiframe -> "GGally" package
install.packages("GGally")
library(GGally)
ggpairs(meuse[,3:6]) # GZ "ggpairs" builds a matrix of plots from a given set of data (here columns three to six of the "meuse" dataset)
# GZ Spatial; "coordinates()" declares that the data carry coordinates (in meuse x and y => via ~x+y)
head(meuse)
coordinates(meuse)=~x+y # GZ the "~" is required: coordinates() takes a formula
plot(meuse)
# GZ "spplot()" -> spatial distribution of a variable (here "zinc")
spplot(meuse,"zinc")
# Spatial-2 [25/03/2020]
# GZ installare pacchetto "sp", caricare dati "meuse" e fissare dataset ["attach()"]
install.packages("sp")
library(sp)
data(meuse)
attach(meuse)
# GZ specificare coordinate del dataset => "coordinates(dataset)=~(coordinata,coordinata)"
coordinates(meuse)=~x+y
# GZ "spplot" dati zinco
spplot(meuse,"zinc")
# GZ esercizio: "spplot" dati rame
spplot(meuse,"copper")
# GZ "bubble(dataset,"variabile")" => rappresentazione spaziale come "spplot", crea un grafico a bolle di grandezza proporzionale a valore variabile
bubble(meuse,"zinc")
# GZ esercizio: bubble rame, colore rosso
bubble(meuse,"copper",col="red")
# GZ esempio: foraminiferi, carbon capture
# GZ creare vettore che contenga dati di campionamento dei foraminiferi chiamandolo "foram" ["<-" per dare nome al vettore c]
foram<-c(10,20,35,55,67,80)
# GZ "carbon" per carbon stock
carbon<-c(5,15,30,70,85,99)
# GZ plot con questi vettori
plot(foram,carbon,col="green",pch=19)
# GZ prendere dati dall'esterno (dati "covid19agg.csv")
# GZ settare cartella di lavoro -> wd("percorso") [in questo caso dico C, cartella lab]
setwd("C:/lab")
# GZ leggere tabella e usarla per costuire un dataframe; head=T per indicare a R che ci sono titoli delle colonne (prima riga è una stringa di testo)
Covid19<-read.table("covid_agg.csv",head=T) # GZ intitolare tabella "Covid19"
#############################################################################
#############################################################################
#############################################################################
### 3. 03_R_code_point_patterns.r - Point pattern analysis [25/03/2020]
# GZ install and load the packages ("ggplot2", "spatstat")
install.packages("ggplot2")
library(ggplot2)
install.packages("spatstat")
library(spatstat)
# GZ set working directory
setwd("C:/lab")
# GZ import the Covid19 data table; "head=T" tells R that the columns have titles; name the table Covid19
Covid19<-read.table("covid_agg.csv",head=T)
head(Covid19) # command to view the table
# GZ plot countries against Covid19 cases (instead of "$" one can do "attach(Covid19); plot(country,cases)")
plot(Covid19$country,Covid19$cases)
# GZ tweak the plot -> label orientation with respect to the axis ("las=0" parallel labels, 1 horizontal, 2 perpendicular, 3 vertical)
plot(Covid19$country,Covid19$cases,las=0)
plot(Covid19$country,Covid19$cases,las=1)
plot(Covid19$country,Covid19$cases,las=2)
plot(Covid19$country,Covid19$cases,las=3)
plot(Covid19$country,Covid19$cases,las=3,cex.axis=0.5) # GZ "cex.axis" -> shrink the label size
# GZ load "ggplot2" (package focused on plot aesthetics and detail)
library(ggplot2)
# GZ "mpg" dataframe from the "ggplot2" package ("mpg" collects US Environmental Protection Agency observations on 38 car models)
data(mpg)
head(mpg)
# GZ example plot with 2 numeric variables -> ggplot2 needs 3 things: data ("mpg"), aesthetics ("aes", the quoting function) and a geometry ("geom_")
ggplot(mpg,aes(x=displ,y=hwy))+geom_point()
ggplot(mpg,aes(x=displ,y=hwy))+geom_line() # GZ lines instead of points
ggplot(mpg,aes(x=displ,y=hwy))+geom_polygon() # GZ polygons
# GZ "ggplot2" on the Covid19 data -> use longitude and latitude to place the points in space, "size=cases" -> bigger points where there are more cases
ggplot(Covid19,aes(x=lon,y=lat,size=cases))+geom_point()
# GZ load the "spatstat" package (spatial point pattern analysis) and attach the dataframe
library(spatstat)
attach(Covid19)
# GZ exercise: area with the highest density of Covid19 cases
# GZ build the dataset for spatstat -> "ppp" creates an object representing a point pattern in the two-dimensional plane
covids<-ppp(lon,lat,c(-180,180),c(-90,90)) # GZ we must say which variables are x and y ("lon","lat") and give their ranges
d<-density(covids) # GZ compute the density of the cases
plot(d) # GZ plot (graphical view of) the density
points(covids,pch=19) # GZ show the Covid19 points on the density map
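# GZ [added] minimal self-contained sketch of the same ppp -> density workflow on synthetic data
# GZ (illustrative only: 50 random points in a unit square, not the Covid data)
set.seed(1)
toy<-ppp(runif(50),runif(50),c(0,1),c(0,1)) # x, y, x-range, y-range
plot(density(toy)) # kernel density surface of the toy pattern
points(toy,pch=19)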
# point patterns-2 [01/04/20]
# GZ set the wd, load the saved file, load "spatstat" and show the Covid case density plot
setwd("C:/lab")
load("point_pattern.RData")
ls() # to see the contents of the loaded file
library(spatstat)
plot(d)
# GZ palette -> change the colours of plot d; (100) tells R how many shades the colour scale should have
cl<-colorRampPalette(c('yellow','orange','red')) (100)
plot(d,col=cl) # GZ density plot with the new colours
# exercise: density plot from green to blue
bluverde<-colorRampPalette(c('blue','grey','green')) (200)
plot(d,col=bluverde)
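# GZ [added] note: colorRampPalette() returns a *function*; calling it with n gives n interpolated colours
pal<-colorRampPalette(c('white','black')) # "pal" is a function, not a vector
pal(5) # -> 5 hex colour codes from white to black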
# GZ show the Covid19 points on the density map
points(covids)
# GZ add country borders to the map
install.packages("rgdal") # GZ "rgdal" -> package needed for the "readOGR" command
library(rgdal)
coastlines<-readOGR("ne_10m_coastline.shp") # GZ "readOGR" -> reads an OGR data source and layer into a suitable spatial vector object; used here to build the border layer
plot(coastlines,add=T) # GZ "add=T" adds the borders to the previous plot without erasing it -> complete figure
# GZ exercise: plot the density map with a new colour scheme and add the coastlines
clr<-colorRampPalette(c('light blue','blue','pink','purple')) (400)
plot(d,col=clr)
plot(coastlines,add=T)
setwd("C:/lab")
load("C:/lab/point_pattern.RData")
library(spatstat)
ls()
library(rgdal)
# GZ exercise: plot the density map with a new palette
coastlines<-readOGR("ne_10m_coastline.shp")
clr<-colorRampPalette(c('light blue','blue','pink','purple')) (400)
plot(d,col=clr,main="density")
plot(coastlines,add=T)
points(covids)
### GZ INTERPOLATION
Covid19
marks(covids)<-Covid19$cases # "marks" -> attach the "cases" values to the "covids" point pattern
s<-Smooth(covids) # "Smooth" -> interpolate a map from the marked data just built
plot(s) # plot the resulting map
# GZ exercise: plot(s) with coastlines and points
cls<-colorRampPalette(c('light blue','blue','green'))(100)
plot(s,col=cls,main="Cases")
points(covids)
plot(coastlines,add=T)
# GZ final map (multiframe with both plots)
par(mfrow=c(2,1))
# GZ first plot: density
clr<-colorRampPalette(c('light blue','blue','pink','purple')) (400)
plot(d,col=clr,main="density")
plot(coastlines,add=T)
points(covids)
# GZ second plot: interpolation of the number of cases
cls<-colorRampPalette(c('light blue','blue','green'))(100)
plot(s,col=cls,main="Cases")
points(covids)
plot(coastlines,add=T)
# San Marino (work with data from a thesis on San Marino, downloaded into "lab")
setwd("C:/lab")
load("C:/lab/Tesi.RData")
ls()
head(Tesi)
library(spatstat)
attach(Tesi)
# GZ Point pattern: x,y,c(xmin,xmax),c(ymin,ymax)
summary(Tesi) # dataset summary: a quick way to see the main info
# GZ "summary" -> longitude: 12.42<x<12.46 and latitude: 43.91<y<43.94
Tesippp<-ppp(Longitude,Latitude,c(12.41,12.47),c(43.90,43.95))
# GZ density map
dT<-density(Tesippp)
dev.off()
plot(dT)
points(Tesippp,col="green")
# GZ set wd and load the packages
setwd("C:/lab")
load("C:/lab/Tesi.RData")
library(spatstat)
library(rgdal)
# GZ dT=density map, Tesi=dataset, Tesippp=point pattern (longitude/latitude coordinates)
head(Tesi)
# GZ attach the value of interest (species richness) to the point pattern, then interpolate
marks(Tesippp)<-Tesi$Species_richness
interpol<-Smooth(Tesippp)
plot(interpol) # GZ map
points(Tesippp)
# GZ load the "San_Marino" vector file and overlay the map built above (so we get the borders)
sanmarino<-readOGR("San_Marino.shp")
plot(sanmarino)
plot(interpol,add=T) # GZ "add=T" -> the interpolation map is drawn on top of the San Marino map
points(Tesippp)
plot(sanmarino,add=T) # GZ -> show the borders again
# GZ exercise: multiframe plot of density and interpolation (two rows, one column)
par(mfrow=c(2,1))
plot(dT,main="Density of points")
points(Tesippp)
plot(interpol,main="Estimate of species richness")
points(Tesippp)
# GZ exercise: as above but with two columns and one row
par(mfrow=c(1,2))
plot(dT,main="Density of points")
points(Tesippp)
plot(interpol,main="Estimate of species richness")
points(Tesippp)
#############################################################################
#############################################################################
#############################################################################
### 4. 04_R_code_TeleRil.r - R code for satellite analyses (remote sensing)
# GZ set wd and packages ("raster","RStoolbox")
setwd("C:/lab")
install.packages("raster") # "raster" for reading, writing, analysing and modelling spatial data
library(raster)
install.packages("RStoolbox") # "RStoolbox" for remote-sensing data analysis
# GZ "brick" imports the selected image and creates a (multi-layer) "RasterBrick" object
p224r63_2011<-brick("p224r63_2011_masked.grd")
# GZ plot the object just created
plot(p224r63_2011) # 7 panels showing an image based on reflectance at the wavelengths listed below
# B1: blue, B2: green, B3: red, B4: near infrared (nir), B5: medium infrared, B6: thermal infrared, B7: medium infrared
# GZ colorRampPalette ("cl") to replot the image with a black-to-light-grey colour scale
cl<-colorRampPalette(c('black','grey','light grey'))(100)
plot(p224r63_2011,col=cl)
# GZ change the colour scale (from 100 down to 5 shades)
cllow<-colorRampPalette(c('black','grey','light grey'))(5)
plot(p224r63_2011,col=cllow)
# GZ plot the blue band (B1)
names(p224r63_2011) # GZ "names" -> view the layer names of the object
# [1] "B1_sre" "B2_sre" "B3_sre" "B4_sre" "B5_sre" "B6_bt" "B7_sre"
clb<-colorRampPalette(c('dark blue','blue','light blue'))(100) # GZ blue palette
plot(p224r63_2011$B1_sre,col=clb)
# GZ exercise: plot the near-infrared band with a red-orange-yellow palette
clnir<-colorRampPalette(c('red','orange','yellow'))(100)
plot(p224r63_2011$B4_sre,col=clnir)
# GZ multiframe plot, four bands
par(mfrow=c(2,2))
# blue
clb<-colorRampPalette(c('dark blue','blue','light blue'))(100)
plot(p224r63_2011$B1_sre,col=clb)
# green
clg<-colorRampPalette(c('dark green','green','light green'))(100)
plot(p224r63_2011$B2_sre,col=clg)
# red
clr<-colorRampPalette(c('dark red','red','pink'))(100)
plot(p224r63_2011$B3_sre,col=clr)
# nir
clnir<-colorRampPalette(c('red','orange','yellow'))(100)
plot(p224r63_2011$B4_sre,col=clnir)
dev.off()
# GZ natural colours
# 3 components: R G B
# 3 bands: R = red band, G = green band, B = blue band
# B1: blue - 1
# B2: green - 2
# B3: red - 3
# B4: near infrared (nir) - 4
# GZ "plotRGB" -> creare plot rosso-verde-blu su tre livelli (tre strati combinati per rappresentare bande rosso, verde e blu)
plotRGB(p224r63_2011, r=3, g=2, b=1, stretch="Lin") # GZ stretch="Lin" per migliorare visibilità immagine
# GZ nir => aggiunta banda infrarosso per rendere immagine più leggibile (necessario togliere una delle altre tre, in questo caso blu)
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
# GZ permette di visualizzare vegetazione
# GZ salvataggio immagine appena ottenuta
pdf("primografico.pdf")
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
dev.off()
# GZ multiframe with different band combinations
par(mfrow=c(2,1))
plotRGB(p224r63_2011, r=3, g=2, b=1, stretch="Lin")
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
dev.off()
# GZ exercise: nir in the R (Red) component
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
# GZ exercise: nir in the G (Green) component
plotRGB(p224r63_2011, r=3, g=4, b=2, stretch="Lin")
# GZ exercise: nir in the B (Blue) component
plotRGB(p224r63_2011, r=3, g=2, b=4, stretch="Lin")
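# GZ [added] reading the false-colour combinations above: vegetation reflects strongly in the nir band,
# GZ so it shows up red when nir is in R (r=4), green when nir is in G (g=4) and blue when nir is in B (b=4)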
### LANDCOVER
# GZ set wd and packages
setwd("C:/lab/p224r63")
library(raster)
# GZ "brick" to import the image
p224r63_2011<-brick("p224r63_2011_masked.grd")
# GZ load "RStoolbox"
library(RStoolbox)
# GZ plot the image in RGB
plotRGB(p224r63_2011,r=4,g=3,b=2,stretch="Lin")
# GZ unsupervised classification of the raster data with "unsuperClass", specifying the number of classes
p224r63_2011c<-unsuperClass(p224r63_2011,nClasses = 4)
# GZ view the new object, which also contains the map
p224r63_2011c
# GZ plot the map (four colours -> the four requested classes)
plot(p224r63_2011c$map)
# GZ new palette (better readability of the plot)
clclass <- colorRampPalette(c('green',"red","blue","black"))(100)
plot(p224r63_2011c$map,col=clclass)
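# GZ [added] sketch: count how many pixels fall in each class (same use of freq() as in section 5);
# GZ note that unsuperClass() is clustering-based (k-means style), so the class numbering may change between runs
freq(p224r63_2011c$map)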
# Day2
# GZ set wd and packages
library(raster)
setwd("C:/lab")
load("TeleRil.RData")
ls()
# GZ import the 1988 and 2011 files ("brick")
p224r63_2011<-brick("p224r63_2011_masked.grd")
p224r63_1988<-brick("p224r63_1988_masked.grd")
# GZ the 1988 image, like the 2011 one, has seven bands (colours):
# B1: blue - 1
# B2: green - 2
# B3: red - 3
# B4: near infrared (nir) - 4
# B5: medium infrared - 5
# B6: thermal infrared - 6
# B7: medium infrared - 7
# GZ plot the 1988 object and view its fields
plot(p224r63_1988)
names(p224r63_1988)
# GZ multiframe plot of the blue (1), green (2), red (3) and nir (4) bands
par(mfrow=c(2,2))
clb<-colorRampPalette(c("dark blue","blue","light blue"))(100) # blue
plot(p224r63_1988$B1_sre,col=clb)
clg<-colorRampPalette(c("dark green","green","light green"))(100) # green
plot(p224r63_1988$B2_sre,col=clg)
clr<-colorRampPalette(c("red","orange","yellow"))(100) # red
plot(p224r63_1988$B3_sre,col=clr)
clnir<-colorRampPalette(c("purple","pink","light pink"))(100) # nir
plot(p224r63_1988$B4_sre,col=clnir)
dev.off()
# GZ image in visible colours (plotRGB "natural colours")
plotRGB(p224r63_1988, r=3, g=2, b=1, stretch="Lin")
# GZ the plot is hard to read => use the infrared (plotRGB "false colours")
# GZ exercise: plotRGB with the infrared component
plotRGB(p224r63_1988,r=4,g=3,b=2,stretch="Lin")
# GZ recall the 2011 image
p224r63_2011
# GZ plot to compare the 1988 and 2011 images
par(mfrow=c(2,1))
plotRGB(p224r63_1988,r=4,g=2,b=1,stretch="Lin")
plotRGB(p224r63_2011,r=4,g=2,b=1,stretch="Lin")
# GZ => farmland is much more developed in 2011
# GZ nir marks the presence of vegetation; bare soil shows up white or light blue
dev.off()
# GZ spectral indices (DVI) => check the health of the vegetation (healthy leaves reflect the infrared)
# GZ DVI = nir - red -> e.g. dvi1988 = nir1988 - red1988 => values differ with plant health (healthy = high nir)
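# GZ [added] worked toy numbers (illustrative, not taken from the image):
# GZ healthy leaf: nir=0.5, red=0.1 -> DVI=0.4 ; stressed leaf: nir=0.3, red=0.2 -> DVI=0.1
# GZ a normalized variant, NDVI=(nir-red)/(nir+red), is often preferred to compare scenes on a fixed -1..1 scale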
# GZ DVI 1988
dvi1988<-p224r63_1988$B4_sre-p224r63_1988$B3_sre
plot(dvi1988)
# GZ exercise: DVI 2011
dvi2011<-p224r63_2011$B4_sre-p224r63_2011$B3_sre
plot(dvi2011)
# GZ change the palette
cldvi<-colorRampPalette(c('light blue','light green','green'))(100)
plot(dvi2011,col=cldvi)
# GZ multitemporal analysis (2011-1988 difference) => the difference between the DVIs of the two years shows the change in vegetation state
difdvi<-dvi2011-dvi1988
plot(difdvi)
cldifdvi<-colorRampPalette(c('red','white','blue'))(100)
plot(difdvi,col=cldifdvi)
dev.off()
# GZ visualize the output
# GZ multiframe: 1988 RGB, 2011 RGB, difdvi
par(mfrow=c(3,1))
plotRGB(p224r63_1988,r=4,g=3,b=2,stretch="Lin")
plotRGB(p224r63_2011,r=4,g=3,b=2,stretch="Lin")
plot(difdvi,col=cldifdvi)
dev.off()
# GZ "aggregate" -> modificare risoluzione (grana) immagine creando nuovo RasterLayer con risoluzione più bassa quindi celle più grandi ("fact=n" è un moltiplicatore che ci dà dei pixel n volte più grandi dei precedenti)
p224r63_2011lr<-aggregate(p224r63_2011,fact=10) # lr=lowresolution
# GZ inserire i due oggetti per vedere caratteristiche dei pixel
p224r63_2011
p224r63_2011lr
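# GZ [added] minimal sketch of what aggregate() does, on a toy raster (assumes "raster" is loaded):
r<-raster(matrix(1:16,4,4)) # 4x4 toy raster
aggregate(r,fact=2) # -> 2x2 raster; by default each new cell is the mean of a 2x2 block of old cells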
# GZ multiframe plot comparing the two resolutions
par(mfrow=c(2,1))
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
plotRGB(p224r63_2011lr, r=4, g=3, b=2, stretch="Lin")
# GZ even lower resolution ("fact=50")
p224r63_2011lr50<-aggregate(p224r63_2011,fact=50)
p224r63_2011lr50
# GZ comparative multiframe plot (original, lr, lr50)
par(mfrow=c(3,1))
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
plotRGB(p224r63_2011lr, r=4, g=3, b=2, stretch="Lin")
plotRGB(p224r63_2011lr50, r=4, g=3, b=2, stretch="Lin")
# GZ DVI lr50 2011
dvi2011lr50<-p224r63_2011lr50$B4_sre-p224r63_2011lr50$B3_sre
plot(dvi2011lr50)
# GZ DVI lr50 1988
p224r63_1988lr50<-aggregate(p224r63_1988,fact=50) # GZ build the 1988 lr50
dvi1988lr50<-p224r63_1988lr50$B4_sre-p224r63_1988lr50$B3_sre
plot(dvi1988lr50)
# GZ difdvi lr50
difdvilr50<-dvi2011lr50-dvi1988lr50
plot(difdvilr50,col=cldifdvi) # GZ reuse the "cldifdvi" palette created earlier
# GZ multiframe of the DVI differences at the two resolutions
par(mfrow=c(2,1))
plot(difdvi,col=cldifdvi)
plot(difdvilr50,col=cldifdvi)
#############################################################################
#############################################################################
#############################################################################
### 5. 05_R_code_multitemp.r - Multitemporal analysis of landcover change
# GZ set wd and packages
setwd("C:/lab")
library(raster)
library(RStoolbox)
library(ggplot2)
# GZ import the images
defor1 <-brick("defor1_.png")
defor2 <-brick("defor2_.png")
# GZ plotRGB "defor1"
defor1 # GZ visualizzare i campi dell'oggetto
# "defor1_.1" "defor1_.2" "defor1_.3"
# defor1_.1 = NIR
# defor1_.2 = red
# defor1_.3 = green
plotRGB(defor1,r=1,g=2,b=3,stretch="Lin") # GZ banda red->nir(r=1), green->red(g=2), blue->green(b=3)
# GZ eserczio: plot seconda data
plotRGB(defor2,r=1,g=2,b=3,stretch="Lin")
# GZ confronto (multiframe) stessa area in momenti differenti (prima e dopo deforestazione)
par(mfrow = c(2,1))
plotRGB(defor1,r=1,g=2,b=3,stretch="Lin")
plotRGB(defor2,r=1,g=2,b=3,stretch="Lin")
dev.off()
# GZ unsupervised classification
d1c<-unsuperClass(defor1,nClasses=2) # GZ builds forest and non-forest classes (pixels split into these two categories)
plot(d1c$map)
cl<-colorRampPalette(c('green','black'))(100)
plot(d1c$map,col=cl)
# GZ exercise: same as above for "defor2"
d2c<-unsuperClass(defor2,nClasses=2)
plot(d2c$map,col=cl)
dev.off()
# GZ comparison of the two dates (multiframe) with the classified pixels
# GZ two rows, one column
par(mfrow=c(2,1))
plot(d1c$map,col=cl)
plot(d2c$map,col=cl)
# GZ two columns, one row
par(mfrow=c(1,2))
plot(d1c$map,col=cl)
plot(d2c$map,col=cl)
dev.off()
# GZ frequency of the two pixel classes in the first image
freq(d1c$map)
# GZ open areas=37039
# GZ forest=304253
# GZ total number of pixels in the first image (needed for the percentages)
totd1<-37039+304253
totd1
# totd1=341292
# GZ percentage frequencies
percent1<-freq(d1c$map)*100/totd1
# GZ forest: 89.1 %
# GZ open areas: 10.9 %
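# GZ [added] a more general sketch, without hard-coding the counts (freq() returns a matrix with "value" and "count" columns):
f1<-freq(d1c$map)
f1[,"count"]*100/sum(f1[,"count"]) # percentage of pixels in each class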
# GZ same procedure for the second image
freq(d2c$map)
# GZ open areas=165055
# GZ forest=177671
totd2<-165055+177671
totd2
# GZ totd2=342726
percent2<-freq(d2c$map)*100/totd2
# GZ open areas: 48.2 %
# GZ forest: 51.8 %
# GZ build vectors for the graphical analysis
cover<-c("Agriculture","Forest")
before<-c(10.9,89.1)
after<-c(48.2,51.8)
# GZ build a new dataset with the values obtained
output<-data.frame(cover,before,after)
output
# Day2
setwd("C:/lab")
load("C:/lab/defor.RData")
library(raster)
library(ggplot2)
install.packages("gridExtra")
library(gridExtra)
# GZ recall the maps
par(mfrow=c(1,2))
cl<-colorRampPalette(c('black','green'))(100)
plot(d1c$map,col=cl)
plot(d2c$map,col=cl)
ls()
output
# cover before after
# 1 Agriculture 10.9 48.2
# 2 Forest 89.1 51.8
# GZ histogram of the cover percentages before deforestation
grafico1<-ggplot(output,aes(x=cover,y=before,color=cover)) +
geom_bar(stat="identity",fill="white")
grafico1 # GZ x axis: "aes" (forest/open areas), y axis: cover percentage
# GZ exercise: histogram after deforestation
grafico2<-ggplot(output,aes(x=cover,y=after,color=cover)) +
geom_bar(stat="identity",fill="white")
grafico2
# GZ exercise: use "grid.arrange" (a "gridExtra" function for comparing histograms side by side) to build one plot with grafico1 and grafico2
# GZ grid.arrange => grid.arrange(plot1,plot2,nrow=1); this function draws several graphs in a single plot
grid.arrange(grafico1,grafico2,nrow=1)
# GZ the change in the cover percentages is evident
# GZ to ease the comparison, unify the scale of the two histograms (force the y limit to 100)
grafico1<-ggplot(output,aes(x=cover,y=before,color=cover)) +
geom_bar(stat="identity",fill="white") +
ylim(0,100)
grafico2 <- ggplot(output, aes(x=cover, y=after, color=cover)) +
geom_bar(stat="identity", fill="white") +
ylim(0, 100)
grid.arrange(grafico1,grafico2,nrow=1)
#############################################################################
#############################################################################
#############################################################################
### 6. 06_R_code_multitemp_NO2.r - Code for the analysis of ESA NO2 data (January-March 2020 => lockdown)
# GZ set wd and packages
setwd("C:/lab")
library(raster)
# GZ import the images -> "raster" because each image has a single band (with several bands one uses "brick")
EN01<-raster("EN_0001.png")
plot(EN01)
# GZ exercise: import all the images
EN02<-raster("EN_0002.png")
EN03<-raster("EN_0003.png")
EN04<-raster("EN_0004.png")
EN05<-raster("EN_0005.png")
EN06<-raster("EN_0006.png")
EN07<-raster("EN_0007.png")
EN08<-raster("EN_0008.png")
EN09<-raster("EN_0009.png")
EN10<-raster("EN_0010.png")
EN11<-raster("EN_0011.png")
EN12<-raster("EN_0012.png")
EN13<-raster("EN_0013.png")
# GZ multiframe plot of the first and last images => comparison
cl<-colorRampPalette(c('red','orange','yellow'))(100)
par(mfrow=c(1,2))
plot(EN01,col=cl)
plot(EN13,col=cl)
dev.off()
# GZ differenza "EN13" - "EN01"
difno2<-EN13-EN01
cldif<-colorRampPalette(c('blue','black','yellow'))(100)
plot(difno2,col=cldif)
# GZ exercise: multiframe plot of all the images
par(mfrow=c(4,4))
plot(EN01,col=cl)
plot(EN02,col=cl)
plot(EN03,col=cl)
plot(EN04,col=cl)
plot(EN05,col=cl)
plot(EN06,col=cl)
plot(EN07,col=cl)
plot(EN08,col=cl)
plot(EN09,col=cl)
plot(EN10,col=cl)
plot(EN11,col=cl)
plot(EN12,col=cl)
plot(EN13,col=cl)
# GZ alternative: plot(EN01,EN02,EN03,EN04,EN05,EN06,EN07,EN08,EN09,EN10,EN11,EN12,EN13,col=cl)
# Day2
# setwd, packages and load
setwd("C:/lab")
load("EN.RData")
ls()
library(raster)
# GZ "list.files" (pacchetto "raster") -> vettore comprendente lista di file in una data directory (cartella creata appositamente - "esa_no2")
setwd("C:/lab/esa_no2")
rlist<-list.files(pattern=".png")
rlist
# GZ "lapply" -> applica funzione indicata ad una lista (anzichè ad un solo file)
# GZ in questo caso "raster" -> importare lista di immagini
listafinale<-lapply(rlist,raster)
listafinale
# GZ "stack" -> trasformare lista in una sorta di agglomerato di n bande (13 in questi caso), come fosse un set multitemporale
EN<-stack(listafinale)
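# GZ [added] equivalent shortcut: stack() also accepts the vector of file names directly
# EN<-stack(rlist)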
cl<-colorRampPalette(c('red','orange','yellow'))(100)
plot(EN,col=cl) # GZ -> view the images contained in the "EN" stack
# GZ difference March ("EN13") - January ("EN01")
difEN<-EN$EN_0013-EN$EN_0001
cld<-colorRampPalette(c('blue','white','red'))(100)
plot(difEN,col=cld)
# GZ boxplot of EN -> compare all the images in a single box plot (setting the graphical options)
boxplot(EN,horizontal=T, # horizontal boxes
outline=F, # drop the outliers
axes=T) # draw the axes
# on average the changes are not dramatic; the clearest changes are in the maxima
#############################################################################
#############################################################################
#############################################################################
### 7. 07_R_code_snow.r - Snow cover analysis code
# GZ set wd and packages
setwd("C:/lab")
install.packages("ncdf4") # package providing an R interface to (NetCDF) binary data files
library(ncdf4)
library(raster)
# GZ import the image downloaded from Copernicus (snow cover, 18/05/2020)
snowmay<-raster("c_gls_SCE500_202005180000_CEURO_MODIS_V1.0.1.nc")
# GZ plot "snowmay"
cl<-colorRampPalette(c('darkblue','blue','light blue'))(100)
plot(snowmay,col=cl)
# GZ set a new wd (folder "snow" => snow-cover images at different times)
setwd("C:/lab/snow")
# GZ import the files -> "rlist"
library(raster)
rlist<-list.files(pattern=".tif",full.names=T)
# GZ "lapply" on the list just created (each "rlist" file imported with "raster")
list_rast<-lapply(rlist,raster)
# GZ group the rasters into a single object -> "stack" (lets us plot them all together at once)
snow.multitemp<-stack(list_rast)
# GZ plot (reusing the palette created above)
plot(snow.multitemp,col=cl)
# GZ multiframe (comparison) 2000 ("snow2000r") - 2020 ("snow2020r")
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r,col=cl)
plot(snow.multitemp$snow2020r,col=cl)
# GZ same z-limits on both maps => easier comparison
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r,col=cl,zlim=c(0,250))
plot(snow.multitemp$snow2020r,col=cl,zlim=c(0,250))
dev.off()
# GZ 2000-2020 difference
difsnow<-snow.multitemp$snow2020r - snow.multitemp$snow2000r
cldif<-colorRampPalette(c('blue','white','red'))(100) # GZ new palette
plot(difsnow,col=cldif) # GZ blue pixels => cover decrease, white => stationary, red => increase
# GZ "source" -> run code loaded from an external file
source("prediction.r")
# GZ slow command => load "predicted.snow.2025" directly instead
# GZ 2025 forecast
predicted.snow.2025.norm<-raster("predicted.snow.2025.norm.tif")
plot(predicted.snow.2025.norm,col=cl)
#############################################################################
#############################################################################
#############################################################################
### 8. 08_R_code_patches.r
# GZ set wd and packages
setwd("C:/lab")
install.packages("igraph")
library(igraph)
library(ggplot2)
library(raster)
# GZ load the raster images -> "raster"
d1c<-raster("d1c.tif")
d2c<-raster("d2c.tif")
# GZ plot to tell the forest areas apart (two-colour palette)
cl<-colorRampPalette(c('green','black'))(100)
par(mfrow=c(1,2))
plot(d1c,col=cl)
plot(d2c,col=cl)
cl<-colorRampPalette(c('black','green'))(100) # map fix => invert the colours
par(mfrow=c(1,2))
plot(d1c,col=cl)
plot(d2c,col=cl)
dev.off()
# GZ values: 2 => forest, 1 => farmland
# GZ keep only the forest pixels -> "reclassify" to recode the values, "cbind" to turn value 1 (agriculture) into NA ("NA" => missing value)
d1c.for<-reclassify(d1c,cbind(1,NA))
d2c.for<-reclassify(d2c,cbind(1,NA))
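# GZ [added] note on the rcl argument: reclassify() takes a matrix whose first column is the old value and whose last column is the new one;
# GZ cbind(1,NA) is the one-row matrix [1, NA] => pixels equal to 1 become NA, all other values are kept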
# GZ comparison multiframe (forest only vs forest+agriculture)
par(mfrow=c(1,2))
cl<-colorRampPalette(c('black','green'))(100)
plot(d1c,col=cl)
plot(d1c.for,col=cl)
# GZ plot the forest-only maps
par(mfrow=c(1,2))
plot(d1c.for)
plot(d2c.for)
# GZ create the patches ("igraph")
library(igraph)
d1c.for.patches<-clump(d1c.for) # "clump" -> join and group neighbouring pixels (create the patches)
d2c.for.patches<-clump(d2c.for)
# GZ "writeRaster" -> export the file in ".tif" format outside R (here to the "lab" folder)
writeRaster(d1c.for.patches,"d1c.for.patches.tif")
writeRaster(d2c.for.patches,"d2c.for.patches.tif")
# GZ exercise: plot the maps side by side
par(mfrow=c(1,2))
clp<-colorRampPalette(c('darkblue','blue','green','orange','yellow','red'))(100) # GZ palette with more colours to see the forest patches better
plot(d1c.for.patches,col=clp)
plot(d2c.for.patches,col=clp)
# GZ number of patches created in the maps
d1c.for.patches # GZ => 301 patches
d2c.for.patches # GZ => 1212 patches
# GZ put the results into a new dataframe
time<-c("Before deforestation","After deforestation") # GZ "time" -> data before and after deforestation
npatches<-c(301,1212) # GZ "npatches" -> number of patches
# GZ build the "output" dataframe
output<-data.frame(time,npatches)
attach(output)
# GZ final plot ("ggplot")
library(ggplot2)
ggplot(output,aes(x=time,y=npatches,color="red"))+geom_bar(stat="identity",fill="white")
#############################################################################
#############################################################################
#############################################################################
### 9. 09_R_code_crop.r
# GZ set wd (snow data already used => folder "snow")
setwd("C:/lab/snow")
# GZ exercise: load all the images in the folder
library(raster)
rlist<-list.files(pattern="snow") # GZ "pattern" -> lets R recognise the files
list.rast<-lapply(rlist, raster)
list.rast
# GZ stack
snow.multitemp<-stack(list.rast)
# GZ plot
clb<-colorRampPalette(c('dark blue','blue','light blue'))(100)
plot(snow.multitemp,col=clb)
# GZ analysis of the multitemporal images
snow.multitemp
plot(snow.multitemp$snow2010r, col=clb) # GZ plot of the 2010 image (Italy between 6 and 20 degrees E, 35 and 50 degrees N)
# GZ zoom on Italy -> "zoom"; first set the new extent ("extension", read as c(xmin,xmax,ymin,ymax))
extension<-c(6,20,35,50)
zoom(snow.multitemp$snow2010r,ext=extension)
zoom(snow.multitemp$snow2010r,ext=extension,col=clb) # GZ 2010 plot, zoom on Italy, "clb" palette
# GZ define the extent by drawing it ("drawExtent")
plot(snow.multitemp$snow2010r, col=clb) # GZ replot the original image
zoom(snow.multitemp$snow2010r,ext=drawExtent())
extension<-c(6,20,35,50)
snow2010r.italy<-crop(snow.multitemp$snow2010r,extension) # GZ "crop" -> get an image of the clipped area
plot(snow2010r.italy,col=clb) # GZ plot the result
# GZ exercise: crop Italy from the whole stack
extension<-c(6,20,35,50)
snow.multitemp.Italy<-crop(snow.multitemp,extension)
plot(snow.multitemp.Italy,col=clb)
# GZ set a uniform legend
snow.multitemp.Italy
# GZ min->20, MAX->200
# GZ add the limits => "zlim=c(20,200)"
plot(snow.multitemp.Italy,col=clb,zlim=c(20,200))
# GZ boxplot => the maximum snow-cover value decreases over time
boxplot(snow.multitemp.Italy,horizontal=T,outline=F)
#############################################################################
#############################################################################
#############################################################################
### 10. 10_R_code_species_distribution_modeling.r - Species Distribution Modeling
# GZ packages (no setwd because the data ship with the "sdm" package)
install.packages("sdm")
library(sdm)
library(raster)
library(rgdal)
# GZ "system.file" -> caricare file da utilizzare contenuto in "sdm"
file<-system.file("external/species.shp",package="sdm")
# GZ "shapefile" (pacchetto "raster")
species<-shapefile(file)
# GZ caratteristiche dataset
species
species$Occurrence # GZ valori "Occurrence" # ogni punto associato a presenza assenza specie => "Occurrence" = 0(assente) o 1(presente)
# GZ plot dataset "species"
plot(species) # GZ mostrate presenze e assenze
# GZ diversificare assenze (rosso) da presenze (blu)
plot(species[species$Occurrence==1,],col='blue',pch=16)
points(species[species$Occurrence==0,],col='red',pch=16)
# GZ variabili ambientali disponibili (cartella "external", pacchetto "sdm")
path <- system.file("external",package="sdm")
# GZ importare file per prevedere distribuzione spaziale in base a variabili ambientali
lst<-list.files(path=path,pattern='asc$',full.names=T)
lst # GZ variabili: elevation, precipitation, temperature, vegetation
preds<-stack(lst) # GZ stack => predittore distribuzione
cl<-colorRampPalette(c('yellow','orange','red'))(100) # GZ palette
plot(preds,col=cl) # GZ distribuzione probabilmente relazionata a valori variabili
# GZ plot elevation
plot(preds$elevation,col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ aggiungere punti presenza => specie presente a bassa quota
# GZ temperature
plot(preds$temperature, col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ => specie non gradisce basse temperature
# GZ precipitation
plot(preds$precipitation, col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ => condizioni medie sono ottimali
# GZ vegetation
plot(preds$vegetation, col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ => elevata copertura vegetale è favorevole
# GZ sintesi: bassa quota, temperatura medio-alta, piovosità media, buona copertura vegetale
# GZ Generalized Linear Model (glm)
d<-sdmData(train=species,predictors=preds) # GZ indicare a R dati relativi a specie e variabili da considerare
d
# GZ modello
m1<-sdm(Occurrence~elevation+precipitation+temperature+vegetation,data=d,methods='glm')
# GZ previsione (creare mappa predittiva distribuzione in base alle quattro variabili) -> "predict"
p1<-predict(m1,newdata=preds)
plot(p1,col=cl)
points(species[species$Occurrence== 1,],pch=16)
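# GZ [added] note: with a 0/1 response the glm is a presence/absence (logistic-type) model,
# GZ so the predicted map p1 can be read as habitat suitability between 0 and 1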
#############################################################################
#############################################################################
#############################################################################
### 11. 11_R_code_project.r
# GZ set wd and packages
setwd("C:/lab/exam")
library(ncdf4)
library(raster)
# GZ import the albedo data and group them
examlist<-list.files(pattern=".nc",full.names=T)
list_rast<-lapply(examlist,raster)
alb.multitemp<-stack(list_rast)
# GZ rename the files for convenience
alb.jan2000<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.1
alb.jan2010<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.2
alb.jan2020<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.3
alb.jul2000<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.4
alb.jul2010<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.5
alb.jul2020<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.6
# GZ plot
cl<-colorRampPalette(c('red','orange','yellow'))(100)
plot(alb.multitemp,col=cl,zlim=c(0,1)) # GZ skipped: six panels are too cluttered and my palette looks bad
# GZ January comparison
par(mfrow=c(1,3))
plot(alb.jan2000,zlim=c(0,1))
plot(alb.jan2010,zlim=c(0,1))
plot(alb.jan2020,zlim=c(0,1))
# GZ July comparison
par(mfrow=c(1,3))
plot(alb.jul2000,zlim=c(0,1))
plot(alb.jul2010,zlim=c(0,1))
plot(alb.jul2020,zlim=c(0,1))
# GZ crop the Alpine arc, winter
extension<-c(0,20,42,50)
alb.alps.2000.jan<-crop(alb.jan2000,extension)
alb.alps.2010.jan<-crop(alb.jan2010,extension)
alb.alps.2020.jan<-crop(alb.jan2020,extension)
par(mfrow=c(1,3))
plot(alb.alps.2000.jan,zlim=c(0,1))
plot(alb.alps.2010.jan,zlim=c(0,1))
plot(alb.alps.2020.jan,zlim=c(0,1))
# GZ crop the Alpine arc, summer
alb.alps.2000.jul<-crop(alb.jul2000,extension)
alb.alps.2010.jul<-crop(alb.jul2010,extension)
alb.alps.2020.jul<-crop(alb.jul2020,extension)
par(mfrow=c(1,3))
plot(alb.alps.2000.jul,zlim=c(0,1))
plot(alb.alps.2010.jul,zlim=c(0,1))
plot(alb.alps.2020.jul,zlim=c(0,1))
# GZ winter-summer albedo differences across the years
dif1<-alb.jan2000-alb.jul2000
dif2<-alb.jan2020-alb.jul2020
cldif<-colorRampPalette(c('blue','white','red'))(100)
par(mfrow=c(1,2))
plot(dif1,col=cldif)
plot(dif2,col=cldif)
# I expected clearer differences; perhaps a reading error
# GZ comparison with the snow cover => matching patterns expected
# GZ import the snow-cover rasters
snowlist<-list.files(pattern="snow",full.names=T)
list_snow<-lapply(snowlist,raster)
snow.multitemp<-stack(list_snow)
# GZ comparison (winter months used to make the contrast more visible)
clsnow<-colorRampPalette(c('darkblue','blue','light blue'))(100)
par(mfrow=c(2,2))
plot(snow.multitemp$snow2000r,col=clsnow,zlim=c(0,250))
plot(snow.multitemp$snow2020r,col=clsnow,zlim=c(0,250))
plot(alb.jan2000,zlim=c(0,1))
plot(alb.jan2020,zlim=c(0,1))
# GZ crop the Alpine arc (albedo and snow)
snow.alps.2000<-crop(snow.multitemp$snow2000r,extension)
snow.alps.2020<-crop(snow.multitemp$snow2020r,extension)
par(mfrow=c(2,2))
plot(alb.alps.2000.jan,zlim=c(0,1))
plot(alb.alps.2020.jan,zlim=c(0,1))
plot(snow.alps.2000,col=clsnow,zlim=c(0,250))
plot(snow.alps.2020,col=clsnow,zlim=c(0,250))
# GZ compare the 2000-2020 differences for albedo and snow
dif3<-alb.jan2000-alb.jan2020
difsnow<-snow.multitemp$snow2000r-snow.multitemp$snow2020r
par(mfrow=c(2,2))
plot(dif3,col=cldif)
plot(difsnow,col=cldif)
# GZ crop the differences over the Alps
alb.dif.alps<-crop(dif3,extension)
snow.dif.alps<-crop(difsnow,extension)
par(mfrow=c(1,2))
plot(alb.dif.alps,col=cldif)
plot(snow.dif.alps,col=cldif)
# GZ the difference is much more evident in the snow cover (as expected: for albedo even a thin snow layer is enough)
# R_code_exam.r
# Copernicus data: https://land.copernicus.vgt.vito.be/PDF/portal/Application.html
# 1. 01_R_code_first.r
# 2. 02_R_code_spatial.r
# 3. 03_R_code_point_patterns.r
# 4. 04_R_code_TeleRil.r
# 5. 05_R_code_multitemp.r
# 6. 06_R_code_multitemp_NO2.r
# 7. 07_R_code_snow.r
# 8. 08_R_code_patches.r
# 9. 09_R_code_crop.r
# 10. 10_R_code_species_distribution_modeling.r
# 11. 11_R_code_examproject.r
#############################################################################
#############################################################################
#############################################################################
### 1. 01_R_code_first.r - Primo codice R Ecologia del Paesaggio
# GZ pacchetti: "install.packages()" -> scaricare pacchetti (poi richiamabili con comando "library()" [o "require()])
install.packages("sp")
library(sp)
# GZ dataset e funzioni associate
data("meuse") # GZ richiamo dataset "meuse" (dati su presenza metalli pesanti nel terreno), inserito nella libreria "sp"
meuse # GZ visualizzare dati
head(meuse) # GZ prime 6 righe del dataset
names(meuse) # GZ nomi variabili (colonne del dataset)
summary(meuse) # GZ riporta statistiche di base per le variabili del dataset
# GZ grafici: "pairs()" per creare grafici a coppie tra variabili di un dataset
pairs(meuse) # GZ grafici a coppie tra tutte le variabili
pairs(~cadmium + copper + lead, data = meuse) # GZ grafici a coppie tra le variabili indicate
# GZ esercizio: pairs() quattro variabili [cadmium, copper, lead, zinc]
pairs(~cadmium+copper+lead+zinc,data=meuse)
# GZ [,x:y] per selezionare subset composto da righe selezionate (3, 4, 5, 6 -> cadmium, copper, lead, zinc)
pairs(meuse[,3:6])
# GZ visualizzazione: scelgo colori["col="], simboli["pch="] e dimensioni["cex="] => per simboli "pch=n" con 1<n<25 (ad ogni numero un diverso simbolo)
pairs(meuse[,3:6],col="blue",pch=18,cex=3)
# GZ "main=" per dare titolo al grafico
pairs(meuse[,3:6],col="blue",pch=18,cex=3,main="Primo pairs")
# GZ prendere funzioni esterne => "panel.correlations" indica coefficiente di correlazione tra variabili
panel.correlations<-function(x,y,digits=1,prefix="",cex.cor)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r1=cor(x,y,use="pairwise.complete.obs")
r <- abs(cor(x, y,use="pairwise.complete.obs"))
txt <- format(c(r1, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.9/strwidth(txt)
text(0.5, 0.5, txt, cex = cex * r)
}
# GZ "panel.smoothing" -> fa una specie di regressione tra variabili
panel.smoothing <- function (x, y, col = par("col"), bg = NA, pch = par("pch"),
cex = 1, col.smooth = "red", span = 2/3, iter = 3, ...)
{
points(x, y, pch = pch, col = col, bg = bg, cex = cex)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
col = 1, ...)
}
# GZ "panel.histograms" -> crea istogramma di una variabile
panel.histograms <- function(x, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col="white", ...)
}
# GZ uso funzioni precedentemente create per costruire grafici a coppie fra le quattro variabili selezionate, in cui vengono mostrati anche coefficienti di correlazione tra le variabili
# GZ lower.panel -> parte sopra la diagonale
# GZ upper.panel -> parte sotto la diagonale
# GZ diag.panel -> diagonale
pairs(meuse[,3:6],lower.panel=panel.correlations,upper.panel=panel.smoothing,diag.panel=panel.histograms)
# GZ esercizio: invertire posto rispetto alla diagonale di correlazione e interpolazione
pairs(meuse[,3:6],lower.panel=panel.smoothing,upper.panel=panel.correlations,diag.panel=panel.histograms)
#############################################################################
#############################################################################
#############################################################################
### 2. 02_R_code_spatial.r - Funzioni sapziali in Ecologia del Paesaggio [24/03/2020]
# GZ caricare pacchetti e dati
library(sp)
data(meuse)
head(meuse)
# GZ fissare dataframe -> attach()
attach(meuse)
# GZ plot cadmium e lead segliendo colori["col"], caratteri["pch"] e dimensioni["cex"]
plot(cadmium,lead,col="red",pch=19,cex=1)
# GZ esercizio: plot copper e zinc con carattere triangolo(17) e colore verde
plot(copper,zinc,col="green",pch=17)
# GZ cambiare etichette relative ad assi del grafico => "xlab","ylab"
plot(copper,zinc,col="green",pch=17,xlab="rame",ylab="zinco")
# GZ multiframe o multipanel => "par(mfrow=c(numero righe,numero colonne))"; a capo i plot che si vogliono mettere in una sola finestra
par(mfrow=c(1,2)) # GZ "par(mfrow)" -> funzione per gestire aspetto dei grafici (creare diagramma a più riquadri); (1,2) indica una riga e due colonne
plot(cadmium,lead,col="red",pch=19,cex=1)
plot(copper,zinc,col="green",pch=17,xlab="rame",ylab="zinco")
# GZ invertire grafici riga/colonna [(2,1) anzichè (1,2)]
par(mfrow=c(2,1))
plot(cadmium,lead,col="red",pch=19,cex=1)
plot(copper,zinc,col="green",pch=17,xlab="rame",ylab="zinco")
# GZ multiframe automatico -> pacchetto "GGally"
install.packages("GGally")
library(GGally)
ggpairs(meuse[,3:6]) # GZ "ggpairs" crea matrice di grafici con un determinato set di dati (in questo caso dalla terza alla sesta colonna del dataset "meuse")
# GZ Spatial; "coordinates()" per indicare che i dati hanno coordinate (in meuse x e y => facendo ~x+y)
head(meuse)
gpairs
coordinates(meuse)=x+y
plot(meuse)
# GZ "spplot()" -> distribuzione spaziale di una variabile (in questo caso "zinc")
spplot(meuse,"zinc")
# Spatial-2 [25/03/2020]
# GZ installare pacchetto "sp", caricare dati "meuse" e fissare dataset ["attach()"]
install.packages("sp")
library(sp)
data(meuse)
attach(meuse)
# GZ specificare coordinate del dataset => "coordinates(dataset)=~(coordinata,coordinata)"
coordinates(meuse)=~x+y
# GZ "spplot" dati zinco
spplot(meuse,"zinc")
# GZ esercizio: "spplot" dati rame
spplot(meuse,"copper")
# GZ "bubble(dataset,"variabile")" => rappresentazione spaziale come "spplot", crea un grafico a bolle di grandezza proporzionale a valore variabile
bubble(meuse,"zinc")
# GZ esercizio: bubble rame, colore rosso
bubble(meuse,"copper",col="red")
# GZ esempio: foraminiferi, carbon capture
# GZ creare vettore che contenga dati di campionamento dei foraminiferi chiamandolo "foram" ["<-" per dare nome al vettore c]
foram<-c(10,20,35,55,67,80)
# GZ "carbon" per carbon stock
carbon<-c(5,15,30,70,85,99)
# GZ plot con questi vettori
plot(foram,carbon,col="green",pch=19)
# GZ prendere dati dall'esterno (dati "covid19agg.csv")
# GZ settare cartella di lavoro -> wd("percorso") [in questo caso dico C, cartella lab]
setwd("C:/lab")
# GZ leggere tabella e usarla per costuire un dataframe; head=T per indicare a R che ci sono titoli delle colonne (prima riga è una stringa di testo)
Covid19<-read.table("covid_agg.csv",head=T) # GZ intitolare tabella "Covid19"
#############################################################################
#############################################################################
#############################################################################
### 03_R_code_point_patterns.r - Analisi point patterns [25/03/2020]
# GZ installare e richiamare pacchetti ("ggplot2", "spatstat")
intall.packages("ggplot2")
library(ggplot2)
install.packages("spatstat)
library(spatstat)
# GZ set working directory
setwd("C:/lab")
# GZ importare tabella dati Covid19; "head=T" per indicare a R che ci sono i titoli delle colonne; dare alla tabella il nome Covid19
Covid19<-read.table("covid_agg.csv",head=T)
head(Covid19) # comando per vedere tabella
# GZ creare plot che associa Paesi e casi di Covid19 (anzichè "$" si può fare "attach(Covid19) plot(country,cases)")
plot(Covid19$country,Covid19$cases)
# GZ modificare struttura plot -> posizione etichette rispetto ad asse ("las=0" etichette parallele, 1 orizzontali, 2 perpendicolari, 3 verticali)
plot(Covid19$country,Covid19$cases,las=0)
plot(Covid19$country,Covid19$cases,las=1)
plot(Covid19$country,Covid19$cases,las=2)
plot(Covid19$country,Covid19$cases,las=3)
plot(Covid19$country,Covid19$cases,las=3,cex.axis=0.5) # GZ "cex.axis" -> rimpicciolire dimensione etichette
# GZ richiamare "ggplot2" (pacchetto per estetica e cura dei dettagli)
library(ggplot2)
# GZ dataframe "mpg" da pacchetto "ggplot2" ("mpg" raccoglie osservazioni US Environmental Protection Agency su 38 modelli di auto)
data(mpg)
head(mpg)
# GZ esempio di plot con 2 variabili numeriche -> ggplot2 ha bisogno di 3 cose: dati ("mpg"), estetica del grafico ("aes", funzione di quotazione) e geometria ("geom_")
ggplot(mpg,aes(x=displ,y=hwy))+geom_point()
ggplot(mpg,aes(x=displ,y=hwy))+geom_line() # GZ linee anzichè punti nella visualizzazione
ggplot(mpg,aes(x=displ,y=hwy))+geom_polygon() # GZ poligoni
# GZ "ggplot2" per dati Covid19 -> usare longitudine e latitudine per avere i punti nello spazio, "size=cases" -> punti più grandi dove ci sono più casi
ggplot(Covid19,aes(x=lon,y=lat,size=cases))+geom_point()
# GZ richiamare pacchetto "spatstat" (mostra analisi dei modelli dei punti spaziali) e fissare dataframe
library(spatstat)
attach(covid)
# GZ esercizio: zona con più alta densità casi di Covid19
# GZ creare dataset per spatstat -> "ppp" crea un oggetto che rappresenta un insieme di dati del pattern puntiforme nel piano bidimensionale
covids<-ppp(lon,lat,c(-180,180),c(-90,90)) # GZ necessario specificare cosa indicano x e y ("lon","lat") e definirne il range
d<-density(covids) # GZ comando per calcolare densità dei casi
plot(d) # GZ plot (rappresentazione grafica) densità
points(covids,pch=19) # GZ mostare i punti Covid19 sulla mappa di densità
# point patterns-2 [01/04/20]
# GZ settare wd, caricare file salvato, richiamare "spatstat" e mostrare grafico densità casi Covid
setwd("C:/lab")
load("point_pattern.RData")
ls() # per vedere contenuto del file caricato
library(spatstat)
plot(d)
# GZ palette -> modificare colori del plot d; (100) per dire a R quante sfumature deve avere la scala di colori
cl<-colorRampPalette(c('yellow','orange','red')) (100)
plot(d,col=cl) # GZ plot densità con nuovi colori
# esercizio: plot densità dal verde al blu
bluverde<-colorRampPalette(c('blue','grey','green')) (200)
plot(d,col=bluverde)
# GZ mostare punti Covid19 sulla mappa di densità
points(covids)
# GZ inserire nella mappa confini degli stati
install.packages("rgdal") # GZ "rgdal" -> pacchetto necessario per usare il comando "readOGR"
library(rgdal)
coastlines<-readOGR("ne_10m_coastline.shp") # GZ "readOGR" -> funzione che legge origine dati OGR e un layer in un oggetto vettoriale spaziale adatto, serve per creare layer dei confini
plot(coastlines,add=T) # GZ "add=T" per aggiungere confini al vecchio plot senza eliminarlo -> grafico completo
# GZ esercizio: plot della mappa di densità con nuova colorazione e aggiunta coastlines
clr<-colorRampPalette(c('light blue','blue','pink','purple')) (400)
plot(d,col=clr)
plot(coastlines,add=T)
setwd("C:/lab")
load("C:/lab/point_ppattern.RData")
library(spatstat)
ls()
library(rgdal)
# GZ esercizio: plot mappa di densità con nuova palette
coastlines<-readOGR("ne_10m_coastline.shp")
clr<-colorRampPalette(c('light blue','blue','pink','purple')) (400)
plot(d,col=clr,main="density")
plot(coastlines,add=T)
points(covids)
### GZ INTERPOLATION
covid
marks(covids)<-covid$cases # "marks" -> associare dati categoria "cases" al pointpattern "covids"
s<-Smooth(covids) # "Smooth" -> creare mappa con i dati appena costruiti
plot(s) # plot mappa appena creata
# GZ esercizio: plot(s) con coastlines e punti
cls<-colorRampPalette(c('light blue','blue','green'))(100)
plot(s,col=cls,main="Cases")
points(covids)
plot(coastlines,add=T)
# GZ mappa finale (multiframe con entrambi i plot fatti)
par(mfrow=c(2,1))
# GZ primo plot: densità
clr<-colorRampPalette(c('light blue','blue','pink','purple')) (400)
plot(d,col=clr,main="density")
plot(coastlines,add=T)
points(covids)
# GZ secondo plot: interpolazione numero di casi
cls<-colorRampPalette(c('light blue','blue','green'))(100)
plot(s,col=cls,main="Cases")
points(covids)
plot(coastlines,add=T)
# San Marino (lavorare con set di dati di una tesi su San Marino scaricati in "lab" )
setwd("C:/lab")
load("C:/lab/Tesi.RData")
ls()
head(Tesi)
library(spatstat)
attach(Tesi)
# GZ Point pattern: x,y,c(xmin,xmax),c(ymin,ymax)
summary(Tesi) # sommario del dataset, posso trovare rapidamente le info principali
# GZ "summary" -> longitudine: 12.42<x<12.46 e latitudine: 43.91<y<43.94
Tesippp<-ppp(Longitude,Latitude,c(12.41,12.47),c(43.90,43.95))
# GZ Mappa densità
dT<-density(Tesippp)
dev.off
plot(dT)
points(Tesippp,col="green")
# GZ set wd e richiamo pacchetti
setwd("C:/lab")
load("C:/lab/Tesi.RData")
library(spatstat)
library(rgdal)
# GZ dt=density map, Tesi=dataset, Tesippp=point pattern (coordinate longitudine e latitudine)
head(Tesi)
# GZ associare al point pattern il valore d'interesse (ricchezza di specie) e poi procedere con l'interpolazione
marks(Tesippp)<-Tesi$Species_richness
interpol<-Smooth(Tesippp)
plot(interpol) # GZ mappa
points(Tesippp)
# GZ caricare il file vettoriale "San_Marino" e sovrapponiamo la mappa costruita prima (così da avere i confini)
sanmarino<-readOGR("San_Marino.shp")
plot(sanmarino)
plot(interpol,add=T) # GZ "add=T" per indicare che mappa di interpolazione sovrapposta a mappa di San Marino
points(Tesippp)
plot(sanmarino,add=T) # GZ -> vedere nuovamente confini
# GZ esercizio: plot multiframe densità e interpolazione (due righe, una colonna)
par(mfrow=c(2,1))
plot(dT,main="Density of points")
points(Tesippp)
plot(interpol,main="Estimate of species richness")
points(Tesippp)
# GZ esercizio: come prima ma due colonne e una riga
par(mfrow=c(1,2))
plot(dT,main="Density of points")
points(Tesippp)
plot(interpol,main="Estimate of species richness")
points(Tesippp)
#############################################################################
#############################################################################
#############################################################################
### 4. 04_R_code_TeleRil.r - codice R per analisi satellitari (telerilevamento)
# GZ set wd e pacchetti ("raster","RStoolbox")
setwd("C:/lab")
install.packages("raster") # "raster" per lettura, scrittura, analisi e modellizzazione di dati spaziali
library(raster)
install.packages("RStoolbox") # "RStoolbox" per analisi dati mediante telerilevamento
# GZ funzione "brick" per importare immagine selezionata e creare ogetto "RasterBrick" (multistrato)
p224r63_2011<-brick("p224r63_2011_masked.grd")
# GZ plot oggetto appena creato
plot(p224r63_2011) # 7 riquadri che mostrano un'immagine basata su riflettanza a varie lunghezze d'onda, come indicato sotto
# B1: blue, B2: green, B3: red, B4: near infrared (nir), B5: medium infrared, B6: thermal infrared, B7: medium infrared
# GZ RampPalette ("cl") per avere immagini con scala di colori da bianco a nero una volta rifatto il comando plot con specifica del colore
cl<-colorRampPalette(c('black','grey','light grey'))(100)
plot(p224r63_2011,col=cl)
# GZ modifica scala cromatica (da 100 a 5 sfumature)
cllow<-colorRampPalette(c('black','grey','light grey'))(5)
plot(p224r63_2011,col=cllow)
# GZ plot banda blu (B1)
names(p224r63_2011) # GZ "names" -> visionare nomi oggetto
# [1] "B1_sre" "B2_sre" "B3_sre" "B4_sre" "B5_sre" "B6_bt" "B7_sre"
clb<-colorRampPalette(c('dark blue','blue','light blue'))(100) # GZ palette blu
plot(p224r63_2011$B1_sre,col=clb)
# GZ esercizio: plottare banda infrarosso vicino palette rosso-arancione-giallo
clnir<-colorRampPalette(c('red','orange','yellow'))(100)
plot(p224r63_2011$B4_sre,col=clnir)
# GZ plot multiframe, quattro bande
par(mfrow=c(2,2))
# blue
clb<-colorRampPalette(c('dark blue','blue','light blue'))(100)
plot(p224r63_2011$B1_sre,col=clb)
# green
clg<-colorRampPalette(c('dark green','green','light green'))(100)
plot(p224r63_2011$B2_sre,col=clg)
# red
clr<-colorRampPalette(c('dark red','red','pink'))(100)
plot(p224r63_2011$B3_sre,col=clr)
# nir
clnir<-colorRampPalette(c('red','orange','yellow'))(100)
plot(p224r63_2011$B4_sre,col=clnir)
dev.off()
# GZ natural colours
# 3 componenti: R G B
# 3 bande: R = banda rosso, G = banda verde, B = banda blu
# B1: blue - 1
# B2: green - 2
# B3: red - 3
# B4: near infrared (nir) - 4
# GZ "plotRGB" -> creare plot rosso-verde-blu su tre livelli (tre strati combinati per rappresentare bande rosso, verde e blu)
plotRGB(p224r63_2011, r=3, g=2, b=1, stretch="Lin") # GZ stretch="Lin" per migliorare visibilità immagine
# GZ nir => aggiunta banda infrarosso per rendere immagine più leggibile (necessario togliere una delle altre tre, in questo caso blu)
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
# GZ permette di visualizzare vegetazione
# GZ salvataggio immagine appena ottenuta
pdf("primografico.pdf")
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
dev.off()
# GZ multiframe bande diverse
par(mfrow=c(2,1))
plotRGB(p224r63_2011, r=3, g=2, b=1, stretch="Lin")
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
dev.off()
# GZ esercizio: nir nella compnente R(Red)
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
# GZ esercizio: nir nella componente G(Green)
plotRGB(p224r63_2011, r=3, g=4, b=2, stretch="Lin")
# GZ esercizo: nir nella componente B(Blue)
plotRGB(p224r63_2011, r=3, g=2, b=4, stretch="Lin")
### LANDCOVER
# GZ setwd e pacchetti
setwd("C:/lab/p224r63")
library(raster)
# GZ "brick" per importare immagine
p224r63_2011<-brick("p224r63_2011_masked.grd")
# GZ richiamare "RStoolbox"
library(RStoolbox)
# GZ plottare immagine in RGB
plotRGB(p224r63_2011,r=4,g=3,b=2,stretch="Lin")
# GZ classificazione dati raster con "unsuperClass", specificando numero di classi
p224r63_2011c<-unsuperClass(p224r63_2011,nClasses = 4)
# GZ visualizzare nuovo modello contenente anche mappa
p224r63_2011c
# GZ plot mappa (quattro colori -> quattro classi specificate)
plot(p224r63_2011c$map)
# GZ nuova palette (migliore visualizzazione del grafico)
clclass <- colorRampPalette(c('green',"red","blue","black"))(100)
plot(p224r63_2011c$map,col=clclass)
# Day2
# GZ setwd e pacchetti
library(raster)
setwd("C:/lab")
load("TeleRil.RData")
ls()
# GZ importare file 1988 e 2011 ("brick")
p224r63_2011<-brick("p224r63_2011_masked.grd")
p224r63_1988<-brick("p224r63_1988_masked.grd")
# GZ immagine 1988, come 2011 ha sette bande (colori):
# B1: blue - 1
# B2: green - 2
# B3: red - 3
# B4: near infrared (nir) - 4
# B4: near infrared (nir)
# B5: medium infrared
# B6: thermal infrared
# B7: medium infrared
# GZ plot oggetto 1988 e visualizzare campi
plot(p224r63_1988)
names(p224r63_1988)
# GZ plot multiframe per banda blu (1), verde (2), rosso (3) e nir (4)
par(mfrow=c(2,2))
clb<-colorRampPalette(c("dark blue","blue","light blue"))(100) # blue
plot(p224r63_1988$B1_sre,col=clb)
clg<-colorRampPalette(c("dark green","green","light green"))(100) # green
plot(p224r63_1988$B2_sre,col=clg)
clr<-colorRampPalette(c("red","orange","yellow"))(100) # red
plot(p224r63_1988$B3_sre,col=clr)
clnir<-colorRampPalette(c("purple","pink","light pink"))(100) # nir
plot(p224r63_1988$B4_sre,col=clnir)
dev.off()
# GZ immagine con colori visibili (plotRGB "natural colours")
plotRGB(p224r63_1988, r=3, g=2, b=1, stretch="Lin")
# GZ grafico poco comprensibile => usare infrarosso (plotRGB "false colours"
# GZ esercizio: plotRGB con componenete infrarossa
plotRGB(p224r63_1988,r=4,g=3,b=2,stretch="Lin")
# GZ recall the 2011 image
p224r63_2011
# GZ plot to compare the 1988 and 2011 images
par(mfrow=c(2,1))
plotRGB(p224r63_1988,r=4,g=2,b=1,stretch="Lin")
plotRGB(p224r63_2011,r=4,g=2,b=1,stretch="Lin")
# GZ => the agricultural areas are much more developed in 2011
# GZ nir marks the presence of vegetation; bare soil patches show up white or light blue
dev.off()
# GZ spectral indices (DVI) => check the vegetation's health (healthy leaves reflect the infrared)
# GZ DVI = nir - red -> e.g. dvi1988 = nir1988 - red1988 => results vary with plant health (healthy = high nir)
# GZ DVI 1988
dvi1988<-p224r63_1988$B4_sre-p224r63_1988$B3_sre
plot(dvi1988)
# GZ exercise: DVI 2011
dvi2011<-p224r63_2011$B4_sre-p224r63_2011$B3_sre
plot(dvi2011)
# GZ change the palette
cldvi<-colorRampPalette(c('light blue','light green','green'))(100)
plot(dvi2011,col=cldvi)
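# Added sketch (not in the original course code): DVI depends on the sensor's
# radiometric scale, so the normalized NDVI = (nir - red)/(nir + red), bounded
# in [-1, 1], is often preferred when comparing dates; band names as above.
ndvi2011<-(p224r63_2011$B4_sre-p224r63_2011$B3_sre)/(p224r63_2011$B4_sre+p224r63_2011$B3_sre)
plot(ndvi2011,col=cldvi)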
# GZ multitemporal analysis (2011-1988 difference) => the difference between the two years' DVIs shows the change in vegetation state
difdvi<-dvi2011-dvi1988
plot(difdvi)
cldifdvi<-colorRampPalette(c('red','white','blue'))(100)
plot(difdvi,col=cldifdvi)
dev.off()
# GZ visualize the output
# GZ multiframe 1988RGB, 2011RGB, difdvi
par(mfrow=c(3,1))
plotRGB(p224r63_1988,r=4,g=3,b=2,stretch="Lin")
plotRGB(p224r63_2011,r=4,g=3,b=2,stretch="Lin")
plot(difdvi,col=cldifdvi)
dev.off()
# GZ "aggregate" -> modificare risoluzione (grana) immagine creando nuovo RasterLayer con risoluzione più bassa quindi celle più grandi ("fact=n" è un moltiplicatore che ci dà dei pixel n volte più grandi dei precedenti)
p224r63_2011lr<-aggregate(p224r63_2011,fact=10) # lr=lowresolution
# GZ inserire i due oggetti per vedere caratteristiche dei pixel
p224r63_2011
p224r63_2011lr
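# Added sketch: res() (raster package) reports the cell size; after
# aggregate(..., fact=10) it should be about 10 times coarser.
res(p224r63_2011)
res(p224r63_2011lr)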
# GZ multiframe plot comparing the two resolutions
par(mfrow=c(2,1))
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
plotRGB(p224r63_2011lr, r=4, g=3, b=2, stretch="Lin")
# GZ lower resolution ("fact=50")
p224r63_2011lr50<-aggregate(p224r63_2011,fact=50)
p224r63_2011lr50
# GZ comparative multiframe plot (original, lr, lr50)
par(mfrow=c(3,1))
plotRGB(p224r63_2011, r=4, g=3, b=2, stretch="Lin")
plotRGB(p224r63_2011lr, r=4, g=3, b=2, stretch="Lin")
plotRGB(p224r63_2011lr50, r=4, g=3, b=2, stretch="Lin")
# GZ DVI lr50 2011
dvi2011lr50<-p224r63_2011lr50$B4_sre-p224r63_2011lr50$B3_sre
plot(dvi2011lr50)
# GZ DVI lr50 1988
p224r63_1988lr50<-aggregate(p224r63_1988,fact=50) # GZ create the 1988 lr50
dvi1988lr50<-p224r63_1988lr50$B4_sre-p224r63_1988lr50$B3_sre
plot(dvi1988lr50)
# GZ difdvi lr50
difdvilr50<-dvi2011lr50-dvi1988lr50
plot(difdvilr50,col=cldifdvi) # GZ reuse the "cldifdvi" palette created earlier
# GZ multiframe of the DVI differences at the two resolutions
par(mfrow=c(2,1))
plot(difdvi,col=cldifdvi)
plot(difdvilr50,col=cldifdvi)
#############################################################################
#############################################################################
#############################################################################
### 5. 05_R_code_multitemp.r - Multitemporal analysis of landcover change
# GZ set working directory and packages
setwd("C:/lab")
library(raster)
library(RStoolbox)
library(ggplot2)
# GZ import the images
defor1 <-brick("defor1_.png")
defor2 <-brick("defor2_.png")
# GZ plotRGB of "defor1"
defor1 # GZ show the object's fields
# "defor1_.1" "defor1_.2" "defor1_.3"
# defor1_.1 = NIR
# defor1_.2 = red
# defor1_.3 = green
plotRGB(defor1,r=1,g=2,b=3,stretch="Lin") # GZ red channel->nir(r=1), green->red(g=2), blue->green(b=3)
# GZ exercise: plot the second date
plotRGB(defor2,r=1,g=2,b=3,stretch="Lin")
# GZ comparison (multiframe) of the same area at different times (before and after deforestation)
par(mfrow = c(2,1))
plotRGB(defor1,r=1,g=2,b=3,stretch="Lin")
plotRGB(defor2,r=1,g=2,b=3,stretch="Lin")
dev.off()
# GZ unsupervised classification
d1c<-unsuperClass(defor1,nClasses=2) # GZ builds forest and non-forest classes (the pixels are split into these two categories)
plot(d1c$map)
cl<-colorRampPalette(c('green','black'))(100)
plot(d1c$map,col=cl)
# GZ exercise: same as above for "defor2"
d2c<-unsuperClass(defor2,nClasses=2)
plot(d2c$map,col=cl)
dev.off()
# GZ compare the two dates (multiframe) with the classified pixels
# GZ two rows, one column
par(mfrow=c(2,1))
plot(d1c$map,col=cl)
plot(d2c$map,col=cl)
# GZ two columns, one row
par(mfrow=c(1,2))
plot(d1c$map,col=cl)
plot(d2c$map,col=cl)
dev.off()
# GZ frequency of the two pixel classes in the first image
freq(d1c$map)
# GZ open areas = 37039
# GZ forest = 304253
# GZ total number of pixels in the first image (needed for the percentages)
totd1<-37039+304253
totd1
# totd1=341292
# GZ percentage frequencies
percent1<-freq(d1c$map)*100/totd1
# GZ forest: 89.1 %
# GZ open areas: 10.9 %
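# Added sketch (assumes raster::freq() returns a matrix with "value" and
# "count" columns): compute the percentages without hard-coding pixel counts.
f1<-freq(d1c$map)
f1[,"count"]*100/sum(f1[,"count"])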
# GZ same procedure for the second image
freq(d2c$map)
# GZ open areas = 165055
# GZ forest = 177671
totd2<-165055+177671
totd2
# GZ totd2=342726
percent2<-freq(d2c$map)*100/totd2
# GZ open areas: 48.2 %
# GZ forest: 51.8 %
# GZ build vectors for the graphical analysis
cover<-c("Agriculture","Forest")
before<-c(10.9,89.1)
after<-c(48.2,51.8)
# GZ build a new dataset with the values obtained
output<-data.frame(cover,before,after)
output
# Day2
setwd("C:/lab")
load("C:/lab/defor.RData")
library(raster)
library(ggplot2)
install.packages("gridExtra")
library(gridExtra)
# GZ reload the maps
par(mfrow=c(1,2))
cl<-colorRampPalette(c('black','green'))(100)
plot(d1c$map,col=cl)
plot(d2c$map,col=cl)
ls()
output
# cover before after
# 1 Agriculture 10.9 48.2
# 2 Forest 89.1 51.8
# GZ bar chart of the cover percentages before deforestation
grafico1<-ggplot(output,aes(x=cover,y=before,color=cover)) +
geom_bar(stat="identity",fill="white")
grafico1 # GZ x axis: cover class (forest/open areas), y axis: cover percentage
# GZ exercise: bar chart after deforestation
grafico2<-ggplot(output,aes(x=cover,y=after,color=cover)) +
geom_bar(stat="identity",fill="white")
grafico2
# GZ exercise: use "grid.arrange" (a "gridExtra" function that allows comparing charts) to build one plot with grafico1 and grafico2
# GZ grid.arrange => grid.arrange(plot1,plot2,nrow=1); this function builds one plot holding several charts
grid.arrange(grafico1,grafico2,nrow=1)
# GZ clear change in the cover percentages
# GZ to ease the comparison, put the two charts on the same scale (force the y limit to 100)
grafico1<-ggplot(output,aes(x=cover,y=before,color=cover)) +
geom_bar(stat="identity",fill="white") +
ylim(0,100)
grafico2 <- ggplot(output, aes(x=cover, y=after, color=cover)) +
geom_bar(stat="identity", fill="white") +
ylim(0, 100)
grid.arrange(grafico1,grafico2,nrow=1)
#############################################################################
#############################################################################
#############################################################################
### 6. 06_R_code_multitemp_NO2.r - Analysis of ESA NO2 data (January-March 2020 => lockdown)
# GZ set working directory and packages
setwd("C:/lab")
library(raster)
# GZ import the images -> "raster" because each image has a single band (with several bands "brick" would be used)
EN01<-raster("EN_0001.png")
plot(EN01)
# GZ exercise: import all the images
EN02<-raster("EN_0002.png")
EN03<-raster("EN_0003.png")
EN04<-raster("EN_0004.png")
EN05<-raster("EN_0005.png")
EN06<-raster("EN_0006.png")
EN07<-raster("EN_0007.png")
EN08<-raster("EN_0008.png")
EN09<-raster("EN_0009.png")
EN10<-raster("EN_0010.png")
EN11<-raster("EN_0011.png")
EN12<-raster("EN_0012.png")
EN13<-raster("EN_0013.png")
# GZ multiframe plot of the first and last images => comparison
cl<-colorRampPalette(c('red','orange','yellow'))(100)
par(mfrow=c(1,2))
plot(EN01,col=cl)
plot(EN13,col=cl)
dev.off()
# GZ difference "EN13" - "EN01"
difno2<-EN13-EN01
cldif<-colorRampPalette(c('blue','black','yellow'))(100)
plot(difno2,col=cldif)
# GZ exercise: multiframe plot of all the images
par(mfrow=c(4,4))
plot(EN01,col=cl)
plot(EN02,col=cl)
plot(EN03,col=cl)
plot(EN04,col=cl)
plot(EN05,col=cl)
plot(EN06,col=cl)
plot(EN07,col=cl)
plot(EN08,col=cl)
plot(EN09,col=cl)
plot(EN10,col=cl)
plot(EN11,col=cl)
plot(EN12,col=cl)
plot(EN13,col=cl)
# GZ alternative: plot(EN01,EN02,EN03,EN04,EN05,EN06,EN07,EN08,EN09,EN10,EN11,EN12,EN13,col=cl)
# Day2
# setwd, packages and load
setwd("C:/lab")
load("EN.RData")
ls()
library(raster)
# GZ "list.files" (base R) -> vector with the list of files in a given directory (a folder created on purpose - "esa_no2")
setwd("C:/lab/esa_no2")
rlist<-list.files(pattern=".png")
rlist
# GZ "lapply" -> applies the given function to a whole list (rather than to a single file)
# GZ here the function is "raster" -> import the list of images
listafinale<-lapply(rlist,raster)
listafinale
# GZ "stack" -> turn the list into a single object of n bands (13 in this case), like a multitemporal set
EN<-stack(listafinale)
cl<-colorRampPalette(c('red','orange','yellow'))(100)
plot(EN,col=cl) # GZ -> show the images contained in the "EN" stack
# GZ difference March ("EN13") - January ("EN01")
difEN<-EN$EN_0013-EN$EN_0001
cld<-colorRampPalette(c('blue','white','red'))(100)
plot(difEN,col=cld)
# GZ boxplot of EN -> compare all the images in a box plot (setting its graphical options)
boxplot(EN,horizontal=T, # horizontal boxes
outline=F, # drop the outliers
axes=T) # draw the axes
# on average the changes are not dramatic; they are more evident on the maxima
#############################################################################
#############################################################################
#############################################################################
### 7. 07_R_code_snow.r - Snow cover analysis
# GZ set working directory and packages
setwd("C:/lab")
install.packages("ncdf4") # package providing an R interface to netCDF binary data files
library(ncdf4)
library(raster)
# GZ import the image downloaded from Copernicus (snow cover on 18/05/2020)
snowmay<-raster("c_gls_SCE500_202005180000_CEURO_MODIS_V1.0.1.nc")
# GZ plot "snowmay"
cl<-colorRampPalette(c('darkblue','blue','light blue'))(100)
plot(snowmay,col=cl)
# GZ set a new wd (folder "snow" => snow-cover images at different times)
setwd("C:/lab/snow")
# GZ import the files -> "rlist"
library(raster)
rlist<-list.files(pattern=".tif",full.names=T)
# GZ "lapply" on the list just created (each "rlist" file imported with "raster")
list_rast<-lapply(rlist,raster)
# GZ group the rasters into a single object -> "stack" (so they can simply be plotted all together)
snow.multitemp<-stack(list_rast)
# GZ plot (with the palette created above)
plot(snow.multitemp,col=cl)
# GZ multiframe (comparison) 2000 ("snow2000r") - 2020 ("snow2020r")
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r,col=cl)
plot(snow.multitemp$snow2020r,col=cl)
# GZ same value limit (zlim) on both maps => easier comparison
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r,col=cl,zlim=c(0,250))
plot(snow.multitemp$snow2020r,col=cl,zlim=c(0,250))
dev.off()
# GZ difference 2000-2020
difsnow<-snow.multitemp$snow2020r - snow.multitemp$snow2000r
cldif<-colorRampPalette(c('blue','white','red'))(100) # GZ new palette
plot(difsnow,col=cldif) # GZ blue pixels => cover decreased, white => unchanged, red => increased
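# Added sketch: cellStats (raster package) summarises the 2000-2020 change
# numerically; a negative mean would indicate an overall loss of snow cover.
cellStats(difsnow,"mean")
cellStats(difsnow,"min")
cellStats(difsnow,"max")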
# GZ "source" -> caricare codice da file esterni
source("prediction.r")
# GZ comando "lento" => caricare direttamente "predicted.snow.2025"
# GZ previsione 2025
predicted.snow.2025.norm<-raster("predicted.snow.2025.norm.tif")
plot(predicted.snow.2025.norm,col=cl)
#############################################################################
#############################################################################
#############################################################################
### 8. 08_R_code_patches.r
# GZ set working directory and packages
setwd("C:/lab")
install.packages("igraph")
library(igraph)
library(ggplot2)
library(raster)
# GZ load the raster images -> "raster"
d1c<-raster("d1c.tif")
d2c<-raster("d2c.tif")
# GZ plot to tell the forest areas apart (two-colour palette)
cl<-colorRampPalette(c('green','black'))(100)
par(mfrow=c(1,2))
plot(d1c,col=cl)
plot(d2c,col=cl)
cl<-colorRampPalette(c('black','green'))(100) # GZ map fix => invert the colours
par(mfrow=c(1,2))
plot(d1c,col=cl)
plot(d2c,col=cl)
dev.off()
# GZ values 2 => forest, 1 => agricultural areas
# GZ keep only the forest pixels -> "reclassify" to recode the values, with "cbind" turning the 1s (agriculture) into missing values ("NA")
d1c.for<-reclassify(d1c,cbind(1,NA))
d2c.for<-reclassify(d2c,cbind(1,NA))
# GZ comparison multiframe (forest only vs forest + agriculture)
par(mfrow=c(1,2))
cl<-colorRampPalette(c('black','green'))(100)
plot(d1c,col=cl)
plot(d1c.for,col=cl)
# GZ plot the forest-only maps
par(mfrow=c(1,2))
plot(d1c.for)
plot(d2c.for)
# GZ create the patches ("igraph")
library(igraph)
d1c.for.patches<-clump(d1c.for) # "clump" -> joins and groups neighbouring pixels (creates the patches)
d2c.for.patches<-clump(d2c.for)
# GZ "writeRaster" -> export the file in ".tif" format outside R (here into the "lab" folder)
writeRaster(d1c.for.patches,"d1c.for.patches.tif")
writeRaster(d2c.for.patches,"d2c.for.patches.tif")
# GZ exercise: plot the two maps side by side
par(mfrow=c(1,2))
clp<-colorRampPalette(c('darkblue','blue','green','orange','yellow','red'))(100) # GZ palette with more colours to better distinguish the forest patches
plot(d1c.for.patches,col=clp)
plot(d2c.for.patches,col=clp)
# GZ number of patches created in the maps
d1c.for.patches # GZ => 301 patches
d2c.for.patches # GZ => 1212 patches
# GZ collect the results in a new dataframe
time<-c("Before deforestation","After deforestation") # GZ "time" -> data before and after deforestation
npatches<-c(301,1212) # GZ "npatches" -> number of patches
# GZ build the "output" dataframe
output<-data.frame(time,npatches)
attach(output)
# GZ final plot ("ggplot")
library(ggplot2)
ggplot(output,aes(x=time,y=npatches,color="red"))+geom_bar(stat="identity",fill="white")
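# Added sketch: beyond the patch count, freq() on a clumped raster gives the
# pixel count per clump id, i.e. the patch-size distribution (useNA="no"
# drops the NA background).
ps1<-freq(d1c.for.patches,useNA="no")
mean(ps1[,"count"]) # mean patch size in pixels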
#############################################################################
#############################################################################
#############################################################################
### 9. 09_R_code_crop.r
# GZ set working directory (snow data already used => "snow" folder)
setwd("C:/lab/snow")
# GZ exercise: load all the images in the folder
library(raster)
rlist<-list.files(pattern="snow") # GZ "pattern" -> lets R recognise the files
list.rast<-lapply(rlist, raster)
list.rast
# GZ stack
snow.multitemp<-stack(list.rast)
# GZ plot
clb<-colorRampPalette(c('dark blue','blue','light blue'))(100)
plot(snow.multitemp,col=clb)
# GZ analysis of the multitemporal images
snow.multitemp
plot(snow.multitemp$snow2010r, col=clb) # GZ plot the 2010 image (Italy lies between 6 and 20 degrees of longitude and 35 and 50 of latitude)
# GZ zoom on Italy -> "zoom"; first set the new extent ("extension")
extension<-c(6,20,35,50)
zoom(snow.multitemp$snow2010r,ext=extension)
zoom(snow.multitemp$snow2010r,ext=extension,col=clb) # GZ plot 2010, zoomed on Italy, "clb" palette
# GZ define the extent by drawing it ("drawExtent")
plot(snow.multitemp$snow2010r, col=clb) # GZ replot the original image
zoom(snow.multitemp$snow2010r,ext=drawExtent())
extension<-c(6,20,35,50)
snow2010r.italy<-crop(snow.multitemp$snow2010r,extension) # GZ "crop" -> returns an image of the clipped area
plot(snow2010r.italy,col=clb) # GZ plot the resulting image
# GZ exercise: crop Italy from the full stack
extension<-c(6,20,35,50)
snow.multitemp.Italy<-crop(snow.multitemp,extension)
plot(snow.multitemp.Italy,col=clb)
# GZ set a uniform legend
snow.multitemp.Italy
# GZ min->20, MAX->200
# GZ add the limit => "zlim=c(20,200)"
plot(snow.multitemp.Italy,col=clb,zlim=c(20,200))
# GZ boxplot => the MAX snow-cover value decreases over time
boxplot(snow.multitemp.Italy,horizontal=T,outline=F)
#############################################################################
#############################################################################
#############################################################################
### 10. 10_R_code_species_distribution_modeling.r - Species Distribution Modeling
# GZ packages (no setwd because the data ship with the "sdm" package)
install.packages("sdm")
library(sdm)
library(raster)
library(rgdal)
# GZ "system.file" -> load a file contained in "sdm"
file<-system.file("external/species.shp",package="sdm")
# GZ "shapefile" ("raster" package)
species<-shapefile(file)
# GZ dataset characteristics
species
species$Occurrence # GZ "Occurrence" values # each point records presence or absence of the species => "Occurrence" = 0 (absent) or 1 (present)
# GZ plot the "species" dataset
plot(species) # GZ presences and absences shown
# GZ distinguish absences (red) from presences (blue)
plot(species[species$Occurrence==1,],col='blue',pch=16)
points(species[species$Occurrence==0,],col='red',pch=16)
# GZ environmental variables available ("external" folder, "sdm" package)
path <- system.file("external",package="sdm")
# GZ import the files used to predict the spatial distribution from the environmental variables
lst<-list.files(path=path,pattern='asc$',full.names=T)
lst # GZ variables: elevation, precipitation, temperature, vegetation
preds<-stack(lst) # GZ stack => distribution predictors
cl<-colorRampPalette(c('yellow','orange','red'))(100) # GZ palette
plot(preds,col=cl) # GZ the distribution is probably related to the variable values
# GZ plot elevation
plot(preds$elevation,col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ add the presence points => the species occurs at low elevation
# GZ temperature
plot(preds$temperature, col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ => the species does not like low temperatures
# GZ precipitation
plot(preds$precipitation, col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ => average rainfall conditions are optimal
# GZ vegetation
plot(preds$vegetation, col=cl)
points(species[species$Occurrence==1,],pch=16) # GZ => high vegetation cover is favourable
# GZ summary: low elevation, medium-high temperature, average rainfall, good vegetation cover
# GZ Generalized Linear Model (glm)
d<-sdmData(train=species,predictors=preds) # GZ tell R which data describe the species and which variables to consider
d
# GZ model
m1<-sdm(Occurrence~elevation+precipitation+temperature+vegetation,data=d,methods='glm')
# GZ prediction (map the predicted distribution from the four variables) -> "predict"
p1<-predict(m1,newdata=preds)
plot(p1,col=cl)
points(species[species$Occurrence==1,],pch=16)
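# Added sketch: a crude presence/absence map obtained by thresholding the
# predicted suitability at 0.5 (the 0.5 cut-off is an arbitrary assumption).
plot(p1>0.5)
points(species[species$Occurrence==1,],pch=16)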
#############################################################################
#############################################################################
#############################################################################
### 11_R_code_project.r
# GZ set working directory and packages
setwd("C:/lab/exam")
library(ncdf4)
library(raster)
# GZ import the albedo data and group them
examlist<-list.files(pattern=".nc",full.names=T)
list_rast<-lapply(examlist,raster)
alb.multitemp<-stack(list_rast)
# GZ rename the layers for convenience
alb.jan2000<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.1
alb.jan2010<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.2
alb.jan2020<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.3
alb.jul2000<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.4
alb.jul2010<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.5
alb.jul2020<-alb.multitemp$Broadband.directional.albedo.over.total.spectrum.6
# GZ plot
cl<-colorRampPalette(c('red','orange','yellow'))(100)
plot(alb.multitemp,col=cl,zlim=c(0,1)) # GZ skipped: six panels get too cluttered and this palette works poorly here
# GZ January comparison
par(mfrow=c(1,3))
plot(alb.jan2000,zlim=c(0,1))
plot(alb.jan2010,zlim=c(0,1))
plot(alb.jan2020,zlim=c(0,1))
# GZ July comparison
par(mfrow=c(1,3))
plot(alb.jul2000,zlim=c(0,1))
plot(alb.jul2010,zlim=c(0,1))
plot(alb.jul2020,zlim=c(0,1))
# GZ crop the Alpine arc, winter
extension<-c(0,20,42,50)
alb.alps.2000.jan<-crop(alb.jan2000,extension)
alb.alps.2010.jan<-crop(alb.jan2010,extension)
alb.alps.2020.jan<-crop(alb.jan2020,extension)
par(mfrow=c(1,3))
plot(alb.alps.2000.jan,zlim=c(0,1))
plot(alb.alps.2010.jan,zlim=c(0,1))
plot(alb.alps.2020.jan,zlim=c(0,1))
# GZ crop the Alpine arc, summer
alb.alps.2000.jul<-crop(alb.jul2000,extension)
alb.alps.2010.jul<-crop(alb.jul2010,extension)
alb.alps.2020.jul<-crop(alb.jul2020,extension)
par(mfrow=c(1,3))
plot(alb.alps.2000.jul,zlim=c(0,1))
plot(alb.alps.2010.jul,zlim=c(0,1))
plot(alb.alps.2020.jul,zlim=c(0,1))
# GZ winter-summer albedo differences across the years
dif1<-alb.jan2000-alb.jul2000
dif2<-alb.jan2020-alb.jul2020
cldif<-colorRampPalette(c('blue','white','red'))(100)
par(mfrow=c(1,2))
plot(dif1,col=cldif)
plot(dif2,col=cldif)
# I expected more marked differences; possibly a reading error
# GZ comparison with the snow cover => matching patterns expected
# GZ import the snow-cover rasters
snowlist<-list.files(pattern="snow",full.names=T)
list_snow<-lapply(snowlist,raster)
snow.multitemp<-stack(list_snow)
# GZ comparison (winter months used to make the contrast more visible)
clsnow<-colorRampPalette(c('darkblue','blue','light blue'))(100)
par(mfrow=c(2,2))
plot(snow.multitemp$snow2000r,col=clsnow,zlim=c(0,250))
plot(snow.multitemp$snow2020r,col=clsnow,zlim=c(0,250))
plot(alb.jan2000,zlim=c(0,1))
plot(alb.jan2020,zlim=c(0,1))
# GZ crop the Alpine arc (albedo and snow)
snow.alps.2000<-crop(snow.multitemp$snow2000r,extension)
snow.alps.2020<-crop(snow.multitemp$snow2020r,extension)
par(mfrow=c(2,2))
plot(alb.alps.2000.jan,zlim=c(0,1))
plot(alb.alps.2020.jan,zlim=c(0,1))
plot(snow.alps.2000,col=clsnow,zlim=c(0,250))
plot(snow.alps.2020,col=clsnow,zlim=c(0,250))
# GZ compare the 2000-2020 differences for albedo and snow
dif3<-alb.jan2000-alb.jan2020
difsnow<-snow.multitemp$snow2000r-snow.multitemp$snow2020r
par(mfrow=c(2,2))
plot(dif3,col=cldif)
plot(difsnow,col=cldif)
# GZ crop the Alps from the difference maps
alb.dif.alps<-crop(dif3,extension)
snow.dif.alps<-crop(difsnow,extension)
par(mfrow=c(1,2))
plot(alb.dif.alps,col=cldif)
plot(snow.dif.alps,col=cldif)
# GZ the difference is far more evident in the snow cover (expected: even a thin layer of snow is enough to raise the albedo)
|
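# Hedged note: judging by the file path below, this is an auto-generated
# valgrind/fuzz test case for DLMtool::LBSPRgen; the extreme numeric values
# are fuzzing inputs, not meaningful fishery parameters.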
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.590269601728e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615833644-test.R (repo: akhikolla/updatedatatype-list2, R, no_license)
|
library(httr)
# Auth0 custom rule streams relevant user info to Keen.io after successful sign-in. Use a saved Keen.io query
# to retrieve the latest streamed event to identify the user within Shiny. It's lame having to use these third-party
# platforms, but as of now, unable to authenticate in open-source shiny-server AND read authentication credentials into
# Shiny. This is still a janky solution (i.e. what if 2 users sign in at the same time?). Would rather use the
# request IP to help identify the right user info in Keen.io but this is also not exposed in Shiny.
# Awaiting resolution of https://github.com/rstudio/shiny/issues/141fu
keenUser <- function() {
Sys.sleep(5) # Allow time for KeenIO to register data-stream from Auth0
keen_req <- GET(
url = paste("https://api.keen.io/3.0/projects", Sys.getenv("DIAGNOSTIC_KEEN_PROJECT_ID"), "queries/saved",
tolower(Sys.getenv("DIAGNOSTIC_KEEN_QUERY")), "result", sep = "/"),
add_headers("Authorization" = Sys.getenv("DIAGNOSTIC_KEEN_READKEY"), "Content-Type" = "application/json")
)
print(keen_req$all_headers)
res <- content(keen_req)[["result"]]
if ( length(res) < 1) {
res$name <- sQuote("Unknown")
res$group <- "guest"
res$id <- "00GUEST"
} else if ( length(res) > 1) {
timestamp <- strptime( sapply(res, function(x) x$keen$timestamp), "%Y-%m-%dT%H:%M:%OSZ")
res <- res[[ which(timestamp == min(timestamp)) ]]
} else {
res <- res[[1]]
}
return(res)
}
capitalize <- function(x) paste0(toupper(substring(x, 0, 1)), substring(x, 2, nchar(x)))
ENTITY_FIELDS <- c("symptoms", "conditions")
EDIT_FIELDS <- c("add", "delete", "change")
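# Hypothetical usage sketch (not part of the original file): resolve the
# current user at session start; falls back to the guest record when Keen.io
# returns nothing.
user <- keenUser()
message("Signed in as ", capitalize(user$name), " (group: ", user$group, ")")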
|
/global.R (repo: Chrisss93/Diagnostics, R, no_license)
|
#' Create Related Numeric Columns
#'
#' Generate columns that are related.
#'
#' @param x A starting column.
#' @param j The number of columns to produce.
#' @param name An optional prefix name to give to the columns. If \code{NULL}
#' attempts to take from the \code{varname} attribute of \code{x}. If not found,
#' "Variable" is used.
#' @param operation An operation character vector of length 1; either
#' \code{c("+", "-", "*", "/")}. This is the relationship between columns.
#' @param mean The average value to add, subtract, multiply, or divide
#' by.
#' @param sd The amount of variability to allow in \code{mean}. Setting to 0
#' will constrain the operation between x_(n - 1) column and x_n to be exactly
#' the mean value (see \bold{Examples} for a demonstration).
#' @param rep.sep A separator to use for repeated variable names. For example
#' if the \code{\link[wakefield]{age}} is used three times
#' (\code{r_data_frame(age, age, age)}), the name "Age" will be assigned to all
#' three columns. This results in column names \code{c("Age_1", "Age_2", "Age_3")}.
#' @param digits The number of digits to round to. Defaults to the max number
#' of significant digits in \code{x}.
#' @return Returns a \code{\link[dplyr]{tbl_df}}.
#' @keywords correlate related
#' @export
#' @seealso \code{\link[wakefield]{r_series}}
#' @examples
#' relate(1:10, 10)
#'
#' (x <- r_data_frame(10, id, relate(1:10, 10, "Time", mean = 2)))
#' library(ggplot2)
#'
#' dat <- with(x, data.frame(ID = rep(ID, ncol(x[, -1])), stack(x[, -1])))
#' dat[["Time"]] <- factor(sub("Time_", "", dat[["ind"]]), levels = 1:10)
#' ggplot(dat, aes(x = Time, y = values, color = ID, group = ID)) +
#' geom_line(size=.8)
#'
#' relate(1:10, 10, name = "X", operation = "-")
#' relate(1:10, 10, "X", mean = 1, sd = 0)
#' relate(1:10, 10, "Var", "*")
#' relate(1:10, 10, "Var", "/")
#'
#' relate(gpa(30), 5, mean = .1)
#' relate(likert(10), 5, mean = .1, sd = .2)
#' relate(date_stamp(10), 6)
#' relate(time_stamp(10), 6)
#' relate(rep(100, 10), 6, "Reaction", "-")
relate <- function(x, j, name = NULL, operation = "+", mean = 5, sd = 1,
rep.sep = "_", digits = max(nchar(sub("^[^.]*.", "", x)))){
if (is.null(name)) name <- attributes(x)[["varname"]]
if (is.null(name)) name <- "Variable"
if (is.factor(x) | inherits(x, c("Date", "POSIXct", "POSIXt"))) x <- as.numeric(x)
if (!is.numeric(x)) warning("`x` is not numeric, date, or factor.", immediate. = TRUE)
elems <- (j - 1) * length(x)
seed_dat <- data.frame(x, matrix(rnorm(elems, mean = mean, sd = sd),
nrow = length(x)), stringsAsFactors = FALSE)
if (!is.null(digits)) seed_dat <- round(seed_dat, digits)
for (i in 2:ncol(seed_dat)) {
seed_dat[, i] <- match.fun(operation)(seed_dat[, i - 1], seed_dat[, i])
}
out <- setNames(seed_dat, paste(name, seq_len(j), sep = rep.sep))
seriesname(dplyr::tbl_df(out), name)
}
|
/R/relate.R (repo: ds4ci/wakefield, R, no_license)
|
#' Power calculations.
#'
#' This function simply counts the proportion of people who selected the data plot,
#' in a set of lineups. It adjusts for multiple picks by the same individual, by weighting
#' by the total number of choices.
#' @param data summary of the results, containing columns id, pic_id, response, detected
#' @param m size of the lineup
#' @return A data frame with the power and number of evaluations for each pic_id
#' @export
#' @importFrom dplyr %>% mutate group_by summarise
#' @examples
#' data(turk_results)
#' visual_power(turk_results)
visual_power <- function(data, m=20) {
  # a subject who picks k plots receives weight (m - k)/(m - 1)
  data <- data %>% mutate(
    nchoices_wgt = (m-sapply(strsplit(as.character(data$response), ","), length))/(m-1))
visual_p <- data %>% group_by(pic_id) %>%
summarise(power = sum(detected*nchoices_wgt)/length(detected), n=length(detected))
return(visual_p)
}
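# Hedged worked example of the weighting (with the default m = 20): a subject
# who picks k plots receives weight (m - k)/(m - 1), so a single pick counts
# fully and picking almost the whole lineup counts for nearly nothing.
k <- c(1, 5, 19)
(20 - k)/19 # 1.0000000 0.7894737 0.0526316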
|
/R/power.r (repo: sa-lee/nullabor, R, no_license)
|
# R script for Peak Model
# -- generated by MACS
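# Hedged note: in MACS's auto-generated peak-model scripts, "p" and "m" are,
# presumably, the plus(forward)- and minus(reverse)-strand tag densities used
# to draw the fragment-size model.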
p <- c(0.0414937466192,0.0489353039017,0.0560707211693,0.0635358276837,0.0698234726027,0.070247358777,0.0698705710665,0.0689521510222,0.0688815033264,0.0689286017903,0.0690934464136,0.0691405448774,0.0689757002541,0.0686460110074,0.0691169956455,0.0698941202984,0.0707418926471,0.0705299495599,0.0704357526323,0.0707418926471,0.0706476957194,0.0706476957194,0.0717074111552,0.0715190173,0.0717780588509,0.0713777219085,0.0705299495599,0.0720371004019,0.0721548465614,0.0722961419529,0.0720841988657,0.0712128772852,0.070011866458,0.0701060633856,0.0701531618494,0.0705299495599,0.0703651049366,0.0691405448774,0.0694466848922,0.069258291037,0.0695408818198,0.0698234726027,0.0702238095451,0.0701767110813,0.0701767110813,0.0699647679941,0.0717074111552,0.0714483696043,0.0711186803576,0.070506400328,0.0706476957194,0.070765441879,0.0697763741389,0.0699647679941,0.0697763741389,0.0697057264432,0.0689521510222,0.0697999233708,0.0706005972556,0.071000934198,0.0703886541685,0.070765441879,0.0725787327358,0.07250808504,0.073026168142,0.073026168142,0.0731203650696,0.0735678004758,0.0729555204463,0.0738032927949,0.0744155728245,0.0746510651435,0.0733323081568,0.0742271789692,0.0741094328097,0.0738503912587,0.0732145619972,0.0722490434891,0.0726258311996,0.071754509619,0.0719900019381,0.0715190173,0.0715661157638,0.0705299495599,0.0706241464875,0.0710715818937,0.071259975749,0.0719193542424,0.0722254942572,0.0719429034743,0.0714954680681,0.0716367634595,0.0716132142276,0.0714012711404,0.0708831880385,0.0701767110813,0.0703415557046,0.0703651049366,0.070011866458,0.0700354156899,0.0691169956455,0.0692818402689,0.0696586279794,0.0698941202984,0.0700825141537,0.069752824907,0.0692111925731,0.0693524879646,0.069258291037,0.069234741805,0.0696821772113,0.0687873063988,0.0686224617755,0.0693524879646,0.0695644310517,0.0703415557046,0.0715425665319,0.0713070742128,0.0706712449513,0.0706712449513,0.0708831880385,0.0718016080828,0.0715425665319,0.0711893280533,0.0713777219085,0.0717309603871,0.0715425665319,0.0714248203723,0.0719664527062,0.0727435773591,0.0732616604611,0.0726729296634,0.0729790696782,0.0729084219825,0.0725551835039,0.0718251573148,0.0710480326618,0.0713306234447,0.0709538357342,0.0700354156899,0.0693289387327,0.068999249486,0.0694466848922,0.0694702341241,0.0704357526323,0.0706476957194,0.0709067372704,0.0705534987918,0.0715661157638,0.0727200281272,0.0728142250548,0.0736619974035,0.0728848727505,0.0731203650696,0.071754509619,0.0715896649957,0.0711186803576,0.0714012711404,0.0711186803576,0.0703886541685,0.0706712449513,0.0705299495599,0.0714248203723,0.0716367634595,0.0718487065467,0.0720606496338,0.0742507282011,0.0742271789692,0.0738739404906,0.0743449251287,0.0746275159116,0.0738503912587,0.0734736035482,0.0737561943311,0.0738974897225,0.0740623343459,0.0725551835039,0.0726964788953,0.0726964788953,0.0726729296634,0.072767126591,0.0732145619972,0.0734971527801,0.0725787327358,0.0731674635334,0.0733794066206,0.0738503912587,0.0749336559264,0.0747688113031,0.0747452620712,0.0746981636073,0.0753339928688,0.0753104436369,0.0753104436369,0.074792360535,0.0743920235926,0.0743449251287,0.0739445881863,0.0740623343459,0.0740623343459,0.0737090958673,0.0736384481716,0.0737326450992,0.0735913497077,0.0733558573887,0.0734500543163,0.0728142250548,0.0722490434891,0.0721548465614,0.0715425665319,0.0718487065467,0.0713070742128,0.0712364265171,0.0710715818937,0.0712364265171,0.0710715818937,0.0712128772852,0.0711893280533,0.0711422295895,0.0718487065467,0.0719900019381,0.07250808504,0.0723667896486,0.0727435
773591,0.0730026189101,0.0734971527801,0.0734736035482,0.0739681374182,0.0748865574626,0.0747688113031,0.0746981636073,0.0745804174478,0.0746746143754,0.0754281897964,0.0757107805793,0.0757578790431,0.0758285267389,0.0749807543902,0.0746039666797,0.0745097697521,0.0742978266649,0.0748865574626,0.075027852854,0.0745804174478,0.0746746143754,0.0744391220564,0.0740858835778,0.0739445881863,0.0744626712883,0.0741800805054,0.0746746143754,0.0749101066945,0.0750985005498,0.074792360535,0.0746275159116,0.0739916866502,0.0739210389544,0.0736619974035,0.0730968158377,0.0728613235186,0.0722490434891,0.0721312973295,0.0710480326618,0.0715425665319,0.0713777219085,0.0722490434891,0.0721077480976,0.0727200281272,0.0722961419529,0.0724374373443,0.0724374373443,0.0718958050105,0.0730497173739,0.0727200281272,0.0725551835039,0.0724138881124,0.0726022819677,0.0730497173739,0.0733558573887,0.0739445881863,0.0741565312735,0.0734029558525,0.0727200281272,0.0729555204463,0.0728848727505,0.0723903388805,0.07250808504,0.0721548465614,0.07250808504,0.072272592721,0.0732381112291,0.0743449251287,0.0752397959412,0.0749572051583,0.0752162467093,0.0758991744346,0.0766527498556,0.0773827760447,0.0773827760447,0.0786544345676,0.0790076730462,0.0801615854096,0.0810564562221,0.0830816901661,0.0844710948485,0.0869908626625,0.088121225794,0.0896048274041,0.0908293874633,0.0919597505948,0.0925955798563,0.0933256060454,0.0935375491325,0.0927839737115,0.0937494922197,0.0932785075816,0.0940320830026,0.0939378860749,0.0948563061193,0.0947385599598,0.0950682492064,0.0944559691769,0.0951153476703,0.0961279646422,0.0959631200189,0.0966695969761,0.0971170323823,0.09770576318,0.0973054262376,0.0972112293099,0.0978941570352,0.0976822139481,0.0980354524267,0.0979648047309,0.0977293124119,0.0969992862228,0.0968579908313,0.0965754000485,0.0962928092656,0.0963634569613,0.0952095445979,0.095445036917,0.0944559691769,0.0944088707131,0.0940556322345,0.0937730414516,0.0929723675668,0.0927368752477,0.0933256060454,0.0933727045092,0.0933256060454,0.093914336843,0.0945737153364,0.0939614353068,0.0944088707131,0.0935846475963,0.0939849845388,0.0939614353068,0.093419802973,0.0929488183349,0.0933727045092,0.0930430152625,0.0929959167987,0.0934433522049,0.0932078598858,0.0935846475963,0.0933020568135,0.0932078598858,0.0932314091177,0.0928075229434,0.0933020568135,0.0923836367691,0.09214814445,0.0923365383053,0.0916300613481,0.0920068490586,0.0918655536672,0.0917949059714,0.0917478075076,0.0926426783201,0.092171693682,0.0928310721753,0.0924542844648,0.0920774967543,0.0928310721753,0.0932078598858,0.0933256060454,0.0931843106539,0.0936081968282,0.093160761422,0.0935375491325,0.0938907876111,0.0940791814664,0.0943382230173,0.0947150107279,0.0941262799302,0.0941027306983,0.095421487685,0.0951388969022,0.0961515138741,0.0963399077294,0.0961515138741,0.0961750631061,0.0957511769317,0.0953272907574,0.0968344415994,0.0979177062671,0.0962928092656,0.0965754000485,0.0963399077294,0.0955863323084,0.0955627830765,0.0962457108018,0.0969050892952,0.0966695969761,0.0950917984384,0.0932785075816,0.0947150107279,0.0950446999745,0.0946208138002,0.0950446999745,0.0950917984384,0.0943146737854,0.0941262799302,0.0943146737854,0.0955627830765,0.0968579908313,0.0960573169465,0.0954685861489,0.0957747261636,0.0959160215551,0.0966224985123,0.0973525247014,0.0967402446718,0.0972583277738,0.0967637939037,0.096693146208,0.0971405816142,0.0976115662523,0.0976351154843,0.097941255499,0.096952187759,0.0966460477442,0.0966460477442,0.0969050892952,0.096198612338,0.095680529236,0.0
950446999745,0.0952330938298,0.0948798553512,0.0938907876111,0.0938436891473,0.0938201399154,0.0940791814664,0.0935610983644,0.0937494922197,0.0932785075816,0.0928075229434,0.0932314091177,0.0940085337707,0.0945737153364,0.0953508399893,0.0942440260897,0.0945737153364,0.0949740522788,0.0943382230173,0.0950446999745,0.0959631200189,0.0952095445979,0.0948092076555,0.0938672383792,0.0932078598858,0.0943853214812,0.0947856584236,0.0944088707131,0.0949034045831,0.0950211507426,0.0938201399154,0.0935610983644,0.0927368752477,0.0937259429878,0.0941262799302,0.0940556322345,0.0934904506687,0.0938672383792,0.0948092076555,0.0950682492064,0.0971170323823,0.0981767478181,0.0993306601815,0.100154883298,0.101214598734,0.102227215706,0.102815946504,0.103357578838,0.104417294273,0.105006025071,0.104417294273,0.103922760403,0.104346646578,0.103781465012,0.103381128069,0.103169184982,0.103946309635,0.104982475839,0.104723434288,0.10437019581,0.104558589665,0.105335714318,0.105500558941,0.105641854332,0.106065740507,0.106678020536,0.105335714318,0.104464392737,0.10425244965,0.104558589665,0.105076672767,0.10535926355,0.104841180448,0.10486472968,0.105053123535,0.104888278911,0.106207035898,0.106630922073,0.105924445115,0.106489626681,0.107196103638,0.107667088276,0.108632606785,0.109362632974,0.109857166844,0.110022011467,0.110328151482,0.110704939192,0.111434965381,0.111552711541,0.111317219222,0.110869783816,0.110752037656,0.110775586888,0.111199473062,0.110752037656,0.111270120758,0.112023696179,0.112612426977,0.112235639266,0.113106960847,0.114166676283,0.114684759385,0.115556080965,0.115391236342,0.117086781039,0.118240693402,0.119041367287,0.121043051999,0.123633467509,0.124481239858,0.124952224496,0.125470307598,0.127001007672,0.130109506283,0.132087641763,0.134136424939,0.136114560419,0.138822722089,0.141460236062,0.146429123994,0.151609955014,0.158462781499,0.165080115664,0.170779029786,0.177773151662,0.185002765857,0.190819426138,0.196848029506,0.203630208295,0.207610028488,0.210247542461,0.212979253362,0.212861507203,0.211283708665,0.207821971575,0.202758886715,0.198096138797,0.1914552554,0.184343387364,0.177090223937,0.171344211352,0.163266824808,0.156249153699,0.150809281129,0.144804226993,0.140165028307,0.135667125013,0.131969895604,0.128390412354,0.125729349149,0.122220513595,0.118688128809,0.116757091792,0.113742790108,0.112212090034,0.109715871452,0.108138072915,0.106536725145,0.103710817316,0.101332344894,0.0990951678625,0.0968344415994,0.0952330938298,0.0935375491325,0.0922187921458,0.090900035159,0.0903113043613,0.088874801215,0.088874801215,0.0882860704174,0.0877208888516,0.088121225794,0.0875324949964,0.087367650373,0.0863550334011,0.0861666395458,0.0846359394719,0.0844239963847,0.0837646178913,0.0826813532236,0.0828226486151,0.0831523378618,0.0824223116727,0.0820455239622,0.0817629331793,0.0811506531497,0.0808680623668,0.0803028808011,0.079572854612,0.0804206269606,0.0801615854096,0.0795022069163,0.0791254192058,0.0783247453209,0.0787486314953,0.0798554453949,0.0803970777287,0.0804206269606,0.0810564562221,0.0810564562221,0.0803970777287,0.0804912746563,0.0818806793388,0.0835762240361,0.0836468717318,0.0829639440065,0.0830581409341,0.0837175194275,0.0841885040657,0.083340731717,0.084094307138,0.0849891779505,0.0848007840952,0.0848243333271,0.0849420794867,0.0861430903139,0.0861430903139,0.0856485564439,0.0850598256462,0.0858840487629,0.0862843857053,0.0860724426182,0.0859075979948,0.0855308102843,0.0863079349372,0.0849656287186,0.0854130641248,0.0860488933863,0.0859546964586,0.08482433
33271,0.0844946440804,0.0837410686594,0.0839765609785,0.0833878301808,0.082092622426,0.0821161716579,0.0813625962369,0.0806561192797,0.0807267669754,0.0809387100626,0.0803028808011,0.0804441761925,0.0794551084525,0.0798083469311,0.080326430033,0.0805619223521,0.0805148238882,0.0813861454688,0.0809858085264,0.0816922854836,0.0822574670493,0.0821632701217,0.0821632701217,0.0819748762664,0.0822103685855,0.083105239398,0.0844475456166,0.0839294625147,0.0834349286446,0.0827520009194,0.0826342547598,0.0823752132089,0.083105239398,0.0833642809489,0.0828932963108,0.082092622426,0.0815038916283,0.0819277778026,0.0829168455427,0.0839765609785,0.0834584778766,0.0844239963847,0.0835526748042,0.0834820271085,0.0834820271085,0.0832465347894,0.0830110424703,0.0833171824851,0.0834820271085,0.0828932963108,0.0836468717318,0.0835526748042,0.0835291255723,0.0828932963108,0.0832465347894,0.083340731717,0.0827049024555,0.0824223116727,0.0818571301069,0.0825165086003,0.082092622426,0.0822103685855,0.083340731717,0.0843297994571,0.0848714317909,0.0847536856314,0.0859546964586,0.0853188671972,0.0855779087481,0.0854366133567,0.0851775718057,0.0843062502252,0.084353348689,0.0837410686594,0.082587156296,0.0819984254984,0.0811977516135,0.0812919485412,0.0815038916283,0.0801380361777,0.0805383731202,0.0813154977731,0.080820963903,0.0806796685116,0.0809387100626,0.0816451870198,0.0815038916283,0.0816687362517,0.081574539324,0.0820455239622,0.0814096947007,0.0808680623668,0.0809151608307,0.0811035546859,0.0807267669754,0.080820963903,0.0807267669754,0.0810329069902,0.0810329069902,0.0808916115988,0.0819984254984,0.0824458609046,0.0818100316431,0.0815509900921,0.0821632701217,0.0816687362517,0.0825165086003,0.0819748762664,0.0818100316431,0.0821632701217,0.0812213008454,0.080326430033,0.0807032177435,0.0811742023816,0.0807738654392,0.0811977516135,0.0805383731202,0.0803028808011,0.0811506531497,0.0810564562221,0.081339047005,0.081574539324,0.0815038916283,0.0809151608307,0.0808445131349,0.0814803423964,0.0818571301069,0.0819748762664,0.0815038916283,0.0817629331793,0.080820963903,0.0813625962369,0.0811035546859,0.0817393839474,0.0818806793388,0.0804206269606,0.0803970777287,0.0797847976991,0.0779244083785,0.0775711698999,0.0787721807272,0.0779008591466,0.0783718437848,0.07806570377,0.0784424914805,0.0792667145972,0.0787957299591,0.0797376992353,0.0809151608307,0.0811742023816,0.0803028808011,0.0807503162073,0.0807267669754,0.0809622592945,0.080820963903,0.0806325700478,0.0806796685116,0.0799967407863,0.0799025438587,0.0797376992353,0.0803970777287,0.080067388482,0.0792431653653,0.0787957299591,0.078819279191,0.0788428284229,0.079078320742,0.0791018699739,0.0791725176696,0.0787015330314,0.0781834499295,0.0779479576104,0.0779715068423,0.0776653668276,0.0773592268128,0.0766056513918,0.0763230606089,0.0764879052322,0.0765350036961,0.0764879052322,0.076299511377,0.0764408067684,0.0766762990875,0.0771001852618,0.0766527498556,0.0772179314213,0.0771472837256,0.0771472837256,0.0772414806532,0.0774534237404,0.0771943821894,0.076299511377,0.0757107805793,0.075804977507,0.0753575421007,0.0746039666797,0.0739210389544,0.0737326450992,0.0734500543163,0.0730732666058,0.0730497173739,0.0736619974035,0.074274277433,0.0738032927949,0.074274277433,0.0739916866502,0.0746275159116,0.0741800805054,0.0739445881863,0.074274277433,0.0742036297373,0.0740152358821,0.0737326450992,0.0741565312735,0.0735913497077,0.0743920235926,0.0736148989396,0.0735678004758,0.0739210389544,0.0735678004758,0.0738974897225,0.0738974897225,0.0742978266649,0.07345005
43163,0.0739210389544,0.0742978266649,0.0749572051583,0.0751926974774,0.0748865574626,0.0746981636073,0.0738974897225,0.0743920235926,0.074533318984,0.0745568682159,0.0742036297373,0.0736384481716,0.0732145619972,0.0728848727505,0.0723903388805,0.072531634272,0.0736855466354,0.072767126591,0.0719429034743,0.0722254942572,0.0731439143015,0.0735442512439,0.0735442512439,0.073779743563,0.0744155728245,0.074274277433,0.0728613235186,0.0738268420268,0.0737090958673,0.0729790696782,0.0723432404167,0.0717074111552,0.0722019450253,0.0719664527062,0.0720371004019,0.0718487065467,0.0727200281272,0.0722019450253,0.0721783957934,0.0725787327358,0.0723196911848,0.0726493804315,0.0730732666058,0.0733794066206,0.0723667896486,0.0726964788953,0.0722961419529,0.0717780588509,0.0719193542424,0.0722254942572,0.0715425665319,0.0709067372704,0.0697763741389,0.0688815033264,0.0692111925731,0.0691169956455,0.0689286017903,0.0697999233708,0.0696821772113,0.0686224617755,0.0691405448774,0.0695644310517,0.0702709080089,0.0708596388066,0.0705770480237,0.0699883172261,0.0699412187622,0.0696350787475,0.0699647679941,0.0707889911109,0.0703651049366,0.0701296126175,0.0695879802836,0.0691169956455,0.0691640941093,0.0689050525584,0.0688344048626,0.0686931094712,0.0680337309778,0.0672566063249,0.0683163217607,0.0682221248331,0.0675627463397,0.0680101817459,0.0682692232969,0.0685518140798,0.0691640941093,0.0697057264432,0.0700354156899,0.0704122034004,0.070506400328,0.0705534987918,0.0704122034004,0.0701060633856,0.0697057264432,0.0700825141537,0.0703180064727,0.0701060633856,0.0693995864284,0.0694702341241,0.0691876433412,0.0696586279794,0.069752824907,0.0700354156899,0.0701767110813,0.0702709080089,0.0697057264432,0.0691876433412,0.0693289387327,0.0693289387327,0.0682927725288,0.0680808294416,0.0688108556307,0.0684576171521,0.068245674065,0.067492098644,0.0677746894268,0.0675156478759,0.0679395340502,0.0680808294416,0.0686460110074,0.0682221248331,0.0681043786735,0.0688108556307,0.069258291037,0.0699176695303,0.0698941202984,0.0696821772113,0.0703651049366,0.0696115295155,0.0699647679941,0.0698234726027,0.0699883172261,0.0694702341241,0.0690227987179,0.0684576171521,0.0689050525584,0.0691876433412,0.0683869694564,0.0691169956455,0.0690227987179,0.0697999233708,0.0697999233708,0.070247358777,0.0702944572408,0.0707889911109,0.0703415557046,0.0708596388066,0.0712128772852,0.0709538357342,0.0707418926471,0.0704122034004,0.0698705710665,0.0695879802836,0.0706476957194,0.0708831880385,0.0711422295895,0.0710480326618,0.0705770480237,0.0707889911109,0.0711657788214,0.0716132142276,0.0721312973295,0.0722961419529,0.0712835249809,0.0714012711404,0.0709773849661,0.0705534987918,0.0708831880385,0.0706947941832,0.0701531618494,0.0692111925731,0.0689757002541,0.0691405448774,0.0696586279794,0.0694466848922,0.070247358777,0.071259975749,0.0722019450253,0.0718722557786,0.0721548465614,0.0729319712144,0.0728142250548,0.0730497173739,0.0728377742867,0.0730968158377,0.0721548465614,0.0717780588509,0.0713306234447,0.0714012711404,0.0715896649957,0.0715661157638,0.0720606496338,0.0713070742128,0.0712364265171,0.071000934198,0.0714012711404,0.0720371004019,0.0716367634595,0.0719193542424,0.0712128772852,0.0708596388066,0.0706005972556,0.0708831880385,0.0709538357342,0.0709067372704,0.0710951311257,0.0705299495599,0.0708125403428,0.0712128772852,0.0720606496338,0.0719664527062,0.0718016080828,0.0717074111552,0.0714012711404,0.0714954680681,0.0714719188362,0.0711657788214,0.0710244834299,0.0704357526323,0.0700589649218,0.0699176695303,0.069352487
9646,0.069258291037,0.0695408818198,0.0687873063988,0.0690698971817,0.0689757002541,0.0685989125436,0.0693524879646,0.0696350787475,0.0697292756751,0.0696350787475,0.0697763741389,0.0700589649218,0.0698234726027,0.0686224617755,0.0686224617755,0.0679395340502,0.0668562693825,0.0656788077872,0.0657494554829,0.0662675385848,0.0654904139319,0.0646661908152,0.0657494554829,0.0663617355125,0.0664088339763,0.0669504663101,0.0670682124696,0.067492098644,0.067727590963,0.0678453371226,0.0684340679202,0.0686224617755,0.0686931094712,0.0680337309778,0.0672801555568,0.0676098448035,0.0683634202245,0.0689286017903,0.0693053895008,0.0696821772113,0.0693760371965,0.0696821772113,0.0688344048626,0.0689286017903,0.0691640941093,0.0692111925731,0.0692111925731,0.0693289387327,0.0687166587031,0.0683163217607,0.068245674065,0.0677040417311,0.0679630832821,0.0686460110074,0.0692111925731,0.068481166384,0.067727590963,0.0672566063249,0.067492098644,0.0679395340502,0.0689286017903,0.069493783356,0.0694466848922,0.0693524879646,0.0693289387327,0.0693053895008,0.0698234726027,0.0699883172261,0.0697999233708,0.0690227987179,0.0689050525584,0.069493783356,0.0695173325879,0.0694702341241,0.0691405448774,0.0703180064727,0.0695408818198,0.0687873063988,0.0690698971817,0.068999249486,0.0688108556307,0.0612044537249,0.0544458241678,0.0474752515235,0.040787269662,0.0)
m <- c(0.0405831966008,0.0480339269004,0.0547163006379,0.0619109120835,0.0688028376106,0.0684303010957,0.0698738800912,0.0705025354602,0.0695246271084,0.0705723860568,0.070595669589,0.0702697001384,0.0707120872499,0.0701998495418,0.070595669589,0.070851788443,0.0701998495418,0.0700601483487,0.0715735779408,0.0715735779408,0.0715037273443,0.0713873096833,0.0716201450052,0.0715037273443,0.0708052213787,0.0715037273443,0.071596861473,0.0710846237649,0.0702464166062,0.0690356729325,0.0687795540784,0.0689658223359,0.0686864199497,0.069594477705,0.0699902977521,0.0699902977521,0.0698273130268,0.0699437306878,0.0703861177993,0.0711311908293,0.0712708920224,0.0708052213787,0.0707353707821,0.0705723860568,0.0705258189924,0.0702697001384,0.0703628342671,0.0708983555074,0.0699902977521,0.0707353707821,0.0707819378465,0.0707586543143,0.0712708920224,0.0707819378465,0.0701532824774,0.0700368648165,0.0699902977521,0.0696643283015,0.0700135812843,0.0690589564647,0.068849404675,0.0693150753187,0.0690822399969,0.0687562705463,0.0692917917865,0.0692917917865,0.0698971636234,0.0694082094475,0.0687097034819,0.0684535846278,0.0690589564647,0.0689425388038,0.0691520905934,0.0690356729325,0.0690356729325,0.068593285821,0.0673592586151,0.0680577645807,0.0683837340313,0.0690589564647,0.0681508987094,0.0675222433404,0.0674058256795,0.0678016457266,0.0679646304519,0.0681508987094,0.0679180633876,0.0677783621944,0.0676619445335,0.0671729903576,0.067475676276,0.0682440328382,0.0681741822416,0.0690123894003,0.0687795540784,0.0690589564647,0.0697108953659,0.0698040294946,0.0708750719752,0.0713640261511,0.0722720839064,0.0724350686317,0.0711079072971,0.0701765660096,0.0698273130268,0.0700135812843,0.0712708920224,0.0706422366534,0.0704326848637,0.0699437306878,0.0691753741256,0.0691055235291,0.0696876118337,0.0698040294946,0.0705491025246,0.0710846237649,0.069594477705,0.0699437306878,0.0702464166062,0.0705491025246,0.0712476084902,0.0713873096833,0.0712010414258,0.0716899956017,0.0722022333098,0.0708285049108,0.0704792519281,0.0701765660096,0.0699437306878,0.0701067154131,0.0698040294946,0.069850596559,0.0704094013315,0.069850596559,0.0685467187566,0.0690123894003,0.0695479106406,0.0700601483487,0.070223133074,0.0700601483487,0.0697108953659,0.0692917917865,0.0689891058681,0.0692452247222,0.0694314929797,0.0705258189924,0.0707120872499,0.0707353707821,0.0707586543143,0.0710846237649,0.0709914896362,0.0709216390396,0.070595669589,0.0703395507349,0.0704326848637,0.0695479106406,0.0697108953659,0.0688726882072,0.0682905999025,0.06847686816,0.0685467187566,0.0691288070612,0.0696177612372,0.0704559683959,0.0703628342671,0.0704326848637,0.0700135812843,0.071224324958,0.0708983555074,0.0702929836706,0.0696643283015,0.0687562705463,0.0688028376106,0.0679879139841,0.0684303010957,0.068220749306,0.067848212791,0.0671264232932,0.0669401550357,0.0666840361817,0.0675222433404,0.0678249292588,0.0681741822416,0.0686398528853,0.0681508987094,0.0690822399969,0.0695246271084,0.0689891058681,0.0691055235291,0.0686631364175,0.0689425388038,0.0688726882072,0.0684070175635,0.067848212791,0.0676386610013,0.0673126915507,0.0672428409541,0.0683371669669,0.068220749306,0.0680810481129,0.0673126915507,0.0674291092116,0.0681276151772,0.0686864199497,0.0693616423831,0.0687562705463,0.0697574624303,0.0698040294946,0.0704094013315,0.0709914896362,0.0719228309236,0.0719926815202,0.0713640261511,0.0711544743615,0.0712941755546,0.0710380567005,0.0704326848637,0.0705025354602,0.0704326848637,0.0702929836706,0.0700601483487,0.0701067154131,0.0702929836706,
0.0707120872499,0.0702929836706,0.070968206104,0.0707120872499,0.0706888037177,0.0702697001384,0.0705025354602,0.070223133074,0.0707353707821,0.070851788443,0.0709914896362,0.0713174590868,0.0706422366534,0.0700368648165,0.0700368648165,0.0712010414258,0.0717598461983,0.0713407426189,0.070851788443,0.0712941755546,0.0704094013315,0.0700601483487,0.0702464166062,0.0713174590868,0.0701765660096,0.0693383588509,0.0691288070612,0.0696876118337,0.0697574624303,0.0697108953659,0.0701299989453,0.0706655201855,0.070851788443,0.0704559683959,0.0713873096833,0.0720159650523,0.0721323827133,0.0711544743615,0.0714804438121,0.0712476084902,0.070851788443,0.0704792519281,0.0706888037177,0.0705491025246,0.0706655201855,0.0705723860568,0.0706422366534,0.0715270108764,0.0710613402327,0.0706422366534,0.070968206104,0.0712476084902,0.0714571602799,0.0716667120696,0.0715270108764,0.0716899956017,0.0714338767477,0.0715270108764,0.0714105932155,0.0718995473914,0.0720858156489,0.0713640261511,0.0704792519281,0.070595669589,0.0701299989453,0.0701299989453,0.0705025354602,0.0710846237649,0.0713174590868,0.070595669589,0.0705025354602,0.0713640261511,0.0720625321167,0.0724117850995,0.0722022333098,0.0715270108764,0.0713640261511,0.0704326848637,0.0701998495418,0.0708983555074,0.0708750719752,0.0710846237649,0.0708750719752,0.0696876118337,0.0703162672027,0.0710147731683,0.0700135812843,0.0702697001384,0.0701067154131,0.0704094013315,0.0708285049108,0.070223133074,0.0705025354602,0.0714105932155,0.0712708920224,0.0702697001384,0.0713407426189,0.0722720839064,0.0728076051467,0.0723186509708,0.0716667120696,0.0723186509708,0.0718762638592,0.0714571602799,0.0711311908293,0.0722022333098,0.0722953674386,0.0719461144558,0.0726911874858,0.0732965593226,0.073226708726,0.0725282027604,0.0729938734042,0.0731568581295,0.0737855134985,0.0734362605157,0.0736923793698,0.0723885015673,0.0713407426189,0.070595669589,0.071596861473,0.0721090991811,0.0714338767477,0.0718995473914,0.072225516842,0.0739950652882,0.0734595440479,0.0753920772194,0.0755550619447,0.0763699855712,0.0759508819918,0.0758810313953,0.0773711774552,0.0770452080046,0.0772780433265,0.0759043149275,0.0768123726827,0.0763932691034,0.0766726714896,0.0768356562149,0.0776505798414,0.0772780433265,0.0771383421333,0.078465503468,0.0780696834208,0.0783956528714,0.0779299822277,0.0772547597943,0.0771150586012,0.0774875951161,0.0770452080046,0.0776738633736,0.0776505798414,0.0769287903437,0.0777669975024,0.0779765492921,0.0780696834208,0.0791407259014,0.0788147564508,0.0781861010817,0.0781162504852,0.078349085807,0.0781628175495,0.0783956528714,0.0774177445196,0.0771616256655,0.0772780433265,0.0768123726827,0.0768822232793,0.0767658056184,0.0773246103908,0.0774410280518,0.0776505798414,0.0779299822277,0.0779066986955,0.0779299822277,0.078349085807,0.0780463998886,0.0783723693392,0.0785819211289,0.0778601316311,0.0779765492921,0.077720430438,0.077091775069,0.0769520738759,0.0767425220862,0.0760440161206,0.0770452080046,0.0783723693392,0.0783258022748,0.0783723693392,0.0784189364036,0.0776505798414,0.0777669975024,0.0792105764979,0.0795365459485,0.0798857989313,0.0788380399829,0.0781395340174,0.0782093846139,0.0785120705323,0.0782093846139,0.0789777411761,0.0791174423692,0.078349085807,0.0781861010817,0.0779765492921,0.0783956528714,0.0782792352105,0.0786517717255,0.0786052046611,0.0792105764979,0.0797693812704,0.0794899788842,0.079466695352,0.0791872929657,0.0796296800773,0.0803281860429,0.0798159483348,0.0792804270945,0.0792338600301,0.079350277691,0.0792105764979,0.0794201
282876,0.0795132624164,0.0804213201716,0.0801419177854,0.0802117683819,0.0809801249441,0.0813060943947,0.0818648991672,0.0814923626522,0.081212960266,0.0811198261372,0.0806075884291,0.0800720671888,0.0808869908153,0.080467887236,0.0807938566866,0.0803281860429,0.0796296800773,0.0806541554935,0.0810034084763,0.081212960266,0.0816786309097,0.0823538533431,0.0818183321028,0.0819114662315,0.0815389297166,0.0821675850856,0.0827031063259,0.0820744509568,0.0809801249441,0.0815156461844,0.0813060943947,0.0800255001245,0.0798159483348,0.0799556495279,0.079350277691,0.0788846070473,0.0787681893864,0.0785819211289,0.0799323659957,0.0798857989313,0.0798857989313,0.0809335578797,0.08146907912,0.0808171402188,0.0815854967809,0.0819347497637,0.0813992285234,0.0809335578797,0.0801186342532,0.0803281860429,0.0814923626522,0.0811198261372,0.0805377378325,0.0803049025107,0.0807007225579,0.0805144543004,0.0804446037038,0.08072400609,0.0812362437981,0.0800487836566,0.0790475917726,0.0795132624164,0.0798159483348,0.0807007225579,0.080095350721,0.080467887236,0.0810034084763,0.0806774390257,0.0805144543004,0.081096542605,0.0815389297166,0.081096542605,0.0807007225579,0.0815622132487,0.0817484815062,0.0819580332959,0.0814923626522,0.0816553473775,0.0812828108625,0.0817019144419,0.0817251979741,0.08221415215,0.0823538533431,0.0815854967809,0.0822374356822,0.0814923626522,0.0825634051328,0.0830290757765,0.0837042982099,0.0831454934374,0.0826099721971,0.0825866886649,0.083098926373,0.083215344034,0.0828893745834,0.0838905664674,0.0834947464202,0.0840535511927,0.0837741488065,0.0845425053686,0.0846123559652,0.0834248958237,0.0838905664674,0.0835180299524,0.0836344476133,0.0830290757765,0.0836344476133,0.0829359416477,0.0833783287593,0.0833550452271,0.0833783287593,0.0843096700467,0.0831920605018,0.082842807519,0.0820278838925,0.082470271004,0.0823305698109,0.0823538533431,0.0821443015534,0.0814923626522,0.0817717650384,0.0827496733903,0.0836111640811,0.0833550452271,0.0836810146777,0.0833317616949,0.0827962404546,0.0825866886649,0.0823538533431,0.0827962404546,0.0833783287593,0.0821443015534,0.0820278838925,0.0832386275662,0.0832386275662,0.0841466853214,0.0853108619307,0.0851013101411,0.0858929502354,0.0857998161067,0.0859162337676,0.0877789163425,0.0880350351966,0.0885007058403,0.0897813001105,0.0907359249302,0.0908756261233,0.0925287569085,0.0938559182431,0.0956487502215,0.0956254666893,0.0964403903158,0.0973018810067,0.0981400881654,0.0997932189507,0.10000277074,0.101516200332,0.102633809877,0.103704852358,0.105474400804,0.108152007006,0.109642153065,0.113134682893,0.115556170241,0.118932282408,0.122401528704,0.127034951609,0.13141225566,0.134741800762,0.138257614122,0.14324029001,0.147524459933,0.152483852288,0.15793219882,0.16272860645,0.170062919089,0.176000219797,0.183241398306,0.189853921447,0.195767938623,0.200680763914,0.206129110446,0.20862044839,0.21139118872,0.212019844089,0.209924326192,0.205453888012,0.200471212124,0.195139283254,0.190273025027,0.184359007851,0.177490365856,0.17092440978,0.163659947738,0.158258168271,0.152483852288,0.148292816495,0.143729244186,0.140259997891,0.136557916273,0.133274938235,0.131156136806,0.128408680008,0.126476146836,0.123728690038,0.122797348751,0.121516754481,0.120445712,0.119630788373,0.118163925846,0.117488703412,0.11576572203,0.116464227996,0.115090499597,0.114811097211,0.113856472391,0.113227817022,0.112319759267,0.111644536834,0.111877372155,0.111528119173,0.111085732061,0.109874988387,0.11001468958,0.109246333018,0.108291708199,0.107500068104,0.108710811778,0.10
8897080036,0.108314991731,0.106894696267,0.107034397461,0.107267232782,0.107872604619,0.108803945907,0.108990214164,0.11022424137,0.108710811778,0.106871412735,0.106661860946,0.108058872877,0.107639769297,0.106848129203,0.1069179798,0.106266040898,0.107895888151,0.108012305812,0.108547827053,0.109944838984,0.11043379316,0.109432601276,0.109362750679,0.110457076692,0.110503643756,0.109805137791,0.108966930632,0.106848129203,0.106289324431,0.105567534933,0.104938879564,0.105311416079,0.105381266675,0.103658285294,0.102889928731,0.102587242813,0.102121572169,0.10265709341,0.102796794603,0.103914404148,0.104100672405,0.103844553551,0.103704852358,0.104170523002,0.104659477178,0.10500873016,0.103891120615,0.103541867633,0.103588434697,0.102400974556,0.102400974556,0.101003962624,0.100165755466,0.0993741153713,0.0980003869723,0.0966965091699,0.0961377043974,0.0964869573802,0.0950200948525,0.0944147230156,0.0928314428269,0.0940189029684,0.0941353206294,0.0944147230156,0.0947406924662,0.0949502442559,0.0959980032043,0.0953460643031,0.095835018479,0.0967896432986,0.097581283393,0.0971854633458,0.0972320304102,0.097581283393,0.0976744175217,0.0987221764701,0.0969991950883,0.0969991950883,0.0963705397193,0.0959281526077,0.0958583020112,0.0950200948525,0.0948105430628,0.0949269607237,0.0947639759984,0.0941818876937,0.0951132289812,0.0956720337537,0.0964171067836,0.0961377043974,0.0961377043974,0.0973950151355,0.0977908351826,0.0973717316033,0.0963938232514,0.0953227807709,0.0951830795778,0.0951597960456,0.0958583020112,0.0965102409124,0.0960445702687,0.0952994972387,0.0946941254018,0.094717408934,0.0944845736122,0.0953926313674,0.0958583020112,0.0955090490284,0.095835018479,0.0958117349468,0.0958815855433,0.0954857654962,0.0952762137065,0.0948571101271,0.0956720337537,0.0951365125134,0.0940654700328,0.0945777077409,0.0929944275522,0.0925520404407,0.0928314428269,0.0938559182431,0.0940421865006,0.0950200948525,0.0951132289812,0.095718600818,0.0970690456849,0.0968827774274,0.0975579998608,0.0976744175217,0.0974415821998,0.0959048690755,0.0957884514146,0.0949502442559,0.0947639759984,0.0949269607237,0.0943215888869,0.093832634711,0.0939024853075,0.0943215888869,0.0939956194363,0.0944380065478,0.0943215888869,0.0951365125134,0.0952994972387,0.0956021831571,0.0960445702687,0.0961842714618,0.0961144208652,0.0953926313674,0.0954159148996,0.0956953172859,0.0964171067836,0.0958117349468,0.0953693478352,0.0953693478352,0.0945078571444,0.0949968113203,0.09446129008,0.0939723359041,0.0937395005822,0.0939257688397,0.0940189029684,0.0948571101271,0.0951830795778,0.0941120370972,0.09446129008,0.0933203970029,0.0938792017753,0.0941120370972,0.0941120370972,0.0947406924662,0.0951830795778,0.0946242748053,0.0953693478352,0.0970690456849,0.0974881492642,0.0990248623885,0.0988153105988,0.0988851611954,0.0987221764701,0.0986523258735,0.0969991950883,0.096463673848,0.0959747196721,0.0946475583375,0.0948105430628,0.0936463664535,0.0943681559512,0.0949502442559,0.0950200948525,0.0944380065478,0.0950200948525,0.0945777077409,0.094833826595,0.0956487502215,0.0951365125134,0.0959980032043,0.0958815855433,0.0952529301743,0.0961377043974,0.0968129268308,0.0969759115561,0.0976278504573,0.0976278504573,0.0971621798136,0.0979538199079,0.0975579998608,0.0971854633458,0.0979771034401,0.0967430762342,0.0966965091699,0.0966499421055,0.0961377043974,0.0957884514146,0.0954857654962,0.0945544242087,0.0940421865006,0.0938792017753,0.0931574122775,0.0942517382903,0.093460098196,0.0934368146638,0.0925054733763,0.0924821898441,0.0915508485567,0
.0910386108486,0.0898278671749,0.0876624986816,0.0867544409263,0.0849383254158,0.0842398194502,0.0828195239868,0.0838207158708,0.0823538533431,0.0821443015534,0.0817950485706,0.0818881826994,0.0815389297166,0.0811663932016,0.0815156461844,0.0808869908153,0.0805610213647,0.0797460977382,0.0793037106267,0.0789078905795,0.0786750552576,0.078092966953,0.0782559516783,0.0783025187427,0.0772780433265,0.0768822232793,0.0765562538287,0.0754619278159,0.075601629009,0.0755317784125,0.0740649158847,0.074344318271,0.0746470041894,0.0731335745973,0.0730404404685,0.0725282027604,0.0726911874858,0.0730637240007,0.0727843216145,0.0725514862926,0.0724117850995,0.0710147731683,0.0708285049108,0.071852980327,0.0721789497777,0.0723652180351,0.0722022333098,0.071852980327,0.0715037273443,0.0717831297305,0.0726679039536,0.0739019311594,0.0742511841422,0.073715662902,0.0745771535928,0.0748332724469,0.0751592418975,0.0757413302022,0.0761371502493,0.0760440161206,0.0758810313953,0.0758344643309,0.0758810313953,0.075345510155,0.0743676018032,0.074344318271,0.0736690958376,0.0735061111123,0.0737389464341,0.073226708726,0.0734129769835,0.0724816356961,0.0718064132627,0.0723885015673,0.0728308886789,0.072341934503,0.072225516842,0.0711544743615,0.0708052213787,0.0711311908293,0.0710380567005,0.0722488003742,0.0720625321167,0.0718762638592,0.0710147731683,0.0713640261511,0.0721090991811,0.0729473063398,0.0727144710179,0.0716899956017,0.0723186509708,0.0718296967949,0.0721323827133,0.072341934503,0.0729240228076,0.0730171569364,0.073599245241,0.0729938734042,0.0729938734042,0.0742744676744,0.0737855134985,0.074344318271,0.0743908853354,0.0738786476273,0.0734362605157,0.0732965593226,0.0726213368892,0.0725514862926,0.0723186509708,0.0716434285374,0.0718762638592,0.0715735779408,0.0709449225718,0.0715037273443,0.0716667120696,0.0716201450052,0.0709914896362,0.0724583521639,0.0727843216145,0.0728774557432,0.0724350686317,0.0724350686317,0.072598053357,0.0734595440479,0.0739252146916,0.0744141688675,0.0754153607516,0.0738087970307,0.0740183488204,0.0740881994169,0.0739019311594,0.073343126387,0.0739019311594,0.0723885015673,0.0722488003742,0.0715735779408,0.0713174590868,0.0720625321167,0.0715502944086,0.071852980327,0.0721556662455,0.0724117850995,0.0724117850995,0.0729007392754,0.0724350686317,0.072225516842,0.072341934503,0.0723885015673,0.0728774557432,0.0720858156489,0.0719926815202,0.0713640261511,0.0711079072971,0.0716434285374,0.0717365626661,0.0725049192283,0.0715735779408,0.071596861473,0.0710147731683,0.071596861473,0.0721090991811,0.0725282027604,0.071852980327,0.0717132791339,0.0710380567005,0.0710846237649,0.0717598461983,0.0713640261511,0.0714571602799,0.0714338767477,0.0709914896362,0.0719228309236,0.0721789497777,0.072225516842,0.0738553640951,0.0734362605157,0.0735293946445,0.073971781756,0.0741580500135,0.0736690958376,0.0735759617088,0.0728774557432,0.0728541722111,0.0728308886789,0.0716667120696,0.0716434285374,0.0720858156489,0.0724117850995,0.072970589872,0.0728774557432,0.0728076051467,0.0727377545501,0.0734362605157,0.0730404404685,0.0733896934513,0.0731801416617,0.0732965593226,0.0726213368892,0.0725049192283,0.0726446204214,0.0726213368892,0.0726446204214,0.0720625321167,0.0725049192283,0.0719228309236,0.071969397988,0.0708983555074,0.0709449225718,0.070595669589,0.0708285049108,0.0706655201855,0.0707819378465,0.0711311908293,0.0705723860568,0.0712941755546,0.0714338767477,0.071852980327,0.0718995473914,0.0722720839064,0.0726679039536,0.0727610380823,0.0726679039536,0.072225516842,0.0731335745973,0
.0728541722111,0.0724816356961,0.0727144710179,0.0729938734042,0.0729240228076,0.0724350686317,0.0726679039536,0.0725282027604,0.0727610380823,0.0721090991811,0.0728076051467,0.0728774557432,0.0723186509708,0.072225516842,0.0724816356961,0.0730637240007,0.072341934503,0.0726213368892,0.0727610380823,0.0726911874858,0.0722953674386,0.0728076051467,0.0736225287732,0.0736225287732,0.0727144710179,0.0720392485845,0.0726213368892,0.0727843216145,0.0723885015673,0.0719926815202,0.0716667120696,0.0712708920224,0.0707819378465,0.0700834318809,0.0698273130268,0.070223133074,0.0704792519281,0.0704094013315,0.070851788443,0.0711777578936,0.0709449225718,0.0713407426189,0.0713407426189,0.0717598461983,0.0726911874858,0.0727144710179,0.072598053357,0.0719926815202,0.0717831297305,0.0719228309236,0.0720159650523,0.0715502944086,0.0718995473914,0.0724816356961,0.0721090991811,0.0722022333098,0.0720625321167,0.0725282027604,0.0725282027604,0.0727610380823,0.0731335745973,0.0739484982238,0.0738553640951,0.0733664099192,0.0739950652882,0.0739252146916,0.0736923793698,0.073715662902,0.0740183488204,0.0740183488204,0.0742279006101,0.0735759617088,0.0730870075329,0.0729240228076,0.0726213368892,0.0727377545501,0.0725282027604,0.072970589872,0.0728308886789,0.0726911874858,0.0651240395252,0.0580691292727,0.0509676519559,0.0439825923,0.0)
ycorr <- c(-8.27358908567e-06,-8.66608098524e-06,-9.82701030176e-06,-1.17779831261e-05,-1.45428142131e-05,-1.81320491947e-05,-2.25739396861e-05,-2.7336738214e-05,-3.24282925172e-05,-3.78557354552e-05,-4.3625465525e-05,-4.97158737741e-05,-5.6100365226e-05,-6.27227186274e-05,-6.95288746118e-05,-7.64386113897e-05,-8.3348263303e-05,-9.01150002235e-05,-9.66619714788e-05,-0.000102876212786,-0.000108691068188,-0.00011404885968,-0.000118906360287,-0.000123222267144,-0.000126997139359,-0.000130238233983,-0.000132965627712,-0.000135262756553,-0.000137200784871,-0.000138864330236,-0.000140326040595,-0.000141632521634,-0.000142841047426,-0.000143995105989,-0.000145127783009,-0.000146268786687,-0.000147456026675,-0.000148723258692,-0.000150067703733,-0.000151488639471,-0.000152953024274,-0.000154465894296,-0.000156006139489,-0.000157579148975,-0.000159176664081,-0.00016080040738,-0.000162445821371,-0.000164080169322,-0.000165683213442,-0.000167250317846,-0.000168794028462,-0.000170317989924,-0.000171810762954,-0.000173278963077,-0.00017471401496,-0.000176122864801,-0.00017749908313,-0.000178857662117,-0.000180233361695,-0.000181641950226,-0.000183070455399,-0.000184502611471,-0.00018595742458,-0.000187443997276,-0.000188979678756,-0.000190564664381,-0.000192201537493,-0.000193903418081,-0.000195633901299,-0.000197343465988,-0.000199046649589,-0.000200770933939,-0.000202507753201,-0.000204240468269,-0.000205963726411,-0.000207653455293,-0.000209319388318,-0.000210951972625,-0.000212537581161,-0.000214110713323,-0.000215678267721,-0.000217202496049,-0.000218671859232,-0.000220106266719,-0.000221493142198,-0.000222815290002,-0.000224077885098,-0.00022522963304,-0.00022628967677,-0.000227262871482,-0.00022815132417,-0.000228973742015,-0.000229746552308,-0.00023044448429,-0.00023107233542,-0.000231652185989,-0.000232190490336,-0.000232709406786,-0.000233237481799,-0.000233774184886,-0.000234301370027,-0.000234800958871,-0.00023525519492,-0.000235666855918,-0.000236051640315,-0.000236402985414,-0.000236694193574,-0.00023693339424,-0.000237101800907,-0.000237190988161,-0.000237206056903,-0.000237152549204,-0.000237058822651,-0.000236939726798,-0.000236789402963,-0.000236603211318,-0.000236400110905,-0.0002362194884,-0.000236059284869,-0.000235930591034,-0.000235824299867,-0.000235763833888,-0.000235754519105,-0.000235780236558,-0.000235849805018,-0.000235982825724,-0.000236200250504,-0.000236472412332,-0.000236779318116,-0.000237125990974,-0.000237516972,-0.000237954916118,-0.000238407794934,-0.000238893695784,-0.000239407206639,-0.000239944735072,-0.000240462741195,-0.000240962221168,-0.000241459785786,-0.000241967353134,-0.000242491995066,-0.000243027049976,-0.000243593048665,-0.000244198010019,-0.000244829069484,-0.000245494045603,-0.000246205877102,-0.000246993174457,-0.000247848420939,-0.000248756994061,-0.000249707314519,-0.00025069229543,-0.000251698259309,-0.000252706538583,-0.000253699169377,-0.000254661410968,-0.000255591897875,-0.00025646847176,-0.000257304501136,-0.000258103103257,-0.000258870683403,-0.000259604801129,-0.000260309792418,-0.000260999187119,-0.000261683355979,-0.000262391149495,-0.00026314124502,-0.00026393332025,-0.000264777948014,-0.000265652814287,-0.000266543281915,-0.000267448285281,-0.000268372647397,-0.000269310875242,-0.000270274724756,-0.000271252801794,-0.000272218679309,-0.000273178948798,-0.000274106195153,-0.000275010548659,-0.000275923863844,-0.000276865774587,-0.000277836799211,-0.000278845329128,-0.000279886705693,-0.000280957559598,-0.000282083371473,-0.0002832548984
23,-0.000284473882159,-0.000285750620633,-0.000287044684008,-0.000288314867897,-0.000289552383852,-0.000290750355165,-0.00029190059098,-0.000293006062054,-0.000294039581848,-0.000294989124224,-0.000295866620955,-0.000296667808842,-0.000297407579932,-0.000298114940844,-0.000298805362135,-0.000299479379693,-0.000300141645632,-0.000300789384762,-0.00030142686084,-0.000302063215436,-0.000302695458046,-0.000303310231833,-0.000303895326316,-0.000304444439755,-0.000304953368308,-0.000305415358815,-0.000305813629669,-0.00030615787801,-0.000306429829885,-0.000306634001988,-0.000306764193973,-0.000306822557425,-0.000306827239613,-0.00030679330836,-0.000306732412054,-0.000306655324049,-0.000306582052098,-0.00030650617353,-0.000306441119522,-0.000306396210056,-0.000306387268033,-0.000306436894584,-0.00030654824602,-0.000306735128337,-0.000306988843659,-0.000307298490101,-0.000307665737616,-0.000308098138827,-0.0003086114617,-0.000309203365861,-0.000309863122474,-0.000310557380905,-0.000311270398202,-0.000311990304609,-0.000312679478768,-0.000313328665641,-0.000313936784037,-0.000314472572557,-0.000314930117471,-0.000315295607581,-0.000315578450764,-0.000315812113892,-0.000316005784611,-0.000316158552031,-0.000316291116318,-0.00031642404223,-0.000316565712633,-0.000316739354677,-0.000316946805922,-0.000317193897026,-0.00031748628963,-0.000317787626426,-0.000318105048605,-0.000318440136607,-0.000318791816832,-0.000319147846032,-0.000319502244124,-0.000319837757053,-0.000320145998773,-0.000320424089492,-0.000320646450503,-0.000320831832585,-0.000320961787372,-0.000321023874251,-0.000321012119115,-0.000320924789095,-0.000320768493021,-0.000320550486402,-0.000320280407411,-0.000319948270165,-0.000319575410562,-0.000319161519578,-0.000318728315857,-0.000318291576913,-0.000317859672113,-0.000317429076408,-0.000317014983238,-0.000316599465526,-0.00031617868269,-0.000315747977636,-0.000315288967775,-0.000314801721666,-0.000314273428986,-0.000313678081963,-0.000313016145234,-0.000312304911893,-0.000311533082683,-0.000310711771664,-0.000309856768384,-0.000308984715224,-0.000308103724307,-0.000307233010627,-0.000306374183616,-0.000305543383901,-0.000304741895341,-0.000303971610054,-0.000303194392197,-0.000302426431667,-0.000301653752954,-0.000300867501307,-0.00030006678533,-0.000299255134994,-0.000298426462991,-0.000297575271066,-0.000296708183644,-0.000295815243836,-0.000294928133677,-0.000294049817358,-0.000293165372101,-0.000292286143612,-0.000291422657396,-0.00029054599931,-0.000289665403477,-0.000288787796468,-0.000287918203141,-0.000287051025823,-0.000286186586506,-0.000285304251479,-0.00028440345754,-0.000283483906967,-0.000282527854949,-0.000281546292138,-0.000280549683978,-0.000279527665346,-0.000278472031305,-0.000277375202908,-0.000276234689925,-0.000275057989012,-0.00027385309052,-0.000272634992124,-0.000271418780678,-0.000270191053595,-0.000268946351493,-0.000267684851041,-0.000266404969931,-0.000265117067425,-0.000263816705149,-0.000262499756856,-0.000261157058825,-0.000259773626813,-0.000258333286067,-0.000256830827279,-0.000255272621521,-0.000253648427596,-0.00025195900614,-0.000250201550043,-0.000248376349942,-0.000246478827225,-0.000244525708078,-0.000242509271748,-0.000240436998389,-0.000238320250822,-0.000236165734282,-0.000233970868521,-0.000231728833088,-0.000229434913655,-0.000227094231806,-0.000224717305498,-0.00022231233117,-0.000219888210928,-0.000217448406154,-0.000214993918241,-0.000212507751047,-0.000210003387901,-0.000207502182149,-0.000205022115811,-0.00020256540975,-0.00020012012190
5,-0.000197688676331,-0.000195263717865,-0.000192844726565,-0.000190447964765,-0.000188086214614,-0.000185771348184,-0.000183503167249,-0.000181263832008,-0.000179053470299,-0.000176899870141,-0.000174808320923,-0.00017278151644,-0.000170832144793,-0.000168948790324,-0.000167122883852,-0.000165348611632,-0.000163624617934,-0.000161950954317,-0.000160343964211,-0.000158792481,-0.000157292040946,-0.000155848872816,-0.000154446898051,-0.000153096404333,-0.000151801804645,-0.000150563687586,-0.000149362178794,-0.000148215664792,-0.000147098380258,-0.00014599939828,-0.000144904296331,-0.000143795545165,-0.000142663255517,-0.000141510646949,-0.000140320312658,-0.000139088056785,-0.000137833457585,-0.000136538328934,-0.000135190941156,-0.000133806622376,-0.000132391169819,-0.000130956449696,-0.000129519226558,-0.00012807783156,-0.000126635737686,-0.000125187859592,-0.00012373338738,-0.000122269650718,-0.000120826652383,-0.000119400389189,-0.000117985397971,-0.000116584566686,-0.00011520040447,-0.000113811485342,-0.000112419242126,-0.000111016022067,-0.000109613247515,-0.000108206966663,-0.000106788479866,-0.000105349343662,-0.000103901372675,-0.000102441631213,-0.000100959970332,-9.94693716778e-05,-9.79776975023e-05,-9.65135639481e-05,-9.50558556982e-05,-9.3609024579e-05,-9.21623476419e-05,-9.07154844964e-05,-8.92721292933e-05,-8.78171658835e-05,-8.63510506809e-05,-8.48799517986e-05,-8.33829246819e-05,-8.18589357728e-05,-8.0301321692e-05,-7.87232668844e-05,-7.71222700978e-05,-7.55154149912e-05,-7.38921154279e-05,-7.22691009747e-05,-7.06414242481e-05,-6.90172190452e-05,-6.74162266103e-05,-6.58281304295e-05,-6.4247730196e-05,-6.26590175493e-05,-6.10714774826e-05,-5.94676529095e-05,-5.78414905184e-05,-5.61875292233e-05,-5.44990475308e-05,-5.2780752901e-05,-5.10168156648e-05,-4.91965013959e-05,-4.73535250023e-05,-4.5499685369e-05,-4.36387964435e-05,-4.17658106734e-05,-3.98954094874e-05,-3.80396078772e-05,-3.62253252729e-05,-3.44417580236e-05,-3.26969487514e-05,-3.10038727309e-05,-2.9357760087e-05,-2.77488141876e-05,-2.61901171609e-05,-2.46916752267e-05,-2.32657005752e-05,-2.19136023364e-05,-2.06204585401e-05,-1.9384208401e-05,-1.82086305033e-05,-1.7092192315e-05,-1.60233002363e-05,-1.49957365673e-05,-1.39975776374e-05,-1.30202469148e-05,-1.20457082407e-05,-1.10689461919e-05,-1.00864272594e-05,-9.08915832352e-06,-8.0757383222e-06,-7.04710400968e-06,-5.99973407346e-06,-4.94569652451e-06,-3.88817967953e-06,-2.83472462818e-06,-1.77750397346e-06,-7.21028103789e-07,3.42992049901e-07,1.40128164814e-06,2.44758585899e-06,3.49749504207e-06,4.53390398504e-06,5.55384784953e-06,6.55459762473e-06,7.54163854644e-06,8.5071282224e-06,9.46030337383e-06,1.03927273627e-05,1.13034230573e-05,1.21966311069e-05,1.30652487302e-05,1.39264508826e-05,1.47888239801e-05,1.5657955907e-05,1.65390863113e-05,1.74475143214e-05,1.83850573161e-05,1.9347352259e-05,2.03471453711e-05,2.13907432596e-05,2.24877586469e-05,2.36303584736e-05,2.48236165652e-05,2.60941707636e-05,2.74438965007e-05,2.88665120097e-05,3.03494670335e-05,3.19179698297e-05,3.35702225291e-05,3.53004172377e-05,3.71065097767e-05,3.89970358603e-05,4.09782681302e-05,4.30225815063e-05,4.5101890061e-05,4.7202179772e-05,4.93263779167e-05,5.14548285106e-05,5.35759232857e-05,5.56843442946e-05,5.77611665572e-05,5.97918871695e-05,6.17529069135e-05,6.36316636021e-05,6.54294284388e-05,6.71494869104e-05,6.880035317e-05,7.03622316961e-05,7.18323060682e-05,7.32000409868e-05,7.44685498789e-05,7.56397131501e-05,7.67002772327e-05,7.76606237027e-05,7.85273057465e-05,7.93011394578e-05,7.99788
10499e-05,8.05745892949e-05,8.10958802388e-05,8.15507948887e-05,8.19366004228e-05,8.22610583537e-05,8.25546673941e-05,8.28297147508e-05,8.30987527321e-05,8.33807782993e-05,8.3685162899e-05,8.40259984167e-05,8.44157239905e-05,8.48787623835e-05,8.54503908962e-05,8.61508636253e-05,8.69907475629e-05,8.79757560763e-05,8.91287298061e-05,9.04644728201e-05,9.19791087142e-05,9.3681883815e-05,9.55850499156e-05,9.77890359463e-05,0.00010038508749,0.000103448502711,0.000107044967047,0.000111690673419,0.000115755079026,0.000119713132431,0.00012362119076,0.000127568297771,0.00013163916721,0.000135928475171,0.000140353096752,0.00014484536064,0.000149318090955,0.000153733906341,0.000157610237445,0.000162568476969,0.000168152142197,0.000174311139364,0.000180988528141,0.000188111642489,0.000195589221316,0.000203424288668,0.000211630299921,0.000220231583629,0.000229218190512,0.000238581268038,0.000248351891047,0.000258538652664,0.000269151939409,0.000280194089294,0.000291671368062,0.000303591921262,0.000315939080811,0.00032869103606,0.000341841895335,0.000355403232892,0.000369351567554,0.000383660075893,0.000398295054058,0.00041322999139,0.000428416301629,0.000443813105167,0.000459384108867,0.000475079398188,0.000490833848843,0.000506555939554,0.00052213354504,0.000537462146937,0.000552444833161,0.00056696027551,0.000580883151749,0.000594088753359,0.000606434350336,0.00061778069262,0.00062799431442,0.0006369532408,0.000644578320127,0.000650801417834,0.000655573139583,0.000658854082652,0.000660643695102,0.000660959270139,0.000659856323331,0.000657412213981,0.000653724880021,0.000648918359962,0.000643134635634,0.000636505221761,0.000629157677251,0.0006212460726,0.0006129299145,0.000604339248342,0.000595586096698,0.000586781298803,0.000578044679982,0.000569463444658,0.000561101024432,0.000553023584291,0.000545273631381,0.000537888491864,0.000530868920577,0.000524213651099,0.000517933405919,0.00051204515112,0.000506517885579,0.0005013299626,0.00049646815503,0.000491912269035,0.000487647938368,0.000483655974718,0.000479921197524,0.000476441075131,0.000473193200173,0.000470157487034,0.00046732178454,0.000464693115194,0.000462253192184,0.000459993513806,0.000457905254355,0.000455962179786,0.000454151749551,0.000452468901991,0.000450913541188,0.000449470656418,0.000448139308545,0.000446897554989,0.000445735133059,0.000444634637911,0.000443576304261,0.000442558771957,0.000441587488112,0.000440655876445,0.000439760125729,0.000438895638326,0.000438051236807,0.000437229598704,0.000436430315022,0.000435645235515,0.000434886465442,0.000434156857679,0.000433445312166,0.000432727382292,0.000431993510326,0.000431225455108,0.000430396882514,0.000429506306731,0.00042852894578,0.000427469894778,0.000426328233291,0.000425086706639,0.000423732755303,0.000422275138936,0.000420713420489,0.000419048666742,0.000417305308055,0.000415491105312,0.000413608269597,0.000411651188062,0.000409601946555,0.000407453098461,0.000405209985196,0.000402874550828,0.000400445323953,0.000397931084912,0.000395332613358,0.000392622151524,0.000389802304853,0.000386885957562,0.000383885819402,0.000380835015302,0.000377753233855,0.000374647477683,0.00037153759219,0.000368437637056,0.000365357079832,0.000362325561907,0.000359376841869,0.000356530897918,0.000353806268963,0.000351212011021,0.000348745942195,0.000346421883506,0.000344250153993,0.00034223524175,0.000340384042755,0.000338702986319,0.000337175036986,0.000335800902263,0.000334562883285,0.000333443871034,0.00033243839833,0.000331547610229,0.000330745035137,0.000330024588959,0.000329352885043,0.000328
716020352,0.000328100614814,0.000327496452503,0.000326902554397,0.000326327538388,0.000325779020838,0.000325256546353,0.000324750178982,0.000324261179867,0.000323813407335,0.000323421312449,0.000323090584963,0.000322826864682,0.000322625889445,0.00032247716265,0.000322365131299,0.00032226969532,0.000322199506682,0.000322132110032,0.000322068446643,0.000321972297855,0.000321842173765,0.000321649578675,0.000321388120858,0.00032106229949,0.000320669314766,0.000320211666493,0.000319688928724,0.000319104734253,0.000318466211803,0.000317771242827,0.000317035881864,0.000316278805289,0.000315524128614,0.00031476384917,0.000314005347317,0.000313246649013,0.000312490245239,0.000311758151007,0.000311048925863,0.000310379609001,0.000309763017218,0.000309196993199,0.000308665368662,0.000308170936381,0.000307727069076,0.000307335749077,0.000306999879868,0.000306720375224,0.00030647576462,0.000306258339546,0.000306049889764,0.00030583074258,0.000305586902532,0.000305318588657,0.000304997164563,0.000304619103586,0.000304176254538,0.000303640010627,0.000303009765508,0.000302299066936,0.000301507827053,0.00030064470774,0.0002997259549,0.000298750833368,0.000297723109855,0.000296671483201,0.000295607596674,0.000294557169261,0.000293540746905,0.000292555333895,0.000291606523698,0.000290691632668,0.00028981841291,0.000288997905874,0.000288247769682,0.000287556864537,0.000286910495399,0.000286282447475,0.000285667793266,0.00028508059863,0.000284515966852,0.000283965164057,0.000283440985056,0.000282928384045,0.00028242043879,0.000281928582449,0.000281476977849,0.000281091066443,0.000280782658387,0.000280536564398,0.000280349893312,0.00028024639001,0.000280215223853,0.000280270392568,0.000280415060867,0.0002806449431,0.00028093758777,0.00028128276743,0.000281659794177,0.000282065946638,0.000282507263247,0.000282960495918,0.000283410734767,0.000283854578118,0.000284274082623,0.000284653705339,0.000284986494637,0.000285274231061,0.000285520824852,0.000285730464461,0.000285890794273,0.000286018103767,0.000286133721622,0.000286233401159,0.000286323020694,0.000286395961496,0.000286473557071,0.000286549431241,0.000286614635205,0.0002866628314,0.000286695566422,0.000286700741769,0.000286641445064,0.000286503979238,0.000286286291116,0.000285977700715,0.00028556139892,0.000285020138964,0.000284338160998,0.000283500928385,0.000282475150138,0.000281241925248,0.000279809990469,0.00027817717787,0.000276323400468,0.000274233751414,0.00027190236157,0.000269304941839,0.000266448072048,0.000263330545194,0.000259960143382,0.000256346351963,0.000252483846808,0.000248363178002,0.000243976049299,0.000239340798159,0.000234436597373,0.000229280877638,0.000223893503998,0.000218267539155,0.000212415473079,0.000206337437647,0.000200045012775,0.000193550712575,0.000186889765276,0.000180073479577,0.000173133881392,0.000166112998551,0.000159033410415,0.000151933026939,0.00014483450197,0.000137764248228,0.000130751961687,0.000123828153623,0.000117013258441,0.000110328564021,0.000103806182683,9.74484881662e-05,9.12597190429e-05,8.52316659576e-05,7.93682388419e-05,7.36862299211e-05,6.81850030672e-05,6.28571820559e-05,5.76969280116e-05,5.27046418794e-05,4.78633121595e-05,4.3168980031e-05,3.86157331238e-05,3.42096545091e-05,2.99512010618e-05,2.58289244843e-05,2.18291154192e-05,1.79616215179e-05,1.42233856089e-05,1.06110277317e-05,7.117370856e-06,3.74934175409e-06,5.00988213956e-07,-2.6385476814e-06,-5.66562341918e-06,-8.56975556119e-06,-1.13461687979e-05,-1.39981696412e-05,-1.65231698499e-05,-1.89189257946e-05,-2.1179995274e-05,-2.3312047531e-05,-
2.53275986386e-05,-2.72200825809e-05,-2.89836226593e-05,-3.06412929847e-05,-3.22031090542e-05,-3.36859274253e-05,-3.51007782505e-05,-3.64703132501e-05,-3.78058881934e-05,-3.91238072452e-05,-4.04251559144e-05,-4.17193312551e-05,-4.30419109216e-05,-4.43984840056e-05,-4.57809400987e-05,-4.72163769609e-05,-4.87141056957e-05,-5.02704575495e-05,-5.18992106274e-05,-5.36057118429e-05,-5.53952716544e-05,-5.72851059043e-05,-5.92585377e-05,-6.13159459983e-05,-6.34743772982e-05,-6.57095187927e-05,-6.79993917858e-05,-7.03385094932e-05,-7.27055739719e-05,-7.50786353045e-05,-7.74520220088e-05,-7.98094527711e-05,-8.21403912347e-05,-8.44424406877e-05,-8.66941219041e-05,-8.88741049914e-05,-9.09986033439e-05,-9.30725455653e-05,-9.50912424119e-05,-9.70785690011e-05,-9.90226676971e-05,-0.000100895784783,-0.000102699925346,-0.00010442445296,-0.000106070406709,-0.000107651723682,-0.000109174355529,-0.000110613678834,-0.000111971606833,-0.000113228263863,-0.000114389064711,-0.000115488905898,-0.000116544725238,-0.000117548156654,-0.000118509732522,-0.000119439091463,-0.000120312505843,-0.000121155597028,-0.000121989093638,-0.000122822352644,-0.000123668397752,-0.00012450846018,-0.00012532076049,-0.000126122666217,-0.000126925357037,-0.000127712831982,-0.00012849361635,-0.000129277418342,-0.000130048322094,-0.000130809161107,-0.000131548708595,-0.00013227697774,-0.000133010430675,-0.000133750081729,-0.000134488466986,-0.00013523583744,-0.000136008390709,-0.000136800459212,-0.000137621363863,-0.000138477718274,-0.000139381792061,-0.000140324185927,-0.000141306336285,-0.000142334057015,-0.000143402722662,-0.00014451246099,-0.00014566694395,-0.000146858437674,-0.000148096534493,-0.000149360807181,-0.000150637530345,-0.000151926574186,-0.000153232272987,-0.000154550621585,-0.000155862835882,-0.000157185574661,-0.000158495573867,-0.000159795909393,-0.000161072845669,-0.000162319029168,-0.000163549646814,-0.000164784321387,-0.000166017495305,-0.00016724117354,-0.000168462767529,-0.00016968922486,-0.000170908064995,-0.000172125997459,-0.000173335819215,-0.000174558885378,-0.000175782493858,-0.000177004587623,-0.000178203092585,-0.000179387818872,-0.000180562095621,-0.000181703639065,-0.000182815784368,-0.00018389800846,-0.000184948424559,-0.000185953065076,-0.000186905264408,-0.000187784041345,-0.000188598055246,-0.000189342477661,-0.000190021182389,-0.000190663683126,-0.000191293364289,-0.000191903852575,-0.0001925003762,-0.000193098098826,-0.00019371523229,-0.000194370207072,-0.000195095187027,-0.000195886527333,-0.000196745822442,-0.000197648998233,-0.000198568452393,-0.000199509778968,-0.000200485330008,-0.000201499784316,-0.00020254235986,-0.000203605363241,-0.000204663209035,-0.000205711852271,-0.000206758475733,-0.000207801655906,-0.000208859275412,-0.000209934229464,-0.00021100958938,-0.000212081996858,-0.000213152255601,-0.000214215319455,-0.000215273283012,-0.000216326555191,-0.000217378236316,-0.000218425159505,-0.000219468847962,-0.000220489616881,-0.000221498007368,-0.000222475827773,-0.000223411070394,-0.000224298508212,-0.000225147208125,-0.000225947938145,-0.000226687643007,-0.000227351776863,-0.000227934852895,-0.000228460809296,-0.000228915622281,-0.000229303342985,-0.000229635182204,-0.000229920819162,-0.000230149797582,-0.000230328385741,-0.000230474412052,-0.000230605602,-0.000230729517302,-0.000230833122252,-0.00023092345567,-0.000231027975208,-0.000231145512517,-0.000231273494042,-0.000231423519797,-0.000231623105597,-0.000231863881743,-0.000232137219586,-0.000232451368159,-0.000232819575398,-0.0002
33239581293,-0.00023368841615,-0.000234183399772,-0.000234746093777,-0.000235389498983,-0.000236094244173,-0.000236846369114,-0.000237666835806,-0.000238561468394,-0.00023953245704,-0.000240586295503,-0.00024174125469,-0.000242993645873,-0.000244323438776,-0.000245698274978,-0.000247109362578,-0.00024857562115,-0.000250088490414,-0.000251623011519,-0.000253163685731,-0.000254699156017,-0.000256216214685,-0.00025769631237,-0.000259128168686,-0.000260509150884,-0.00026184099214,-0.000263121169688,-0.000264322882518,-0.000265462815195,-0.000266531082768,-0.000267527105812,-0.000268440894041,-0.000269274915311,-0.000270027433197,-0.000270720289995,-0.000271358785796,-0.000271931463922,-0.000272455207522,-0.000272923248818,-0.000273337833087,-0.000273715735878,-0.000274065646586,-0.0002743733563,-0.000274610763638,-0.000274792490898,-0.00027490901222,-0.000274970499575)
xcorr <- c(-600.0,-598.999165972,-597.998331943,-596.997497915,-595.996663887,-594.995829858,-593.99499583,-592.994161802,-591.993327773,-590.992493745,-589.991659716,-588.990825688,-587.98999166,-586.989157631,-585.988323603,-584.987489575,-583.986655546,-582.985821518,-581.98498749,-580.984153461,-579.983319433,-578.982485405,-577.981651376,-576.980817348,-575.979983319,-574.979149291,-573.978315263,-572.977481234,-571.976647206,-570.975813178,-569.974979149,-568.974145121,-567.973311093,-566.972477064,-565.971643036,-564.970809008,-563.969974979,-562.969140951,-561.968306922,-560.967472894,-559.966638866,-558.965804837,-557.964970809,-556.964136781,-555.963302752,-554.962468724,-553.961634696,-552.960800667,-551.959966639,-550.959132611,-549.958298582,-548.957464554,-547.956630525,-546.955796497,-545.954962469,-544.95412844,-543.953294412,-542.952460384,-541.951626355,-540.950792327,-539.949958299,-538.94912427,-537.948290242,-536.947456214,-535.946622185,-534.945788157,-533.944954128,-532.9441201,-531.943286072,-530.942452043,-529.941618015,-528.940783987,-527.939949958,-526.93911593,-525.938281902,-524.937447873,-523.936613845,-522.935779817,-521.934945788,-520.93411176,-519.933277731,-518.932443703,-517.931609675,-516.930775646,-515.929941618,-514.92910759,-513.928273561,-512.927439533,-511.926605505,-510.925771476,-509.924937448,-508.92410342,-507.923269391,-506.922435363,-505.921601334,-504.920767306,-503.919933278,-502.919099249,-501.918265221,-500.917431193,-499.916597164,-498.915763136,-497.914929108,-496.914095079,-495.913261051,-494.912427023,-493.911592994,-492.910758966,-491.909924937,-490.909090909,-489.908256881,-488.907422852,-487.906588824,-486.905754796,-485.904920767,-484.904086739,-483.903252711,-482.902418682,-481.901584654,-480.900750626,-479.899916597,-478.899082569,-477.89824854,-476.897414512,-475.896580484,-474.895746455,-473.894912427,-472.894078399,-471.89324437,-470.892410342,-469.891576314,-468.890742285,-467.889908257,-466.889074229,-465.8882402,-464.887406172,-463.886572143,-462.885738115,-461.884904087,-460.884070058,-459.88323603,-458.882402002,-457.881567973,-456.880733945,-455.879899917,-454.879065888,-453.87823186,-452.877397832,-451.876563803,-450.875729775,-449.874895746,-448.874061718,-447.87322769,-446.872393661,-445.871559633,-444.870725605,-443.869891576,-442.869057548,-441.86822352,-440.867389491,-439.866555463,-438.865721435,-437.864887406,-436.864053378,-435.863219349,-434.862385321,-433.861551293,-432.860717264,-431.859883236,-430.859049208,-429.858215179,-428.857381151,-427.856547123,-426.855713094,-425.854879066,-424.854045038,-423.853211009,-422.852376981,-421.851542952,-420.850708924,-419.849874896,-418.849040867,-417.848206839,-416.847372811,-415.846538782,-414.845704754,-413.844870726,-412.844036697,-411.843202669,-410.842368641,-409.841534612,-408.840700584,-407.839866555,-406.839032527,-405.838198499,-404.83736447,-403.836530442,-402.835696414,-401.834862385,-400.834028357,-399.833194329,-398.8323603,-397.831526272,-396.830692244,-395.829858215,-394.829024187,-393.828190158,-392.82735613,-391.826522102,-390.825688073,-389.824854045,-388.824020017,-387.823185988,-386.82235196,-385.821517932,-384.820683903,-383.819849875,-382.819015847,-381.818181818,-380.81734779,-379.816513761,-378.815679733,-377.814845705,-376.814011676,-375.813177648,-374.81234362,-373.811509591,-372.810675563,-371.809841535,-370.809007506,-369.808173478,-368.80733945,-367.806505421,-366.805671393,-365.804837364,-364.804003336,-363.803169308,-362.802335279,-361.8015
01251,-360.800667223,-359.799833194,-358.798999166,-357.798165138,-356.797331109,-355.796497081,-354.795663053,-353.794829024,-352.793994996,-351.793160967,-350.792326939,-349.791492911,-348.790658882,-347.789824854,-346.788990826,-345.788156797,-344.787322769,-343.786488741,-342.785654712,-341.784820684,-340.783986656,-339.783152627,-338.782318599,-337.78148457,-336.780650542,-335.779816514,-334.778982485,-333.778148457,-332.777314429,-331.7764804,-330.775646372,-329.774812344,-328.773978315,-327.773144287,-326.772310259,-325.77147623,-324.770642202,-323.769808173,-322.768974145,-321.768140117,-320.767306088,-319.76647206,-318.765638032,-317.764804003,-316.763969975,-315.763135947,-314.762301918,-313.76146789,-312.760633862,-311.759799833,-310.758965805,-309.758131776,-308.757297748,-307.75646372,-306.755629691,-305.754795663,-304.753961635,-303.753127606,-302.752293578,-301.75145955,-300.750625521,-299.749791493,-298.748957465,-297.748123436,-296.747289408,-295.746455379,-294.745621351,-293.744787323,-292.743953294,-291.743119266,-290.742285238,-289.741451209,-288.740617181,-287.739783153,-286.738949124,-285.738115096,-284.737281068,-283.736447039,-282.735613011,-281.734778982,-280.733944954,-279.733110926,-278.732276897,-277.731442869,-276.730608841,-275.729774812,-274.728940784,-273.728106756,-272.727272727,-271.726438699,-270.725604671,-269.724770642,-268.723936614,-267.723102585,-266.722268557,-265.721434529,-264.7206005,-263.719766472,-262.718932444,-261.718098415,-260.717264387,-259.716430359,-258.71559633,-257.714762302,-256.713928274,-255.713094245,-254.712260217,-253.711426188,-252.71059216,-251.709758132,-250.708924103,-249.708090075,-248.707256047,-247.706422018,-246.70558799,-245.704753962,-244.703919933,-243.703085905,-242.702251877,-241.701417848,-240.70058382,-239.699749791,-238.698915763,-237.698081735,-236.697247706,-235.696413678,-234.69557965,-233.694745621,-232.693911593,-231.693077565,-230.692243536,-229.691409508,-228.69057548,-227.689741451,-226.688907423,-225.688073394,-224.687239366,-223.686405338,-222.685571309,-221.684737281,-220.683903253,-219.683069224,-218.682235196,-217.681401168,-216.680567139,-215.679733111,-214.678899083,-213.678065054,-212.677231026,-211.676396997,-210.675562969,-209.674728941,-208.673894912,-207.673060884,-206.672226856,-205.671392827,-204.670558799,-203.669724771,-202.668890742,-201.668056714,-200.667222686,-199.666388657,-198.665554629,-197.664720601,-196.663886572,-195.663052544,-194.662218515,-193.661384487,-192.660550459,-191.65971643,-190.658882402,-189.658048374,-188.657214345,-187.656380317,-186.655546289,-185.65471226,-184.653878232,-183.653044204,-182.652210175,-181.651376147,-180.650542118,-179.64970809,-178.648874062,-177.648040033,-176.647206005,-175.646371977,-174.645537948,-173.64470392,-172.643869892,-171.643035863,-170.642201835,-169.641367807,-168.640533778,-167.63969975,-166.638865721,-165.638031693,-164.637197665,-163.636363636,-162.635529608,-161.63469558,-160.633861551,-159.633027523,-158.632193495,-157.631359466,-156.630525438,-155.62969141,-154.628857381,-153.628023353,-152.627189324,-151.626355296,-150.625521268,-149.624687239,-148.623853211,-147.623019183,-146.622185154,-145.621351126,-144.620517098,-143.619683069,-142.618849041,-141.618015013,-140.617180984,-139.616346956,-138.615512927,-137.614678899,-136.613844871,-135.613010842,-134.612176814,-133.611342786,-132.610508757,-131.609674729,-130.608840701,-129.608006672,-128.607172644,-127.606338616,-126.605504587,-125.604670559,-124.60383653,-123.603002502,-12
2.602168474,-121.601334445,-120.600500417,-119.599666389,-118.59883236,-117.597998332,-116.597164304,-115.596330275,-114.595496247,-113.594662219,-112.59382819,-111.592994162,-110.592160133,-109.591326105,-108.590492077,-107.589658048,-106.58882402,-105.587989992,-104.587155963,-103.586321935,-102.585487907,-101.584653878,-100.58381985,-99.5829858215,-98.5821517932,-97.5813177648,-96.5804837364,-95.5796497081,-94.5788156797,-93.5779816514,-92.577147623,-91.5763135947,-90.5754795663,-89.5746455379,-88.5738115096,-87.5729774812,-86.5721434529,-85.5713094245,-84.5704753962,-83.5696413678,-82.5688073394,-81.5679733111,-80.5671392827,-79.5663052544,-78.565471226,-77.5646371977,-76.5638031693,-75.562969141,-74.5621351126,-73.5613010842,-72.5604670559,-71.5596330275,-70.5587989992,-69.5579649708,-68.5571309425,-67.5562969141,-66.5554628857,-65.5546288574,-64.553794829,-63.5529608007,-62.5521267723,-61.551292744,-60.5504587156,-59.5496246872,-58.5487906589,-57.5479566305,-56.5471226022,-55.5462885738,-54.5454545455,-53.5446205171,-52.5437864887,-51.5429524604,-50.542118432,-49.5412844037,-48.5404503753,-47.539616347,-46.5387823186,-45.5379482902,-44.5371142619,-43.5362802335,-42.5354462052,-41.5346121768,-40.5337781485,-39.5329441201,-38.5321100917,-37.5312760634,-36.530442035,-35.5296080067,-34.5287739783,-33.52793995,-32.5271059216,-31.5262718932,-30.5254378649,-29.5246038365,-28.5237698082,-27.5229357798,-26.5221017515,-25.5212677231,-24.5204336947,-23.5195996664,-22.518765638,-21.5179316097,-20.5170975813,-19.516263553,-18.5154295246,-17.5145954962,-16.5137614679,-15.5129274395,-14.5120934112,-13.5112593828,-12.5104253545,-11.5095913261,-10.5087572977,-9.50792326939,-8.50708924103,-7.50625521268,-6.50542118432,-5.50458715596,-4.50375312761,-3.50291909925,-2.50208507089,-1.50125104254,-0.500417014179,0.500417014178,1.50125104254,2.50208507089,3.50291909925,4.50375312761,5.50458715596,6.50542118432,7.50625521268,8.50708924103,9.50792326939,10.5087572977,11.5095913261,12.5104253545,13.5112593828,14.5120934112,15.5129274395,16.5137614679,17.5145954962,18.5154295246,19.516263553,20.5170975813,21.5179316097,22.518765638,23.5195996664,24.5204336947,25.5212677231,26.5221017515,27.5229357798,28.5237698082,29.5246038365,30.5254378649,31.5262718932,32.5271059216,33.52793995,34.5287739783,35.5296080067,36.530442035,37.5312760634,38.5321100917,39.5329441201,40.5337781485,41.5346121768,42.5354462052,43.5362802335,44.5371142619,45.5379482902,46.5387823186,47.539616347,48.5404503753,49.5412844037,50.542118432,51.5429524604,52.5437864887,53.5446205171,54.5454545455,55.5462885738,56.5471226022,57.5479566305,58.5487906589,59.5496246872,60.5504587156,61.551292744,62.5521267723,63.5529608007,64.553794829,65.5546288574,66.5554628857,67.5562969141,68.5571309425,69.5579649708,70.5587989992,71.5596330275,72.5604670559,73.5613010842,74.5621351126,75.562969141,76.5638031693,77.5646371977,78.565471226,79.5663052544,80.5671392827,81.5679733111,82.5688073394,83.5696413678,84.5704753962,85.5713094245,86.5721434529,87.5729774812,88.5738115096,89.5746455379,90.5754795663,91.5763135947,92.577147623,93.5779816514,94.5788156797,95.5796497081,96.5804837364,97.5813177648,98.5821517932,99.5829858215,100.58381985,101.584653878,102.585487907,103.586321935,104.587155963,105.587989992,106.58882402,107.589658048,108.590492077,109.591326105,110.592160133,111.592994162,112.59382819,113.594662219,114.595496247,115.596330275,116.597164304,117.597998332,118.59883236,119.599666389,120.600500417,121.601334445,122.602168474,123.603002502,124.603
83653,125.604670559,126.605504587,127.606338616,128.607172644,129.608006672,130.608840701,131.609674729,132.610508757,133.611342786,134.612176814,135.613010842,136.613844871,137.614678899,138.615512927,139.616346956,140.617180984,141.618015013,142.618849041,143.619683069,144.620517098,145.621351126,146.622185154,147.623019183,148.623853211,149.624687239,150.625521268,151.626355296,152.627189324,153.628023353,154.628857381,155.62969141,156.630525438,157.631359466,158.632193495,159.633027523,160.633861551,161.63469558,162.635529608,163.636363636,164.637197665,165.638031693,166.638865721,167.63969975,168.640533778,169.641367807,170.642201835,171.643035863,172.643869892,173.64470392,174.645537948,175.646371977,176.647206005,177.648040033,178.648874062,179.64970809,180.650542118,181.651376147,182.652210175,183.653044204,184.653878232,185.65471226,186.655546289,187.656380317,188.657214345,189.658048374,190.658882402,191.65971643,192.660550459,193.661384487,194.662218515,195.663052544,196.663886572,197.664720601,198.665554629,199.666388657,200.667222686,201.668056714,202.668890742,203.669724771,204.670558799,205.671392827,206.672226856,207.673060884,208.673894912,209.674728941,210.675562969,211.676396997,212.677231026,213.678065054,214.678899083,215.679733111,216.680567139,217.681401168,218.682235196,219.683069224,220.683903253,221.684737281,222.685571309,223.686405338,224.687239366,225.688073394,226.688907423,227.689741451,228.69057548,229.691409508,230.692243536,231.693077565,232.693911593,233.694745621,234.69557965,235.696413678,236.697247706,237.698081735,238.698915763,239.699749791,240.70058382,241.701417848,242.702251877,243.703085905,244.703919933,245.704753962,246.70558799,247.706422018,248.707256047,249.708090075,250.708924103,251.709758132,252.71059216,253.711426188,254.712260217,255.713094245,256.713928274,257.714762302,258.71559633,259.716430359,260.717264387,261.718098415,262.718932444,263.719766472,264.7206005,265.721434529,266.722268557,267.723102585,268.723936614,269.724770642,270.725604671,271.726438699,272.727272727,273.728106756,274.728940784,275.729774812,276.730608841,277.731442869,278.732276897,279.733110926,280.733944954,281.734778982,282.735613011,283.736447039,284.737281068,285.738115096,286.738949124,287.739783153,288.740617181,289.741451209,290.742285238,291.743119266,292.743953294,293.744787323,294.745621351,295.746455379,296.747289408,297.748123436,298.748957465,299.749791493,300.750625521,301.75145955,302.752293578,303.753127606,304.753961635,305.754795663,306.755629691,307.75646372,308.757297748,309.758131776,310.758965805,311.759799833,312.760633862,313.76146789,314.762301918,315.763135947,316.763969975,317.764804003,318.765638032,319.76647206,320.767306088,321.768140117,322.768974145,323.769808173,324.770642202,325.77147623,326.772310259,327.773144287,328.773978315,329.774812344,330.775646372,331.7764804,332.777314429,333.778148457,334.778982485,335.779816514,336.780650542,337.78148457,338.782318599,339.783152627,340.783986656,341.784820684,342.785654712,343.786488741,344.787322769,345.788156797,346.788990826,347.789824854,348.790658882,349.791492911,350.792326939,351.793160967,352.793994996,353.794829024,354.795663053,355.796497081,356.797331109,357.798165138,358.798999166,359.799833194,360.800667223,361.801501251,362.802335279,363.803169308,364.804003336,365.804837364,366.805671393,367.806505421,368.80733945,369.808173478,370.809007506,371.809841535,372.810675563,373.811509591,374.81234362,375.813177648,376.814011676,377.814845705,378.815679733,379.816513761,380.
81734779,381.818181818,382.819015847,383.819849875,384.820683903,385.821517932,386.82235196,387.823185988,388.824020017,389.824854045,390.825688073,391.826522102,392.82735613,393.828190158,394.829024187,395.829858215,396.830692244,397.831526272,398.8323603,399.833194329,400.834028357,401.834862385,402.835696414,403.836530442,404.83736447,405.838198499,406.839032527,407.839866555,408.840700584,409.841534612,410.842368641,411.843202669,412.844036697,413.844870726,414.845704754,415.846538782,416.847372811,417.848206839,418.849040867,419.849874896,420.850708924,421.851542952,422.852376981,423.853211009,424.854045038,425.854879066,426.855713094,427.856547123,428.857381151,429.858215179,430.859049208,431.859883236,432.860717264,433.861551293,434.862385321,435.863219349,436.864053378,437.864887406,438.865721435,439.866555463,440.867389491,441.86822352,442.869057548,443.869891576,444.870725605,445.871559633,446.872393661,447.87322769,448.874061718,449.874895746,450.875729775,451.876563803,452.877397832,453.87823186,454.879065888,455.879899917,456.880733945,457.881567973,458.882402002,459.88323603,460.884070058,461.884904087,462.885738115,463.886572143,464.887406172,465.8882402,466.889074229,467.889908257,468.890742285,469.891576314,470.892410342,471.89324437,472.894078399,473.894912427,474.895746455,475.896580484,476.897414512,477.89824854,478.899082569,479.899916597,480.900750626,481.901584654,482.902418682,483.903252711,484.904086739,485.904920767,486.905754796,487.906588824,488.907422852,489.908256881,490.909090909,491.909924937,492.910758966,493.911592994,494.912427023,495.913261051,496.914095079,497.914929108,498.915763136,499.916597164,500.917431193,501.918265221,502.919099249,503.919933278,504.920767306,505.921601334,506.922435363,507.923269391,508.92410342,509.924937448,510.925771476,511.926605505,512.927439533,513.928273561,514.92910759,515.929941618,516.930775646,517.931609675,518.932443703,519.933277731,520.93411176,521.934945788,522.935779817,523.936613845,524.937447873,525.938281902,526.93911593,527.939949958,528.940783987,529.941618015,530.942452043,531.943286072,532.9441201,533.944954128,534.945788157,535.946622185,536.947456214,537.948290242,538.94912427,539.949958299,540.950792327,541.951626355,542.952460384,543.953294412,544.95412844,545.954962469,546.955796497,547.956630525,548.957464554,549.958298582,550.959132611,551.959966639,552.960800667,553.961634696,554.962468724,555.963302752,556.964136781,557.964970809,558.965804837,559.966638866,560.967472894,561.968306922,562.969140951,563.969974979,564.970809008,565.971643036,566.972477064,567.973311093,568.974145121,569.974979149,570.975813178,571.976647206,572.977481234,573.978315263,574.979149291,575.979983319,576.980817348,577.981651376,578.982485405,579.983319433,580.984153461,581.98498749,582.985821518,583.986655546,584.987489575,585.988323603,586.989157631,587.98999166,588.990825688,589.991659716,590.992493745,591.993327773,592.994161802,593.99499583,594.995829858,595.996663887,596.997497915,597.998331943,598.999165972,600.0)
altd <- c(52, 288)
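# Editorial note: xcorr/ycorr above hold the cross-correlation between + and -
# strand tags over lags of roughly -600..600 bp, and altd lists the alternative
# lags (52 and 288 bp) that the plotting code below marks with dashed red lines.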
x <- seq.int((length(p)-1)/2*-1,(length(p)-1)/2)
pdf('/magnuson-lab/jraab/analysis/swi_snf_final/output/macs_peaks//arid2_model.pdf',height=6,width=6)
plot(x,p,type='l',col=c('red'),main='Peak Model',xlab='Distance to the middle',ylab='Percentage')
lines(x,m,col=c('blue'))
legend('topleft',c('forward tags','reverse tags'),lty=c(1,1),col=c('red','blue'))
plot(xcorr,ycorr,type='l',col=c('black'),main='Cross-Correlation',xlab='Lag between + and - tags',ylab='Correlation')
abline(v=altd,lty=2,col=c('red'))
legend('topleft','alternative lag(s)',lty=2,col='red')
legend('right','alt lag(s) : 52,288',bty='n')
dev.off()
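The generated script only plots the cross-correlation; the lag at which ycorr peaks is where the forward and reverse tags are most correlated, which is the usual fragment-length read-out for such plots. A minimal R sketch to recover that value from the vectors defined above (the name d.est is an editorial assumption, not part of the MACS output):

# Recover the best lag from the cross-correlation vectors defined above.
d.est <- xcorr[which.max(ycorr)]   # lag (bp) with maximal +/- strand correlation
message('estimated lag: ', round(d.est), ' bp; alternative lag(s): ',
        paste(altd, collapse = ', '))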
| /output/macs_peaks/arid2_model.r | permissive | ytakemon/raab_swisnf_2015 | R | false | false | 78,410 | r |
# R script for Peak Model
# -- generated by MACS
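# Editorial note: as in the arid2 model above, p and m hold the forward- and
# reverse-tag percentages by distance to the middle; they are consumed by the
# plotting code that follows the data vectors.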
## [inline figure data: vectors p, m, ycorr]
##   p     - density-style curve, values in [0, ~0.213], tapering to 0 at the end
##   m     - density-style curve of similar length, values in [0, ~0.212], also tapering to 0
##   ycorr - smooth low-amplitude curve, |values| < ~7e-4
xcorr <- c(-600.0,-598.999165972,-597.998331943,-596.997497915,-595.996663887,-594.995829858,-593.99499583,-592.994161802,-591.993327773,-590.992493745,-589.991659716,-588.990825688,-587.98999166,-586.989157631,-585.988323603,-584.987489575,-583.986655546,-582.985821518,-581.98498749,-580.984153461,-579.983319433,-578.982485405,-577.981651376,-576.980817348,-575.979983319,-574.979149291,-573.978315263,-572.977481234,-571.976647206,-570.975813178,-569.974979149,-568.974145121,-567.973311093,-566.972477064,-565.971643036,-564.970809008,-563.969974979,-562.969140951,-561.968306922,-560.967472894,-559.966638866,-558.965804837,-557.964970809,-556.964136781,-555.963302752,-554.962468724,-553.961634696,-552.960800667,-551.959966639,-550.959132611,-549.958298582,-548.957464554,-547.956630525,-546.955796497,-545.954962469,-544.95412844,-543.953294412,-542.952460384,-541.951626355,-540.950792327,-539.949958299,-538.94912427,-537.948290242,-536.947456214,-535.946622185,-534.945788157,-533.944954128,-532.9441201,-531.943286072,-530.942452043,-529.941618015,-528.940783987,-527.939949958,-526.93911593,-525.938281902,-524.937447873,-523.936613845,-522.935779817,-521.934945788,-520.93411176,-519.933277731,-518.932443703,-517.931609675,-516.930775646,-515.929941618,-514.92910759,-513.928273561,-512.927439533,-511.926605505,-510.925771476,-509.924937448,-508.92410342,-507.923269391,-506.922435363,-505.921601334,-504.920767306,-503.919933278,-502.919099249,-501.918265221,-500.917431193,-499.916597164,-498.915763136,-497.914929108,-496.914095079,-495.913261051,-494.912427023,-493.911592994,-492.910758966,-491.909924937,-490.909090909,-489.908256881,-488.907422852,-487.906588824,-486.905754796,-485.904920767,-484.904086739,-483.903252711,-482.902418682,-481.901584654,-480.900750626,-479.899916597,-478.899082569,-477.89824854,-476.897414512,-475.896580484,-474.895746455,-473.894912427,-472.894078399,-471.89324437,-470.892410342,-469.891576314,-468.890742285,-467.889908257,-466.889074229,-465.8882402,-464.887406172,-463.886572143,-462.885738115,-461.884904087,-460.884070058,-459.88323603,-458.882402002,-457.881567973,-456.880733945,-455.879899917,-454.879065888,-453.87823186,-452.877397832,-451.876563803,-450.875729775,-449.874895746,-448.874061718,-447.87322769,-446.872393661,-445.871559633,-444.870725605,-443.869891576,-442.869057548,-441.86822352,-440.867389491,-439.866555463,-438.865721435,-437.864887406,-436.864053378,-435.863219349,-434.862385321,-433.861551293,-432.860717264,-431.859883236,-430.859049208,-429.858215179,-428.857381151,-427.856547123,-426.855713094,-425.854879066,-424.854045038,-423.853211009,-422.852376981,-421.851542952,-420.850708924,-419.849874896,-418.849040867,-417.848206839,-416.847372811,-415.846538782,-414.845704754,-413.844870726,-412.844036697,-411.843202669,-410.842368641,-409.841534612,-408.840700584,-407.839866555,-406.839032527,-405.838198499,-404.83736447,-403.836530442,-402.835696414,-401.834862385,-400.834028357,-399.833194329,-398.8323603,-397.831526272,-396.830692244,-395.829858215,-394.829024187,-393.828190158,-392.82735613,-391.826522102,-390.825688073,-389.824854045,-388.824020017,-387.823185988,-386.82235196,-385.821517932,-384.820683903,-383.819849875,-382.819015847,-381.818181818,-380.81734779,-379.816513761,-378.815679733,-377.814845705,-376.814011676,-375.813177648,-374.81234362,-373.811509591,-372.810675563,-371.809841535,-370.809007506,-369.808173478,-368.80733945,-367.806505421,-366.805671393,-365.804837364,-364.804003336,-363.803169308,-362.802335279,-361.8015
01251,-360.800667223,-359.799833194,-358.798999166,-357.798165138,-356.797331109,-355.796497081,-354.795663053,-353.794829024,-352.793994996,-351.793160967,-350.792326939,-349.791492911,-348.790658882,-347.789824854,-346.788990826,-345.788156797,-344.787322769,-343.786488741,-342.785654712,-341.784820684,-340.783986656,-339.783152627,-338.782318599,-337.78148457,-336.780650542,-335.779816514,-334.778982485,-333.778148457,-332.777314429,-331.7764804,-330.775646372,-329.774812344,-328.773978315,-327.773144287,-326.772310259,-325.77147623,-324.770642202,-323.769808173,-322.768974145,-321.768140117,-320.767306088,-319.76647206,-318.765638032,-317.764804003,-316.763969975,-315.763135947,-314.762301918,-313.76146789,-312.760633862,-311.759799833,-310.758965805,-309.758131776,-308.757297748,-307.75646372,-306.755629691,-305.754795663,-304.753961635,-303.753127606,-302.752293578,-301.75145955,-300.750625521,-299.749791493,-298.748957465,-297.748123436,-296.747289408,-295.746455379,-294.745621351,-293.744787323,-292.743953294,-291.743119266,-290.742285238,-289.741451209,-288.740617181,-287.739783153,-286.738949124,-285.738115096,-284.737281068,-283.736447039,-282.735613011,-281.734778982,-280.733944954,-279.733110926,-278.732276897,-277.731442869,-276.730608841,-275.729774812,-274.728940784,-273.728106756,-272.727272727,-271.726438699,-270.725604671,-269.724770642,-268.723936614,-267.723102585,-266.722268557,-265.721434529,-264.7206005,-263.719766472,-262.718932444,-261.718098415,-260.717264387,-259.716430359,-258.71559633,-257.714762302,-256.713928274,-255.713094245,-254.712260217,-253.711426188,-252.71059216,-251.709758132,-250.708924103,-249.708090075,-248.707256047,-247.706422018,-246.70558799,-245.704753962,-244.703919933,-243.703085905,-242.702251877,-241.701417848,-240.70058382,-239.699749791,-238.698915763,-237.698081735,-236.697247706,-235.696413678,-234.69557965,-233.694745621,-232.693911593,-231.693077565,-230.692243536,-229.691409508,-228.69057548,-227.689741451,-226.688907423,-225.688073394,-224.687239366,-223.686405338,-222.685571309,-221.684737281,-220.683903253,-219.683069224,-218.682235196,-217.681401168,-216.680567139,-215.679733111,-214.678899083,-213.678065054,-212.677231026,-211.676396997,-210.675562969,-209.674728941,-208.673894912,-207.673060884,-206.672226856,-205.671392827,-204.670558799,-203.669724771,-202.668890742,-201.668056714,-200.667222686,-199.666388657,-198.665554629,-197.664720601,-196.663886572,-195.663052544,-194.662218515,-193.661384487,-192.660550459,-191.65971643,-190.658882402,-189.658048374,-188.657214345,-187.656380317,-186.655546289,-185.65471226,-184.653878232,-183.653044204,-182.652210175,-181.651376147,-180.650542118,-179.64970809,-178.648874062,-177.648040033,-176.647206005,-175.646371977,-174.645537948,-173.64470392,-172.643869892,-171.643035863,-170.642201835,-169.641367807,-168.640533778,-167.63969975,-166.638865721,-165.638031693,-164.637197665,-163.636363636,-162.635529608,-161.63469558,-160.633861551,-159.633027523,-158.632193495,-157.631359466,-156.630525438,-155.62969141,-154.628857381,-153.628023353,-152.627189324,-151.626355296,-150.625521268,-149.624687239,-148.623853211,-147.623019183,-146.622185154,-145.621351126,-144.620517098,-143.619683069,-142.618849041,-141.618015013,-140.617180984,-139.616346956,-138.615512927,-137.614678899,-136.613844871,-135.613010842,-134.612176814,-133.611342786,-132.610508757,-131.609674729,-130.608840701,-129.608006672,-128.607172644,-127.606338616,-126.605504587,-125.604670559,-124.60383653,-123.603002502,-12
2.602168474,-121.601334445,-120.600500417,-119.599666389,-118.59883236,-117.597998332,-116.597164304,-115.596330275,-114.595496247,-113.594662219,-112.59382819,-111.592994162,-110.592160133,-109.591326105,-108.590492077,-107.589658048,-106.58882402,-105.587989992,-104.587155963,-103.586321935,-102.585487907,-101.584653878,-100.58381985,-99.5829858215,-98.5821517932,-97.5813177648,-96.5804837364,-95.5796497081,-94.5788156797,-93.5779816514,-92.577147623,-91.5763135947,-90.5754795663,-89.5746455379,-88.5738115096,-87.5729774812,-86.5721434529,-85.5713094245,-84.5704753962,-83.5696413678,-82.5688073394,-81.5679733111,-80.5671392827,-79.5663052544,-78.565471226,-77.5646371977,-76.5638031693,-75.562969141,-74.5621351126,-73.5613010842,-72.5604670559,-71.5596330275,-70.5587989992,-69.5579649708,-68.5571309425,-67.5562969141,-66.5554628857,-65.5546288574,-64.553794829,-63.5529608007,-62.5521267723,-61.551292744,-60.5504587156,-59.5496246872,-58.5487906589,-57.5479566305,-56.5471226022,-55.5462885738,-54.5454545455,-53.5446205171,-52.5437864887,-51.5429524604,-50.542118432,-49.5412844037,-48.5404503753,-47.539616347,-46.5387823186,-45.5379482902,-44.5371142619,-43.5362802335,-42.5354462052,-41.5346121768,-40.5337781485,-39.5329441201,-38.5321100917,-37.5312760634,-36.530442035,-35.5296080067,-34.5287739783,-33.52793995,-32.5271059216,-31.5262718932,-30.5254378649,-29.5246038365,-28.5237698082,-27.5229357798,-26.5221017515,-25.5212677231,-24.5204336947,-23.5195996664,-22.518765638,-21.5179316097,-20.5170975813,-19.516263553,-18.5154295246,-17.5145954962,-16.5137614679,-15.5129274395,-14.5120934112,-13.5112593828,-12.5104253545,-11.5095913261,-10.5087572977,-9.50792326939,-8.50708924103,-7.50625521268,-6.50542118432,-5.50458715596,-4.50375312761,-3.50291909925,-2.50208507089,-1.50125104254,-0.500417014179,0.500417014178,1.50125104254,2.50208507089,3.50291909925,4.50375312761,5.50458715596,6.50542118432,7.50625521268,8.50708924103,9.50792326939,10.5087572977,11.5095913261,12.5104253545,13.5112593828,14.5120934112,15.5129274395,16.5137614679,17.5145954962,18.5154295246,19.516263553,20.5170975813,21.5179316097,22.518765638,23.5195996664,24.5204336947,25.5212677231,26.5221017515,27.5229357798,28.5237698082,29.5246038365,30.5254378649,31.5262718932,32.5271059216,33.52793995,34.5287739783,35.5296080067,36.530442035,37.5312760634,38.5321100917,39.5329441201,40.5337781485,41.5346121768,42.5354462052,43.5362802335,44.5371142619,45.5379482902,46.5387823186,47.539616347,48.5404503753,49.5412844037,50.542118432,51.5429524604,52.5437864887,53.5446205171,54.5454545455,55.5462885738,56.5471226022,57.5479566305,58.5487906589,59.5496246872,60.5504587156,61.551292744,62.5521267723,63.5529608007,64.553794829,65.5546288574,66.5554628857,67.5562969141,68.5571309425,69.5579649708,70.5587989992,71.5596330275,72.5604670559,73.5613010842,74.5621351126,75.562969141,76.5638031693,77.5646371977,78.565471226,79.5663052544,80.5671392827,81.5679733111,82.5688073394,83.5696413678,84.5704753962,85.5713094245,86.5721434529,87.5729774812,88.5738115096,89.5746455379,90.5754795663,91.5763135947,92.577147623,93.5779816514,94.5788156797,95.5796497081,96.5804837364,97.5813177648,98.5821517932,99.5829858215,100.58381985,101.584653878,102.585487907,103.586321935,104.587155963,105.587989992,106.58882402,107.589658048,108.590492077,109.591326105,110.592160133,111.592994162,112.59382819,113.594662219,114.595496247,115.596330275,116.597164304,117.597998332,118.59883236,119.599666389,120.600500417,121.601334445,122.602168474,123.603002502,124.603
83653,125.604670559,126.605504587,127.606338616,128.607172644,129.608006672,130.608840701,131.609674729,132.610508757,133.611342786,134.612176814,135.613010842,136.613844871,137.614678899,138.615512927,139.616346956,140.617180984,141.618015013,142.618849041,143.619683069,144.620517098,145.621351126,146.622185154,147.623019183,148.623853211,149.624687239,150.625521268,151.626355296,152.627189324,153.628023353,154.628857381,155.62969141,156.630525438,157.631359466,158.632193495,159.633027523,160.633861551,161.63469558,162.635529608,163.636363636,164.637197665,165.638031693,166.638865721,167.63969975,168.640533778,169.641367807,170.642201835,171.643035863,172.643869892,173.64470392,174.645537948,175.646371977,176.647206005,177.648040033,178.648874062,179.64970809,180.650542118,181.651376147,182.652210175,183.653044204,184.653878232,185.65471226,186.655546289,187.656380317,188.657214345,189.658048374,190.658882402,191.65971643,192.660550459,193.661384487,194.662218515,195.663052544,196.663886572,197.664720601,198.665554629,199.666388657,200.667222686,201.668056714,202.668890742,203.669724771,204.670558799,205.671392827,206.672226856,207.673060884,208.673894912,209.674728941,210.675562969,211.676396997,212.677231026,213.678065054,214.678899083,215.679733111,216.680567139,217.681401168,218.682235196,219.683069224,220.683903253,221.684737281,222.685571309,223.686405338,224.687239366,225.688073394,226.688907423,227.689741451,228.69057548,229.691409508,230.692243536,231.693077565,232.693911593,233.694745621,234.69557965,235.696413678,236.697247706,237.698081735,238.698915763,239.699749791,240.70058382,241.701417848,242.702251877,243.703085905,244.703919933,245.704753962,246.70558799,247.706422018,248.707256047,249.708090075,250.708924103,251.709758132,252.71059216,253.711426188,254.712260217,255.713094245,256.713928274,257.714762302,258.71559633,259.716430359,260.717264387,261.718098415,262.718932444,263.719766472,264.7206005,265.721434529,266.722268557,267.723102585,268.723936614,269.724770642,270.725604671,271.726438699,272.727272727,273.728106756,274.728940784,275.729774812,276.730608841,277.731442869,278.732276897,279.733110926,280.733944954,281.734778982,282.735613011,283.736447039,284.737281068,285.738115096,286.738949124,287.739783153,288.740617181,289.741451209,290.742285238,291.743119266,292.743953294,293.744787323,294.745621351,295.746455379,296.747289408,297.748123436,298.748957465,299.749791493,300.750625521,301.75145955,302.752293578,303.753127606,304.753961635,305.754795663,306.755629691,307.75646372,308.757297748,309.758131776,310.758965805,311.759799833,312.760633862,313.76146789,314.762301918,315.763135947,316.763969975,317.764804003,318.765638032,319.76647206,320.767306088,321.768140117,322.768974145,323.769808173,324.770642202,325.77147623,326.772310259,327.773144287,328.773978315,329.774812344,330.775646372,331.7764804,332.777314429,333.778148457,334.778982485,335.779816514,336.780650542,337.78148457,338.782318599,339.783152627,340.783986656,341.784820684,342.785654712,343.786488741,344.787322769,345.788156797,346.788990826,347.789824854,348.790658882,349.791492911,350.792326939,351.793160967,352.793994996,353.794829024,354.795663053,355.796497081,356.797331109,357.798165138,358.798999166,359.799833194,360.800667223,361.801501251,362.802335279,363.803169308,364.804003336,365.804837364,366.805671393,367.806505421,368.80733945,369.808173478,370.809007506,371.809841535,372.810675563,373.811509591,374.81234362,375.813177648,376.814011676,377.814845705,378.815679733,379.816513761,380.
81734779,381.818181818,382.819015847,383.819849875,384.820683903,385.821517932,386.82235196,387.823185988,388.824020017,389.824854045,390.825688073,391.826522102,392.82735613,393.828190158,394.829024187,395.829858215,396.830692244,397.831526272,398.8323603,399.833194329,400.834028357,401.834862385,402.835696414,403.836530442,404.83736447,405.838198499,406.839032527,407.839866555,408.840700584,409.841534612,410.842368641,411.843202669,412.844036697,413.844870726,414.845704754,415.846538782,416.847372811,417.848206839,418.849040867,419.849874896,420.850708924,421.851542952,422.852376981,423.853211009,424.854045038,425.854879066,426.855713094,427.856547123,428.857381151,429.858215179,430.859049208,431.859883236,432.860717264,433.861551293,434.862385321,435.863219349,436.864053378,437.864887406,438.865721435,439.866555463,440.867389491,441.86822352,442.869057548,443.869891576,444.870725605,445.871559633,446.872393661,447.87322769,448.874061718,449.874895746,450.875729775,451.876563803,452.877397832,453.87823186,454.879065888,455.879899917,456.880733945,457.881567973,458.882402002,459.88323603,460.884070058,461.884904087,462.885738115,463.886572143,464.887406172,465.8882402,466.889074229,467.889908257,468.890742285,469.891576314,470.892410342,471.89324437,472.894078399,473.894912427,474.895746455,475.896580484,476.897414512,477.89824854,478.899082569,479.899916597,480.900750626,481.901584654,482.902418682,483.903252711,484.904086739,485.904920767,486.905754796,487.906588824,488.907422852,489.908256881,490.909090909,491.909924937,492.910758966,493.911592994,494.912427023,495.913261051,496.914095079,497.914929108,498.915763136,499.916597164,500.917431193,501.918265221,502.919099249,503.919933278,504.920767306,505.921601334,506.922435363,507.923269391,508.92410342,509.924937448,510.925771476,511.926605505,512.927439533,513.928273561,514.92910759,515.929941618,516.930775646,517.931609675,518.932443703,519.933277731,520.93411176,521.934945788,522.935779817,523.936613845,524.937447873,525.938281902,526.93911593,527.939949958,528.940783987,529.941618015,530.942452043,531.943286072,532.9441201,533.944954128,534.945788157,535.946622185,536.947456214,537.948290242,538.94912427,539.949958299,540.950792327,541.951626355,542.952460384,543.953294412,544.95412844,545.954962469,546.955796497,547.956630525,548.957464554,549.958298582,550.959132611,551.959966639,552.960800667,553.961634696,554.962468724,555.963302752,556.964136781,557.964970809,558.965804837,559.966638866,560.967472894,561.968306922,562.969140951,563.969974979,564.970809008,565.971643036,566.972477064,567.973311093,568.974145121,569.974979149,570.975813178,571.976647206,572.977481234,573.978315263,574.979149291,575.979983319,576.980817348,577.981651376,578.982485405,579.983319433,580.984153461,581.98498749,582.985821518,583.986655546,584.987489575,585.988323603,586.989157631,587.98999166,588.990825688,589.991659716,590.992493745,591.993327773,592.994161802,593.99499583,594.995829858,595.996663887,596.997497915,597.998331943,598.999165972,600.0)
altd <- c(52, 288)
x <- seq.int((length(p)-1)/2*-1,(length(p)-1)/2)
pdf('/magnuson-lab/jraab/analysis/swi_snf_final/output/macs_peaks//arid2_model.pdf',height=6,width=6)
plot(x,p,type='l',col=c('red'),main='Peak Model',xlab='Distance to the middle',ylab='Percentage')
lines(x,m,col=c('blue'))
legend('topleft',c('forward tags','reverse tags'),lty=c(1,1),col=c('red','blue'))
plot(xcorr,ycorr,type='l',col=c('black'),main='Cross-Correlation',xlab='Lag between + and - tags',ylab='Correlation')
abline(v=altd,lty=2,col=c('red'))
legend('topleft','alternative lag(s)',lty=2,col='red')
legend('right','alt lag(s) : 52,288',bty='n')
dev.off()
|
# 19 February 2018
# Merge the historical series with the new series downloaded from Centro Funzionale Calabria
rm(list=objects())
library("tidyverse")
library("stringr")
library("readr")
library("purrr")
options(error=recover,warn = 2)
ANNO<-as.integer(2017)
# If the file with the new series contains HisCentral OD codes (SiteCode in the registry),
# they must be converted back to SiteID
CONVERTI_NOMI<-TRUE
tryCatch({
read_delim("reg.calabria.info.csv",delim=";",col_names=TRUE)
},error=function(e){
stop("Anagrafica regione Calabria, file non trovato")
})->ana
c("Precipitation","Tmax","Tmin")->parametri
leggiDati<-function(nomeFile,DELIM=","){
if(missing(nomeFile)) stop("Manca il nome del file da leggere")
stopifnot(DELIM %in% c(",",";"))
# read the header only, to find out how many columns there are and build the read format
tryCatch({
read_delim(nomeFile,delim=DELIM,col_names=TRUE,n_max=1)
},error=function(e){
stop(sprintf("errore lettura file %s",nomeFile))
})->dati
stopifnot(all(names(dati)[1:3] %in% c("yy","mm","dd")))
# number of columns
(ncol(dati)-3) ->numCol
# read the historical data using the double format
read_delim(nomeFile,delim=DELIM,col_names=TRUE,col_types=paste("iii",paste(rep("d",numCol),collapse=""),sep =""))
} # end leggiDati
purrr::walk(parametri,.f=function(parametro){
paste0(parametro,"_fino",ANNO-1,".csv")->nomeFile
leggiDati(nomeFile,DELIM=",")->dati
paste0(parametro,"_",ANNO,".csv")->nomeFileDatiNuovi
# read the header only, to find out how many columns there are and build the read format
leggiDati(nomeFileDatiNuovi,DELIM=";")->datiNuovi
if(CONVERTI_NOMI){
match(names(datiNuovi)[4:ncol(datiNuovi)],ana$SiteCode)->posizioni
stopifnot(all(!is.na(posizioni)))
names(datiNuovi)<-c("yy","mm","dd",ana[posizioni,]$SiteID)
} # end CONVERTI_NOMI
# merge and write
bind_rows(dati,datiNuovi)->finale
print(sprintf("PARAMETRO %s",parametro))
range(finale$yy)->anni
print(sprintf("Range anni da %s a %s",anni[1],anni[2]))
write_delim(finale,path=paste0(parametro,"_storici_fino",ANNO,".csv"),delim=",",col_names=TRUE)
# which series are missing in the new year ANNO?
setdiff(names(dati),names(datiNuovi))->missingIn
# conversely: which series are present in ANNO but missing from the file up to ANNO-1?
setdiff(names(datiNuovi),names(dati))->missingInUp
sink(paste0("log_bindSerie",ANNO,"ConSerieFino",ANNO-1,".txt"),append=TRUE)
print("*************")
print(parametro)
print(sprintf("Serie Centro Funzionale Calabria nel %s ma che non hanno corrispondenza nel %s:",ANNO,ANNO-1))
print(missingInUp)
print(sprintf("Serie fino a %s senza dati nel %s:",ANNO-1,ANNO))
print(missingIn)
print("*************")
sink()
}) # end purrr::walk
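# Usage note (a sketch, not part of the original script): the input CSVs are
# expected to begin with yy, mm, dd columns followed by one numeric column per
# station; the station IDs below are made up for illustration:
#   yy,mm,dd,2170,2235
#   2016,1,1,0.0,12.4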
|
/calabria_centro_funzionale/unisci_serieStoriche_nuoviDati.R
|
no_license
|
guidofioravanti/serie_giornaliere
|
R
| false | false | 2,936 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{irish_county_data}
\alias{irish_county_data}
\title{Irish county data}
\format{
An object of class \code{sf} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 8008 rows and 10 columns.
}
\source{
\url{http://opendata-geohive.hub.arcgis.com}
}
\usage{
irish_county_data
}
\description{
Irish county data from Irish government.
}
\keyword{datasets}
|
/man/irish_county_data.Rd
|
permissive
|
hamilton-institute/hamiltonCovid19
|
R
| false | true | 469 |
rd
|
# Load amniote csv and master synonyms file
dat <- read.csv(file = "/Users/Anna/Google Drive/bird trait networks/inputs/data/r data/match data/preAves_Synonyms.csv")
synonyms <- read.csv(file = "/Users/Anna/Google Drive/bird trait networks/inputs/data/r data/match data/pre_synonyms.csv")
# clean amniote synonyms
dat <- unique(dat[!dat[,1] == dat[,2],])
# add amniote synonyms to master
synonyms <- rbind(synonyms, data.frame(dat, dataset = "amniote1"),
data.frame(dat[,2:1], dataset = "amniote2"))
# remove duplicates
synonyms <- synonyms[!duplicated(synonyms[,1:2]),]
# save
write.csv(synonyms, file = "/Users/Anna/Google Drive/bird trait networks/inputs/data/r data/synonyms.csv")
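# Sketch of the resulting layout (the first two column names depend on the
# input files; the `dataset` column is added above; values are made up):
#   name_1, name_2, dataset
#   "Parus major", "Parus minor", "amniote1"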
|
/R/add amniote synonyms.R
|
no_license
|
annakrystalli/bird_trait_networks
|
R
| false | false | 710 |
r
|
# This code is related to the extreme value theory (EVT)
# It builds a graph that shows the mean excess loss as a function of the threshold.
SP500 = read.csv("/Users/Larry/Documents/UIUC Schedule/FIN 580/HW/HW4.data.csv")
SP500$loss = -SP500$Return
loss <- SP500$loss[!is.na(SP500$loss)] # make sure "NA" is not output to loss
# For different thresholds {0.01, 0.012, 0.014, …, 0.05}
steps = (0.05-0.01)/0.002+1
i = 0
meanexcess = matrix(nrow = steps, ncol = 2)
for (u in seq(0.01, 0.05, by = 0.002))
{
i = i+1
meanexcess[i,1] = u
meanexcess[i,2] = mean((loss-u)[loss>=u])
}
matplot(meanexcess[,1], meanexcess[,2], type = "l",
xlab = "Threshold (u)", ylab = "Mean Excess Loss (e(u))", col = "black")
#calculate number with loss higher than 0.022
excess = (loss-0.022)[loss>=0.022]
length(excess)
# MLE for GPD
# GPD fitting, theta[1]=kesai, theta[2]=beta
initialvalue = c(0.1, 0.05)
gpd = function(excess,theta){(1/theta[2])*((1+theta[1]*excess/theta[2])^(-(1+1/theta[1])))}
result = optim(initialvalue, fn=function(theta){-sum(log(gpd(excess,theta)))},
method="BFGS")
kesai = result$par[1]
beta = result$par[2]
# conditional density
pdf_gpd = function(x,theta){(1/theta[2])*((1+theta[1]*x/theta[2])^(-(1+1/theta[1])))}
threshold_new = seq(from = 0.022, to = 0.1, by = 0.002) - 0.022
density = rep(0,length(threshold_new))
density = pdf_gpd(threshold_new,result$par)
threshold_new = threshold_new + 0.022
plot(threshold_new,density, type = "l")
#probability (need to check later)
prob22=length(excess)/length(loss)
condprob <- function(x){(1+kesai*x/beta)^(-1/kesai)}
series = seq(0.022 , 0.10, by = 0.001)
valueofprob <- vector("numeric", length=79)
for (j in 1: 79)
{
valueofprob[j] = prob22*condprob(series[j]-0.022)
}
length(series)
length(valueofprob)
plot(series,valueofprob, type ="l")
#Value at Risk
VaR_EVT = 0.022+(beta/kesai)*((0.01/prob22)^(-kesai)-1)
VaR_EVT
prob22
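# Sketch of a quick sanity check (not part of the original assignment code):
# compare the EVT-based 99% VaR above with the empirical 99% loss quantile.
quantile(loss, probs = 0.99, na.rm = TRUE)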
|
/Mean_excess_function.R
|
no_license
|
Karagul/risk_management
|
R
| false | false | 1,920 |
r
|
The other day, towards evening, I stopped in Piata Domenii in Bucharest.
Pyramids of melons, a deluge of vegetables and stacks of crates of plums.
You would have thought the dams of autumn had burst and everything had tumbled into that spot.
I asked a peasant from Dimbovita for two kilograms of damson plums.
With his broad hands he filled the scale pan.
The aluminium cup was dirty too. But ten times filthier than the hands of the man, who reeked of rachiu.
Beside him, his wife (or whatever she was to him) and a lout (probably his son or some borrowed driver) were drinking beer.
After weighing it out to the last plum, the peasant from Dimbovita gave me a long look.
Don't you have a bag?
I'm sorry, I'm coming from work.
He put the pan back on the scale and began circling like a dog chasing its tail, hoping to find some discarded bag on the ground.
He even went to a fly-swarmed dumpster, from which he came back with a crumpled, dirty piece of plastic.
You're mad, I said.
How can you give me the plums in something like that?
If it doesn't suit you, don't buy, said the lout, emboldened by the beer.
And I left empty-handed, that being the only polite sanction I could apply to the peasant.
Because he is the cornerstone of Romania, because he is the bearer of traditions, because he fought the wars, because everyone's bread is in his hands, the Romanian peasant is idealized far from his present-day condition.
He has a hard life, he has learned to cheat, and he is often ignorant and full of prejudices.
To win him over in elections he is coddled by politicians, and to underline the Romanian national character he is overrated by traditionalist intellectuals.
Too few people try to help the Romanian peasant understand the times. To grasp that the days are gone when city dwellers fought over his telemea cheese mixed with rubbish and sheep hair, that he can no longer sell with contempt for the buyer, that he can no longer go unwashed, that he can no longer steal.
And, if he wants to survive, he must adapt, learn new habits, learn to use more complicated machinery.
Indeed, if he wants to avoid stumbling into follies, he must also learn the difference between banks and investment funds, between advertising and an appliance's instruction manual, and even understand the national currency's exchange rate and what the story is with interest rates.
Not to mention that selling meat wrapped in newspaper no longer works, nor fruit gathered in pig buckets, nor all the other habits that betray a certain contempt for one's fellow men.
I would not want it understood that this is about all Romanian peasants.
Some have grasped what is happening and have switched tracks, moving like quicksilver.
But many live and behave as they did before the Second World War, if not worse.
Nor is their salvation very easy.
How do you enlighten some of them, when they are pickled in alcohol and have forgotten how to work?
And if we were to launch an enlightenment campaign, as other generations of intellectuals have tried before, where would the resources come from?
Newspapers hardly reach the villages, television programming leads to anything but progress, and the church attempts anything but social and economic education.
The Romanian peasant idealized by poets and politicians could just as well become the object of a campaign to wipe out the effects of ignorance.
|
/data/Newspapers/2001.09.08.editorial.70827.0712.r
|
no_license
|
narcis96/decrypting-alpha
|
R
| false | false | 3,330 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/job_get.R
\name{job_get}
\alias{job_get}
\title{Get a single job}
\usage{
job_get(id, verbose = TRUE, ...)
}
\arguments{
\item{id}{A character string containing an ID for a job.}
\item{verbose}{A logical indicating whether to print additional information about the request.}
\item{...}{Additional arguments passed to \code{\link{cf_query}}.}
}
\value{
A list containing details of all jobs. The \code{id} field provides the Crowdflower Job ID for each job.
}
\description{
Get a single Crowdflower job
}
\examples{
\dontrun{
# create new job
f1 <- system.file("templates/instructions1.html", package = "crowdflower")
f2 <- system.file("templates/cml1.xml", package = "crowdflower")
j <- job_create(title = "Job Title",
instructions = readChar(f1, nchars = 1e8L),
cml = readChar(f2, nchars = 1e8L))
# confirm details are correct
job_get(j)
# delete job
job_delete(j)
}
}
\seealso{
\code{\link{cf_account}}
}
\keyword{jobs}
|
/man/job_get.Rd
|
no_license
|
isabella232/crowdflower
|
R
| false | true | 1,035 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_misc_utils.R
\name{getDataFromTextArea}
\alias{getDataFromTextArea}
\title{Transform two-column text to data matrix}
\usage{
getDataFromTextArea(txtInput, sep.type = "space")
}
\arguments{
\item{txtInput}{Input text}
\item{sep.type}{Indicates the separator type for the input text. Defaults to "space"}
}
\description{
Transforms two-column input text into a data matrix (a single-column data frame)
}
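\examples{
\dontrun{
# Hypothetical sketch (not from the package docs): parse two-column,
# space-separated text pasted into a text area. Input values are made up.
txt <- "alanine 1.50\nglycine 0.75"
dat <- getDataFromTextArea(txt, sep.type = "space")
}
}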
\author{
Jeff Xia\email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
/man/getDataFromTextArea.Rd
|
permissive
|
xia-lab/MetaboAnalystR
|
R
| false | true | 576 |
rd
|
# Functions for handling data from stateior, https://github.com/USEPA/stateior
#' Load two-region IO data of model iolevel and year from user's local directory
#' or the EPA Data Commons.
#' @description Load two-region IO data of model iolevel and year from user's
#' local directory or the EPA Data Commons.
#' @param model An EEIO form USEEIO model object with model specs and IO meta data loaded.
#' @param dataname Name of desired IO data, can be "Make", "Use", "DomesticUse",
#' "UseTransactions", "FinalDemand", "InternationalTradeAdjustment,
#' "DomesticUseTransactions", "DomesticFinalDemand",
#' "CommodityOutput, "IndustryOutput", and "DomesticUsewithTrade".
#' @return A list of two-region IO data of model iolevel and year.
getTwoRegionIOData <- function(model, dataname) {
# Define state, year and iolevel
if (!"US-DC" %in% model$specs$ModelRegionAcronyms) {
state <- state.name[state.abb == gsub(".*-", "", model$specs$ModelRegionAcronyms[1])]
} else {
state <- "District of Columbia"
}
# Define data file name
filename <- paste("TwoRegion", model$specs$BaseIOLevel, dataname, model$specs$IOYear,
model$specs$IODataVersion, sep = "_")
# Adjust filename to fit what is on the Data Commons
if (dataname %in% c("UseTransactions", "FinalDemand")) {
filename <- gsub(dataname, "Use", filename)
} else if (dataname %in% c("DomesticUseTransactions", "DomesticFinalDemand")) {
filename <- gsub(dataname, "DomesticUse", filename)
}
# Load data
TwoRegionIOData <- readRDS(loadDataCommonsfile(paste0("stateio/", filename, ".rds")))
# Keep SoI and RoUS only
TwoRegionIOData <- TwoRegionIOData[[state]]
if (dataname %in% c("UseTransactions", "DomesticUseTransactions")) {
TwoRegionIOData <- TwoRegionIOData[model$Commodities$Code_Loc,
model$Industries$Code_Loc]
} else if (dataname %in% c("FinalDemand", "DomesticFinalDemand")) {
TwoRegionIOData <- TwoRegionIOData[model$Commodities$Code_Loc,
model$FinalDemandMeta$Code_Loc]
} else if (dataname == "ValueAdded") {
TwoRegionIOData <- TwoRegionIOData[model$ValueAddedMeta$Code_Loc,
model$Industries$Code_Loc]
}
return(TwoRegionIOData)
}
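# Hypothetical usage sketch (not part of the package source): `model` is
# assumed to be a USEEIO model object carrying the specs and metadata fields
# referenced above.
# use2r <- getTwoRegionIOData(model, "UseTransactions")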
|
/R/StateiorFunctions.R
|
permissive
|
USEPA/useeior
|
R
| false | false | 2,291 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/homebrewanalytics-package.R
\docType{package}
\name{homebrewanalytics}
\alias{homebrewanalytics}
\alias{homebrewanalytics-package}
\title{homebrewanalytics}
\description{
The 'Homebrew Project' <brew.sh> has a myriad of "recipes" that
make life easier for 'macOS' users by enabling them to (mostly) effortlessly
install popular open source libraries, tools, utilities and applications.
The project collectes anonymous metrics from users who have not opted-out
of metrics collection and makes them available via a 'JSON' 'API'.
}
\author{
Bob Rudis (bob@rud.is)
}
|
/man/homebrewanalytics.Rd
|
no_license
|
pombredanne/homebrewanalytics
|
R
| false | true | 641 |
rd
|
#' Old Radial Matrix Element.
#'
#' \code{old_radial_matrix_element} calculates the radial matrix element.
#'
#' This function calculates the radial matrix element for two arbitrary states
#' (n1, l1, j1) and (n2, l2, j2). A Numerov algorithm is used to compute the
#' radial matrix elements as done in Appendix A of Zimmerman et al, PRA, 20, 2251
#' (1979). The scaling used in this function is \eqn{\xi = \sqrt r},
#' \eqn{\Psi = r^(3/4) R(r)} as done by Bhatti, Cromer, and Cooke, PRA, 24, 161
#' (1981).
#'
#' @param k A numeric. The power of r to be calculated over. To get a dipole
#' matrix element, k must be equal to 1. Default k = 1.
#' @param n1 A numeric. The principal quantum number of state 1.
#' @param n2 A numeric. The principal quantum number of state 2.
#' @param l1 A numeric. The orbital angular momentum number of state 1.
#' @param l2 A numeric. The orbital angular momentum number of state 2.
#' @param j1 A numeric. The total angular momentum number of state 1.
#' @param j2 A numeric. The total angular momentum number of state 2.
#'
#' @export
old_radial_matrix_element <- function(n1, n2, l1, l2, j1, j2, k = 1){
# Number of Electrons
Z <- 1
# Quantum numbers for states 1 and 2. Primary quantum number n, orbital
# angular momentum l, total angular momentum j
delta1 <- quantum_defect(n1, l1, j1)
delta2 <- quantum_defect(n2, l2, j2)
E1 <- -1 / (2 * (n1 - delta1) ^ 2)
E2 <- -1 / (2 * (n2 - delta2) ^ 2)
# Inner and outer turning points, core radius for both states
r1_O <- 2 * n1 * (n1 + 15)
r1_I <- (n1 ^ 2 - n1 * sqrt(n1 ^ 2 - l1 * (l1 + 1))) / 2
r2_O <- 2 * n2 * (n2 + 15)
r2_I <- (n2 ^ 2 - n2 * sqrt(n2 ^ 2 - l2 * (l2 + 1)))/ 2
# Core radius as a function of core polarizability r = (a_c)^(1/3)
# Core polarizability is a_c = 9.0760 for Rubidium
core.radius <- (9.0760) ^ (1/3)
# Determine which outer turning point is the larger to set as starting point
r_0 <- max(r1_O, r2_O)
# Defining scaled x-axis ksi = sqrt(r), step size h, and starting
# point ksi_0 = sqrt(r_0)
ksi_0 <- sqrt(r_0)
h <- 0.01
ksi_1 <- ksi_0 - h
ksi_2 <- ksi_1 - h
# Initial wavefunction guesses
Psi1_0 <- 10 ^ -15
Psi1_1 <- 10 ^ -14
Psi2_0 <- 10 ^ -15
Psi2_1 <- 10 ^ -14
# Defining terms to be used in Numerov Algorithm
ksi_iminus1 <- ksi_0
ksi_i <- ksi_1
ksi_iplus1 <- ksi_2
Psi1_iminus1 <- Psi1_0
Psi1_i <- Psi1_1
Psi2_iminus1 <- Psi2_0
Psi2_i <- Psi2_1
# Establishing Numerov integration data frame
ksi <- numeric()
Psi1 <- numeric()
Psi2 <- numeric()
N1_i <- numeric()
N2_i <- numeric()
Psi12 <- numeric()
if(r1_O < r2_O){
ksi <- c(ksi, ksi_0, ksi_1)
Psi1 <- c(Psi1, 0, 0)
Psi2 <- c(Psi2, Psi2_0, Psi2_1)
N1_i <- c(N1_i, 0, 0)
N2_i <- c(N2_i, 2 * ksi_0 ^ 2 * Psi2_0 ^ 2, 2 * ksi_1 ^ 2 * Psi2_1 ^ 2 )
Psi12 <- c(Psi12, 0, 0)
} else{
ksi <- c(ksi, ksi_0, ksi_1)
Psi1 <- c(Psi1, Psi1_0, Psi1_1)
Psi2 <- c(Psi2, 0, 0)
N1_i <- c(N1_i, 2 * ksi_0 ^ 2 * Psi1_0 ^ 2, 2 * ksi_1 ^ 2 * Psi1_1 ^ 2)
N2_i <- c(N2_i, 0, 0)
Psi12 <- c(Psi12, 0, 0)
}
# Numerov Algorithm
# Iterates algorithm until the condition of ksi_(i+1) < sqrt(r_I)
# or ksi_(i+1) < sqrt(core.radius) is met
repeat{
# When ksi_i is larger than the smallest of the starting points, the
# normalization for the larger outer turning point accumulates while the
# other does not.
if(ksi_i > sqrt(min(r1_O, r2_O))){
#First statement is case when r1_O < r2_O, second statement is r2_O < r1_O
if(r1_O < r2_O){
g2_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E2 + Z - (l2 + 1/4) * (l2 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g2_i <- -8 * (ksi_i ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3/4) / (2 * ksi_i ^ 2))
g2_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
Psi2_iplus1 <- (Psi2_iminus1 * (g2_iminus1 - 12 / h^2) + Psi2_i * (10 * g2_i + 24 / h ^ 2)) / (12 / h ^ 2 - g2_iplus1)
N1_iplus1 <- 0
N2_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi2_iplus1 ^ 2 * h
if(ksi_iplus1 < sqrt(max(r1_I, r2_I)) | ksi_iplus1 < sqrt(core.radius)){
break
} else {
ksi <- c(ksi, ksi_iplus1)
Psi1 <- c(Psi1, 0)
Psi2 <- c(Psi2, Psi2_iplus1)
N1_i <- c(N1_i, N1_iplus1)
N2_i <- c(N2_i, N2_iplus1)
Psi12 <- c(Psi12, 0)
}
} else{
g1_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g1_i <- -8 * (ksi_i ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_i ^ 2))
g1_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
Psi1_iplus1 <- (Psi1_iminus1 * (g1_iminus1 - 12 / h^2) + Psi1_i * (10 * g1_i + 24 / h ^ 2)) / (12 / h ^ 2 - g1_iplus1)
N1_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi1_iplus1 ^ 2 * h
N2_iplus1 <- 0
if(ksi_iplus1 < sqrt(max(r1_I, r2_I)) | ksi_iplus1 < sqrt(core.radius)){
break
} else {
ksi <- c(ksi, ksi_iplus1)
Psi1 <- c(Psi1, Psi1_iplus1)
Psi2 <- c(Psi2, 0)
N1_i <- c(N1_i, N1_iplus1)
N2_i <- c(N2_i, N2_iplus1)
Psi12 <- c(Psi12, 0)
}
}
if(r1_O < r2_O){
Psi2_iminus1 <- Psi2_i
Psi2_i <- Psi2_iplus1
} else{
Psi1_iminus1 <- Psi1_i
Psi1_i <- Psi1_iplus1
}
} else{
g1_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g1_i <- -8 * (ksi_i ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_i ^ 2))
g1_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
g2_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g2_i <- -8 * (ksi_i ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_i ^ 2))
g2_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
Psi1_iplus1 <- (Psi1_iminus1 * (g1_iminus1 - 12 / h ^ 2) + Psi1_i * (10 * g1_i + 24 / h ^ 2)) / (12 / h ^ 2 - g1_iplus1)
Psi2_iplus1 <- (Psi2_iminus1 * (g2_iminus1 - 12 / h ^ 2) + Psi2_i * (10 * g2_i + 24 / h ^ 2)) / (12 / h ^ 2 - g2_iplus1)
Psi12_iplus1 <- 2 * Psi1_iplus1 * Psi2_iplus1 * ksi_iplus1 ^ (2 + 2 * k) * h
N1_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi1_iplus1 ^ 2 * h
N2_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi2_iplus1 ^ 2 * h
new.row <- data.frame(ksi = ksi_iplus1, Psi1 = Psi1_iplus1, Psi2 = Psi2_iplus1, N1_i = N1_iplus1, N2_i=N2_iplus1, Psi12 = Psi12_iplus1)
if(ksi_iplus1 < sqrt(max(r1_I, r2_I)) | ksi_iplus1 < sqrt(core.radius)){
break
} else {
ksi <- c(ksi, ksi_iplus1)
Psi1 <- c(Psi1, Psi1_iplus1)
Psi2 <- c(Psi2, Psi2_iplus1)
N1_i <- c(N1_i, N1_iplus1)
N2_i <- c(N2_i, N2_iplus1)
Psi12 <- c(Psi12, Psi12_iplus1)
}
Psi1_iminus1 <- Psi1_i
Psi1_i <- Psi1_iplus1
Psi2_iminus1 <- Psi2_i
Psi2_i <- Psi2_iplus1
}
ksi_iminus1 <- ksi_i
ksi_i <- ksi_iplus1
ksi_iplus1 <- ksi_iplus1 - h
}
RadialMatrixElement <- sum(Psi12) / (sqrt(sum(N1_i)) * sqrt(sum(N2_i)))
RadialMatrixElement
}
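# Hypothetical usage sketch (assumes quantum_defect() from this package is
# loaded): radial dipole matrix element between the 30s_1/2 and 30p_3/2
# states of rubidium.
# old_radial_matrix_element(n1 = 30, n2 = 30, l1 = 0, l2 = 1, j1 = 0.5, j2 = 1.5)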
|
/R/old_radial_matrix_element.R
|
no_license
|
bgrich/starkr
|
R
| false | false | 7,326 |
r
|
#' Old Radial Matrix Element.
#'
#' \code{radial_matrix_element} calculates the radial matrix element.
#'
#' This function calculates the radial matrix element for two arbitrary states
#' (n1, l1, j1) and (n2, l2, j2). A Numerov algorithm is used to compute the
#' radial matrix elements as done in Appendix A of Zimmerman et al, PRA, 20, 2251
#' (1979). The scaling used in this function is \eqn{\xi = \sqrt r},
#' \eqn{\Psi = r^(3/4) R(r)} as done by Bhatti, Cromer, and Cooke, PRA, 24, 161
#' (1981).
#'
#' @param k A numeric. The power of r to be calculated over. To get a dipole
#' matrix element, k must be equal to 1. Default k = 1.
#' @param n1 A numeric. The principle quantum number of state 1.
#' @param n2 A numeric. The principle quantum number of state 2.
#' @param l1 A numeric. The orbital angular momentum number of state 1.
#' @param l2 A numeric. The orbital angular momentum number of state 2.
#' @param j1 A numeric. The total angular momentum number of state 1.
#' @param j2 A numeric. The total angular momentum number of state 2.
#'
#' @export
old_radial_matrix_element <- function(n1, n2, l1, l2, j1, j2, k = 1){
# Number of Electrons
Z <- 1
# Quantum numbers for states 1 and 2. Primary quantum number n, orbital
# angular momentum l, total angular momentum j
delta1 <- quantum_defect(n1, l1, j1)
delta2 <- quantum_defect(n2, l2, j2)
E1 <- -1 / (2 * (n1 - delta1) ^ 2)
E2 <- -1 / (2 * (n2 - delta2) ^ 2)
# Inner and outer turning points, core radius for both states
r1_O <- 2 * n1 * (n1 + 15)
r1_I <- (n1 ^ 2 - n1 * sqrt(n1 ^ 2 - l1 * (l1 + 1))) / 2
r2_O <- 2 * n2 * (n2 + 15)
r2_I <- (n2 ^ 2 - n2 * sqrt(n2 ^ 2 - l2 * (l2 + 1)))/ 2
# Core radius as a function of core polarizability r = (a_c)^(1/3)
# Core polarizability is a_c = 9.0760 for Rubidium
core.radius <- (9.0760) ^ (1/3)
# Determine which outer turning point is the larger to set as starting point
r_0 <- max(r1_O, r2_O)
# Defining scaled x-axis ksi = sqrt(r), step size h, and starting
# point ksi_0 = sqrt(r_0)
ksi_0 <- sqrt(r_0)
h <- 0.01
ksi_1 <- ksi_0 - h
ksi_2 <- ksi_1 - h
# Initial wavefunction guesses
Psi1_0 <- 10 ^ -15
Psi1_1 <- 10 ^ -14
Psi2_0 <- 10 ^ -15
Psi2_1 <- 10 ^ -14
# Defining terms to be used in Numerov Algorithm
ksi_iminus1 <- ksi_0
ksi_i <- ksi_1
ksi_iplus1 <- ksi_2
Psi1_iminus1 <- Psi1_0
Psi1_i <- Psi1_1
Psi2_iminus1 <- Psi2_0
Psi2_i <- Psi2_1
# Establishing Numerov integration data frame
ksi <- numeric()
Psi1 <- numeric()
Psi2 <- numeric()
N1_i <- numeric()
N2_i <- numeric()
Psi12 <- numeric()
if(r1_O < r2_O){
ksi <- c(ksi, ksi_0, ksi_1)
Psi1 <- c(Psi1, 0, 0)
Psi2 <- c(Psi2, Psi2_0, Psi2_1)
N1_i <- c(N1_i, 0, 0)
N2_i <- c(N2_i, 2 * ksi_0 ^ 2 * Psi2_0 ^ 2, 2 * ksi_1 ^ 2 * Psi2_1 ^ 2 )
Psi12 <- c(Psi12, 0, 0)
} else{
ksi <- c(ksi, ksi_0, ksi_1)
Psi1 <- c(Psi1, Psi1_0, Psi1_1)
Psi2 <- c(Psi2, 0, 0)
N1_i <- c(N1_i, 2 * ksi_0 ^ 2 * Psi1_0 ^ 2, 2 * ksi_1 ^ 2 * Psi1_1 ^ 2)
N2_i <- c(N2_i, 0, 0)
Psi12 <- c(Psi12, 0, 0)
}
# Numerov Algorithm
# Iterates algorithm until the condition of ksi_(i+1) < sqrt(r_I)
# or ksi_(i+1) < sqrt(core.radius) is met
repeat{
# When ksi_i is larger than the smallest of the starting points, the
# normalization for the larger outer turning point accumulates while the
# other does not.
if(ksi_i > sqrt(min(r1_O, r2_O))){
#First statement is case when r1_O < r2_O, second statement is r2_O < r1_O
if(r1_O < r2_O){
g2_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E2 + Z - (l2 + 1/4) * (l2 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g2_i <- -8 * (ksi_i ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3/4) / (2 * ksi_i ^ 2))
g2_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
Psi2_iplus1 <- (Psi2_iminus1 * (g2_iminus1 - 12 / h^2) + Psi2_i * (10 * g2_i + 24 / h ^ 2)) / (12 / h ^ 2 - g2_iplus1)
N1_iplus1 <- 0
N2_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi2_iplus1 ^ 2 * h
if(ksi_iplus1 < sqrt(max(r1_I, r2_I)) | ksi_iplus1 < sqrt(core.radius)){
break
} else {
ksi <- c(ksi, ksi_iplus1)
Psi1 <- c(Psi1, 0)
Psi2 <- c(Psi2, Psi2_iplus1)
N1_i <- c(N1_i, N1_iplus1)
N2_i <- c(N2_i, N2_iplus1)
Psi12 <- c(Psi12, 0)
}
} else{
g1_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g1_i <- -8 * (ksi_i ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_i ^ 2))
g1_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
Psi1_iplus1 <- (Psi1_iminus1 * (g1_iminus1 - 12 / h^2) + Psi1_i * (10 * g1_i + 24 / h ^ 2)) / (12 / h ^ 2 - g1_iplus1)
N1_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi1_iplus1 ^ 2 * h
N2_iplus1 <- 0
if(ksi_iplus1 < sqrt(max(r1_I, r2_I)) | ksi_iplus1 < sqrt(core.radius)){
break
} else {
ksi <- c(ksi, ksi_iplus1)
Psi1 <- c(Psi1, Psi1_iplus1)
Psi2 <- c(Psi2, 0)
N1_i <- c(N1_i, N1_iplus1)
N2_i <- c(N2_i, N2_iplus1)
Psi12 <- c(Psi12, 0)
}
}
if(r1_O < r2_O){
Psi2_iminus1 <- Psi2_i
Psi2_i <- Psi2_iplus1
} else{
Psi1_iminus1 <- Psi1_i
Psi1_i <- Psi1_iplus1
}
} else{
g1_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g1_i <- -8 * (ksi_i ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_i ^ 2))
g1_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E1 + Z - (l1 + 1 / 4) * (l1 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
g2_iplus1 <- -8 * (ksi_iplus1 ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_iplus1 ^ 2))
g2_i <- -8 * (ksi_i ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_i ^ 2))
g2_iminus1 <- -8 * (ksi_iminus1 ^ 2 * E2 + Z - (l2 + 1 / 4) * (l2 + 3 / 4) / (2 * ksi_iminus1 ^ 2))
Psi1_iplus1 <- (Psi1_iminus1 * (g1_iminus1 - 12 / h ^ 2) + Psi1_i * (10 * g1_i + 24 / h ^ 2)) / (12 / h ^ 2 - g1_iplus1)
Psi2_iplus1 <- (Psi2_iminus1 * (g2_iminus1 - 12 / h ^ 2) + Psi2_i * (10 * g2_i + 24 / h ^ 2)) / (12 / h ^ 2 - g2_iplus1)
Psi12_iplus1 <- 2 * Psi1_iplus1 * Psi2_iplus1 * ksi_iplus1 ^ (2 + 2 * k) * h
N1_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi1_iplus1 ^ 2 * h
N2_iplus1 <- 2 * ksi_iplus1 ^ 2 * Psi2_iplus1 ^ 2 * h
if(ksi_iplus1 < sqrt(max(r1_I, r2_I)) | ksi_iplus1 < sqrt(core.radius)){
break
} else {
ksi <- c(ksi, ksi_iplus1)
Psi1 <- c(Psi1, Psi1_iplus1)
Psi2 <- c(Psi2, Psi2_iplus1)
N1_i <- c(N1_i, N1_iplus1)
N2_i <- c(N2_i, N2_iplus1)
Psi12 <- c(Psi12, Psi12_iplus1)
}
Psi1_iminus1 <- Psi1_i
Psi1_i <- Psi1_iplus1
Psi2_iminus1 <- Psi2_i
Psi2_i <- Psi2_iplus1
}
ksi_iminus1 <- ksi_i
ksi_i <- ksi_iplus1
ksi_iplus1 <- ksi_iplus1 - h
}
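# Radial matrix element <1|r^k|2>: the accumulated cross integral Psi12,
# normalized by the norms of the two (unnormalized) radial wavefunctions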
RadialMatrixElement <- sum(Psi12) / (sqrt(sum(N1_i)) * sqrt(sum(N2_i)))
RadialMatrixElement
}
|
## 2018 APPLIED PUBLIC HEALTH STATISTICS BREAKFAST WORKSHOP ##
## R EXAMPLE 2: DESCRIPTIVES AND VISUALIZATION OF ILD ##
# SUMMER FRANK-PEARCE & TRENT L. LALONDE #
# R FILE FOR VISUALIZATION AND EXPLORATION OF ILD #
# CONTENTS: #
# ALL ANALYSES REPEATED FOR BOTH MJ AND SMK DATASETS #
#
# (1) DATA SUMMARY AND TABLES #
# (2) AGGREGATED SUMMARY STATISTICS #
# (3) BOX PLOTS FOR RAW DATA #
# (4) SUBJECT-LEVEL SUMMARY STATISTICS #
# (5) HISTOGRAMS AND BOX PLOTS OF AGGREGATES #
# (6) TIME PLOTS #
# (7) SPAGHETTI PLOTS #
# (8) VARIOGRAMS #
library(ggplot2)
# READ IN THE DATA #
# MJ DATA #
setwd('/Users/trent.lalonde/Documents/Research/Presentations/APHA/APHA ILD Workshop/Software/Data/')
WorkshopMJData = read.csv('WorkshopMJData.csv')
# SMK DATA #
setwd('/Users/trent.lalonde/Documents/Research/Presentations/APHA/APHA ILD Workshop/Software/Data/')
WorkshopSMKData = read.csv('WorkshopSMKData.csv')
## BASIC EXPLORATION OF DATA ##
# MJ DATA #
dim(WorkshopMJData)
summary(WorkshopMJData)
table(WorkshopMJData$Used,useNA='ifany')
table(WorkshopMJData$Others,useNA='ifany')
# SMK DATA #
dim(WorkshopSMKData)
summary(WorkshopSMKData)
table(WorkshopSMKData$WithOthers,useNA='ifany')
table(WorkshopSMKData$Stress,useNA='ifany')
## MEANS AND VARIANCES BY RELEVANT TIME (DAY OF WEEK) ##
# MJ DATA #
(DailyFrequencyMeans = aggregate(Frequency~Day,data=WorkshopMJData,FUN=function(x)mean(x,na.rm=TRUE)))
(DailyFrequencyVariances = aggregate(Frequency~Day,data=WorkshopMJData,FUN=function(x)var(x,na.rm=TRUE)))
# SMK DATA #
(DailyCigaretteMeans = aggregate(Cigarettes~TimeInStudy,data=WorkshopSMKData,FUN=function(x)mean(x,na.rm=TRUE)))
(DailyCigaretteVariances = aggregate(Cigarettes~TimeInStudy,data=WorkshopSMKData,FUN=function(x)var(x,na.rm=TRUE)))
# BOXPLOTS BY DAY OF WEEK #
# MJ DATA #
# RE-ORDER DAYS #
WorkshopMJData$Day = factor(WorkshopMJData$Day) # ensure Day is a factor (read.csv no longer converts strings by default)
WorkshopMJData$Day = factor(WorkshopMJData$Day, levels=levels(WorkshopMJData$Day)[c(2,6,7,5,1,3,4)])
png('BoxplotFrequencyByDay.png')
ggplot(WorkshopMJData, aes(x=as.factor(Day),y=Frequency)) +
geom_boxplot(notch=TRUE,outlier.colour='grey45',fill='grey45') +
stat_summary(fun=mean, geom="point", shape=18, size=5) +
ggtitle("Box Plots of Frequency by Day") +
xlab("Day") +
ylab("Frequency") +
theme(axis.text=element_text(size=12), axis.title=element_text(size=20), plot.title=element_text(size=24),legend.position="none")
dev.off()
# SMK DATA #
pdf('BoxplotCigarettesByDay.pdf')
ggplot(WorkshopSMKData, aes(x=as.factor(TimeInStudy),y=Cigarettes)) +
geom_boxplot(notch=TRUE,outlier.colour='grey45',fill='grey45') +
stat_summary(fun=mean, geom="point", shape=18, size=5) +
ggtitle("Box Plots of Cigarettes by Time") +
xlab("Time") +
ylab("Cigarettes") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24),legend.position="none")
dev.off()
## DISTRIBUTIONS OF SUMMARY STATISTICS AMONG SUBJECTS ##
# SOMETIMES USE MEANS #
# MJ DATA #
(SubjectFrequencyMeans = aggregate(Frequency~ID,data=WorkshopMJData,FUN=function(x)mean(x,na.rm=TRUE)))
fivenum(SubjectFrequencyMeans$Frequency)
pdf('HistogramFrequency.pdf')
ggplot(SubjectFrequencyMeans, aes(Frequency)) +
geom_histogram(aes(y=after_stat(density)),col='grey45') +
geom_density() +
ggtitle("Histogram of Frequency Means") +
xlab("Frequency of Use") +
ylab("Frequency") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24))
dev.off()
pdf('BoxPlotFrequency.pdf')
ggplot(SubjectFrequencyMeans, aes(x=1,y=Frequency)) +
geom_boxplot(notch=TRUE,outlier.colour='grey45',fill='grey45') +
stat_summary(fun=mean, geom="point", shape=18, size=5) +
ggtitle("Box Plot of Frequency Means") +
xlab("Frequency Mean Box Plot") +
ylab("Frequency Mean") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24),legend.position="none")
dev.off()
# SMK DATA #
(SubjectCigaretteMeans = aggregate(Cigarettes~ID,data=WorkshopSMKData,FUN=function(x)mean(x,na.rm=TRUE)))
fivenum(SubjectCigaretteMeans$Cigarettes)
pdf('HistogramCigarettes.pdf')
ggplot(SubjectCigaretteMeans, aes(Cigarettes)) +
geom_histogram(aes(y=after_stat(density)),col='grey45') +
geom_density() +
ggtitle("Histogram of Cigarette Means") +
xlab("Mean of Cigarettes") +
ylab("Cigarettes") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24))
dev.off()
pdf('BoxPlotCigarettes.pdf')
ggplot(SubjectCigaretteMeans, aes(x=1,y=Cigarettes)) +
geom_boxplot(notch=TRUE,outlier.colour='grey45',fill='grey45') +
stat_summary(fun=mean, geom="point", shape=18, size=5) +
ggtitle("Box Plot of Cigarette Means") +
xlab("Cigarette Mean Box Plot") +
ylab("Cigarette Mean") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24),legend.position="none")
dev.off()
# SOMETIMES USE TOTALS #
# MJ DATA #
(SubjectUseTotals = aggregate(Used~ID,data=WorkshopMJData,FUN=function(x)sum(x,na.rm=TRUE)))
fivenum(SubjectUseTotals$Used)
pdf('HistogramUse.pdf')
ggplot(SubjectUseTotals, aes(Used)) +
geom_histogram(aes(y=after_stat(density)),col='grey45') +
geom_density() +
ggtitle("Histogram of Total Use") +
xlab("Use") +
ylab("Frequency") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24))
dev.off()
pdf('BoxplotUse.pdf')
ggplot(SubjectUseTotals, aes(x=1,y=Used)) +
geom_boxplot(notch=TRUE,outlier.colour='grey45',fill='grey45') +
stat_summary(fun=mean, geom="point", shape=18, size=5) +
ggtitle("Box Plot of Total Use") +
xlab("Total Use Box Plot") +
ylab("Total Use") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24),legend.position="none")
dev.off()
# MJ DATA #
(SubjectCigTotals = aggregate(Cigarettes~ID,data=WorkshopSMKData,FUN=function(x)sum(x,na.rm=TRUE)))
fivenum(SubjectCigTotals$Cigarettes)
pdf('HistogramCig.pdf')
ggplot(SubjectCigTotals, aes(Cigarettes)) +
geom_histogram(aes(y=after_stat(density)),col='grey45') +
geom_density() +
ggtitle("Histogram of Total Cigarettes") +
xlab("Cigarettes") +
ylab("Frequency") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24))
dev.off()
pdf('BoxplotCig.pdf')
ggplot(SubjectCigTotals, aes(x=1,y=Cigarettes)) +
geom_boxplot(notch=TRUE,outlier.colour='grey45',fill='grey45') +
stat_summary(fun=mean, geom="point", shape=18, size=5) +
ggtitle("Box Plot of Total Cigarettes") +
xlab("Total Cigarettes Box Plot") +
ylab("Total Cigarettes") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24),legend.position="none")
dev.off()
# SOMETIMES COMPARE #
(UseData = merge(SubjectFrequencyMeans, SubjectUseTotals,by.x='ID',by.y='ID'))
pdf('ScatterPlotUse.pdf')
ggplot(UseData, aes(x=Used,y=Frequency)) +
geom_point(col='grey45') +
geom_smooth(col='grey45') +
ggtitle("Scatter Plot of Frequency Means Versus Total Use") +
xlab("Total Use") +
ylab("Mean Frequency") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=18))
dev.off()
## PLOTS USING RAW DATA ##
# TIME PLOT #
# MJ DATA #
pdf('ScatterPlotFrequency.pdf')
ggplot(WorkshopMJData, aes(x=TimeInStudy,y=Frequency)) +
geom_point(col='grey45') +
geom_smooth(col='grey45') +
ggtitle("Scatter Plot of Frequency Versus Time") +
xlab("Study Prompt") +
ylab("Frequency") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24))
dev.off()
# SMK DATA #
pdf('ScatterPlotCigarettes.pdf')
ggplot(WorkshopSMKData, aes(x=TimeInStudy,y=Cigarettes)) +
geom_point(col='grey45') +
geom_smooth(col='grey45') +
ggtitle("Scatter Plot of Cigarettes Versus Time") +
xlab("Study Prompt") +
ylab("Cigarettes") +
theme(axis.text=element_text(size=16), axis.title=element_text(size=20), plot.title=element_text(size=24))
dev.off()
# SPAGHETTI PLOT #
# MJ DATA #
interaction.plot(x.factor = as.factor(WorkshopMJData$TimeInStudy),
                 trace.factor = as.factor(WorkshopMJData$ID),
                 response = WorkshopMJData$Frequency,
                 fun = function(x) mean(x, na.rm = TRUE))
# SMK DATA #
interaction.plot(x.factor = as.factor(WorkshopSMKData$TimeInStudy),
                 trace.factor = as.factor(WorkshopSMKData$ID),
                 response = WorkshopSMKData$Cigarettes,
                 fun = function(x) mean(x, na.rm = TRUE))
# VARIOGRAM #
library(joineR)
# joineR provides variogram() for irregular longitudinal data; see ?variogram
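# A minimal sketch of the variogram section (assumption: joineR's variogram()
# takes subject IDs, measurement times, and the response, with the column
# roles used above):
MJVariogram = variogram(indv = WorkshopMJData$ID,
                        time = WorkshopMJData$TimeInStudy,
                        Y = WorkshopMJData$Frequency)
plot(MJVariogram)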
| /2 - ILDVisualization.R | no_license | lalondetl/APHSWorkshop | R |
library(tidyverse)
# https://stat.ethz.ch/R-manual/R-devel/library/datasets/html/00Index.html
# ability.cov
# airmiles
# AirPassengers
airquality %>%
dplyr::group_by(Month) %>%
dplyr::summarise_all(mean, na.rm = TRUE)
tidy_anscombe = anscombe %>%
tibble::rowid_to_column("id") %>%
tidyr::pivot_longer(!id,
names_to = c("axis", "group"),
names_sep = 1L,
names_transform = list(group = as.integer)) %>%
tidyr::pivot_wider(c(id, group), names_from = axis) %>%
dplyr::select(!id) %>%
dplyr::arrange(group)
tidy_anscombe %>%
dplyr::group_by(group) %>%
dplyr::summarise(
x_mean = mean(x), x_sd = sd(x),
y_mean = mean(y), y_sd = sd(y),
cor_xy = cor(x, y)
)
tidy_anscombe %>%
tidyr::nest(data = !group) %>%
dplyr::mutate(data = purrr::map(data, ~{
summarise_all(.x, list(mean = mean, sd = sd)) %>%
dplyr::mutate(cor = cor(.x$x, .x$y))
})) %>%
tidyr::unnest(data)
tidy_anscombe %>%
ggplot(aes(x, y)) +
geom_point(size = 3) +
stat_smooth(method = lm, se = FALSE, fullrange = TRUE) +
facet_wrap(vars(group), nrow = 1L)
tidy_anscombe %>%
ggplot(aes(x, y)) +
geom_point(size = 2) +
stat_smooth(method = lm, se = FALSE, fullrange = TRUE) +
stat_summary(fun.data = mean_se) +
facet_wrap(vars(group))
# attenu
# attitude
# austres
# beaver1 beaver2
# BJsales
# BOD
# cars
ChickWeight %>%
ggplot(aes(Time, weight, group = Chick)) +
geom_line(aes(colour = Diet)) +
theme_bw()
chickwts %>% dplyr::group_by(feed) %>% dplyr::summarise_all(list(mean = mean, sd = sd, n = length))
chickwts %>%
as_tibble() %>%
ggplot(aes(weight)) +
geom_histogram(bins = 10) +
facet_wrap(vars(feed)) +
theme_bw()
# co2
# crimtab
# discoveries
DNase
esoph
# euro
# eurodist
# EuStockMarkets
(faithful %>%
ggplot(aes(eruptions, waiting)) +
geom_point()) %>%
ggExtra::ggMarginal(type = "histogram")
# freeny
# Formaldehyde
HairEyeColor
# Harman23.cor
# Harman74.cor
Indometh
# infert
InsectSprays %>% head()
iris
iris3
# islands
# JohnsonJohnson
# LakeHuron
# lh
# LifeCycleSavings
# Loblolly
# longley
# lynx
# morley
mtcars
# nhtemp
# Nile
# nottem
# npk
# occupationalStatus
# Orange
OrchardSprays %>% head()
OrchardSprays %>% tidyr::pivot_wider(names_from = colpos, values_from = c(decrease, treatment))
PlantGrowth %>% dplyr::group_by(group) %>% dplyr::summarise_all(mean)
# precip
# presidents
pressure
# Puromycin
quakes %>%
ggplot(aes(long, lat)) +
geom_point(aes(size = mag, colour = depth), alpha = 0.4) +
scale_size_continuous(name = "magnitude", range = c(1, 6), guide = guide_legend(reverse = TRUE)) +
scale_colour_viridis_c(option = "magma", direction = -1, guide = guide_colourbar(reverse = TRUE)) +
labs(title = "Quakes", x = "Longitude", y = "Latitude") +
theme_gray(base_size = 14) +
theme(
panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "#8090a0"),
)
# randu %>% ggplot(aes(x, y, colour=z))+geom_point()
# rivers
# rock
Seatbelts
# sleep
# stackloss
tibble::tibble(
name = state.name,
abb = state.abb,
division = state.division,
region = state.region
) %>%
bind_cols(as_tibble(state.x77)) %>%
bind_cols(state.center)
# sunspot.month sunspot.year sunspots
# swiss
# Theoph %>% head()
Titanic
# ToothGrowth
# treering
# trees
UCBAdmissions
UKDriverDeaths
ldeaths # mdeaths + fdeaths
# UKgas
# USAccDeaths
# USArrests
# USJudgeRatings
USPersonalExpenditure %>% as.data.frame() %>% rownames_to_column("category") %>%
tidyr::pivot_longer(!category, "year", names_transform = list(year = as.integer), values_to = "dollar")
# uspop
va_deaths = VADeaths %>%
as.data.frame() %>%
tibble::rownames_to_column("class") %>%
as_tibble() %>%
tidyr::separate(class, c("lbound", "ubound"), "-", convert = TRUE) %>%
print() %>%
tidyr::pivot_longer(!matches("bound$"),
names_to = c("region", "sex"),
names_sep = " ",
values_to = "death_rate") %>%
dplyr::mutate(death_rate = death_rate * 0.1) %>%
print()
ggplot(va_deaths, aes(lbound, death_rate)) +
geom_line(aes(colour = sex, linetype = region), size = 1) +
theme_bw()
# volcano
# warpbreaks
# women
# WWWusage
WorldPhones %>%
as.data.frame() %>%
rownames_to_column("year") %>%
dplyr::mutate(year = as.integer(year)) %>%
tidyr::pivot_longer(!year, "country", values_to = "phones") %>%
ggplot(aes(year, phones)) +
geom_line(aes(colour = country)) +
theme_bw()
# #######1#########2#########3#########4#########5#########6#########7#########
# ggplot2
ggplot(diamonds, aes(carat, price)) +
geom_point(aes(colour = clarity), alpha = 0.5) +
facet_grid(vars(cut), vars(color)) +
scale_colour_viridis_d(
guide = guide_legend(reverse = TRUE, override.aes = list(alpha = 1))
) +
labs(title = "Diamonds") +
theme_gray(base_size = 14) +
theme(
panel.grid = element_blank(),
panel.background = element_rect(fill = "#aaaaaa"),
legend.key = element_rect(fill = "#aaaaaa"),
axis.text = element_blank(), axis.ticks = element_blank()
)
seals %>%
dplyr::mutate(v = sqrt(delta_lat ^ 2 + delta_long ^ 2)) %>%
ggplot(aes(x = long, y = lat, colour = v)) +
geom_segment(
aes(xend = long + delta_long, yend = lat + delta_lat),
arrow = arrow(length = unit(1.5, "mm")), size = 1
) +
scale_colour_viridis_c(option = "magma", end = 0.7, guide = "none") +
labs(title = "Seals", x = "Longitude", y = "Latitude") +
theme_bw(base_size = 14)
economics_long %>%
tidyr::pivot_wider(!value01, names_from = variable, values_from = value)
# #######1#########2#########3#########4#########5#########6#########7#########
# install.packages("AER")
library(AER)
data_AER = data(package = "AER")[["results"]] %>%
as_tibble() %>%
dplyr::select(!LibPath) %>%
print()
data_AER[["Item"]] %>% paste(collapse="\n") %>% cat("\n")
data(list = data_AER[["Item"]], package = "AER")
Affairs %>% as_tibble()
ArgentinaCPI
BankWages %>% as_tibble()
BenderlyZwick
BondYield
CASchools %>% as_tibble()
CPS1985 %>% as_tibble()
CPS1988 %>% as_tibble()
CPSSW04 %>% as_tibble()
CPSSW3 %>% as_tibble()
CPSSW8 %>% as_tibble()
CPSSW9204 %>% as_tibble()
CPSSW9298 %>% as_tibble()
CPSSWEducation %>% as_tibble()
CartelStability %>% as_tibble()
ChinaIncome
CigarettesB %>% rownames_to_column("state") %>% as_tibble()
CigarettesSW %>% as_tibble()
CollegeDistance %>% as_tibble()
ConsumerGood
CreditCard %>% as_tibble()
DJFranses
DJIA8012
DoctorVisits %>% as_tibble()
DutchAdvert
DutchSales
Electricity1955 %>% as_tibble()
Electricity1970 %>% rownames_to_column() %>% as_tibble()
EquationCitations %>% as_tibble()
Equipment %>% rownames_to_column("state") %>% as_tibble()
EuroEnergy %>% rownames_to_column("country") %>% as_tibble()
Fatalities %>% as_tibble()
Fertility %>% as_tibble()
Fertility2 %>% as_tibble()
FrozenJuice
GSOEP9402 %>% as_tibble()
GSS7402 %>% as_tibble()
GermanUnemployment
GoldSilver
GrowthDJ %>% as_tibble()
GrowthSW %>% rownames_to_column("country") %>% as_tibble()
Grunfeld %>% as_tibble()
Guns %>% as_tibble()
HMDA %>% as_tibble()
HealthInsurance %>% as_tibble()
HousePrices %>% as_tibble()
Journals %>% rownames_to_column("abbrev") %>% as_tibble()
KleinI
Longley
MASchools %>% as_tibble()
MSCISwitzerland
ManufactCosts
MarkDollar
MarkPound
Medicaid1986 %>% as_tibble()
Mortgage %>% as_tibble()
MotorCycles
MotorCycles2
Municipalities %>% as_tibble()
MurderRates %>% as_tibble()
NMES1988 %>% as_tibble()
NYSESW
NaturalGas %>% as_tibble()
OECDGas %>% as_tibble()
OECDGrowth %>% rownames_to_column("country") %>% as_tibble()
OlympicTV %>% rownames_to_column("city") %>% as_tibble()
OrangeCounty
PSID1976 %>% as_tibble()
PSID1982 %>% as_tibble()
PSID7682 %>% as_tibble()
Parade2005 %>% as_tibble()
PepperPrice
PhDPublications %>% as_tibble()
ProgramEffectiveness %>% as_tibble()
RecreationDemand %>% as_tibble()
ResumeNames %>% as_tibble()
SIC33 %>% as_tibble()
STAR %>% as_tibble()
ShipAccidents %>% as_tibble()
SmokeBan %>% as_tibble()
SportsCards %>% as_tibble()
StrikeDuration %>% as_tibble()
SwissLabor %>% as_tibble()
TeachingRatings %>% as_tibble()
TechChange
TradeCredit
TravelMode %>% as_tibble()
UKInflation
UKNonDurables
USAirlines %>% as_tibble()
USConsump1950
USConsump1979
USConsump1993
USCrudes %>% as_tibble()
USGasB
USGasG
USInvest
USMacroB
USMacroG
USMacroSW
USMacroSWM
USMacroSWQ
USMoney
USProdIndex
USSeatBelts %>% as_tibble()
USStocksSW
WeakInstrument %>% as_tibble()
# #######1#########2#########3#########4#########5#########6#########7#########
# install.packages("COUNT")
library(COUNT)
data_COUNT = data(package = "COUNT")[["results"]] %>%
as_tibble() %>%
dplyr::select(!LibPath) %>%
print()
data(list = data_COUNT[["Item"]], package = "COUNT")
affairs %>% as_tibble()
azcabgptca %>% as_tibble()
azdrg112 %>% as_tibble()
azpro %>% as_tibble()
azprocedure %>% as_tibble()
badhealth %>% as_tibble() %>%
ggplot() +
aes(numvisit, badh) +
geom_jitter(aes(color = age), height = 0.2, width = 0, alpha = 0.5) +
stat_smooth(formula = y ~ x, method = glm, method.args = list(family = binomial))
fasttrakg %>% as_tibble()
fishing %>% as_tibble()
lbw %>% as_tibble()
lbwgrp %>% as_tibble()
loomis %>% as_tibble()
mdvis %>% as_tibble() %>% plot()
medpar %>% as_tibble()
nuts %>% as_tibble()
rwm %>% as_tibble() %>% plot()
rwm1984 %>% as_tibble()
rwm5yr %>% as_tibble()
ships %>% as_tibble()
smoking %>% as_tibble()
titanic %>% as_tibble()
titanicgrp %>% as_tibble()
MASS::ships %>% as_tibble() %>%
ggplot() +
aes(log10(service), incidents) +
geom_point(aes(color = type), alpha = 0.6)
| /rstats/datasets.R | permissive | heavywatal/scribble | R |
createFreqTable = function() {
CountsUM = as.data.frame(xtabs(~ Area + Organisation.Type + Year + Classified + Description, data=UM))
SortedCounts = CountsUM[order(-CountsUM$Freq), ]
names(SortedCounts)[names(SortedCounts)=="Organisation.Type"] <- "Type"
names(SortedCounts)[names(SortedCounts)=="Classified"] <- "Class."
print(SortedCounts[1:30, ], row.names = FALSE)
}
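# Usage note: the function assumes a data frame `UM` with columns Area,
# Organisation.Type, Year, Classified and Description in the calling
# environment, e.g. (hypothetical source file):
# UM <- read.csv("UM.csv")
# createFreqTable()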
| /R/createFreqTable.R | no_license | Anjs04/UM | R |
################################################################################
# Accompanying code for the paper:
# Root traits influence storm-water performance in a green roof microcosm
#
# Authorship:
# Garland Xie (1)
# Jeremy Lundholm (2)
#
# Corresponding for this script:
# Garland Xie (1)
#
# Institutional affiliations:
# (1) Department of Biological Sciences
# University of Toronto Scarborough,
# 1265 Military Trail, Toronto, ON, M1C 1A4, Canada
# email: garlandxie@gmail.com
# (2) Department of Biology
# Saint Mary's University
# 923 Robie St., Halifax, NS, B3H 3C3, Canada
#
# Purpose of this R script: to conduct statistical analyses on the relationship
# between root traits and ecosystem functions
# libraries --------------------------------------------------------------------
library(here)
library(dplyr)
library(ggplot2)
library(tidyr)
library(car)
library(broom)
library(ggstatsplot)
library(flextable)
library(lme4)
# import -----------------------------------------------------------------------
traits_EF <- readRDS(
here("data/final",
"traits_EF_clean_df.rds")
)
# check packaging --------------------------------------------------------------
str(traits_EF)
# split data set: wet ----------------------------------------------------------
EF_WW <- traits_EF %>%
filter(treatment == "WW") %>%
drop_na() %>%
ungroup() %>%
as.data.frame()
# split data set: dry -----------------------------------------------------------
EF_WD <- traits_EF %>%
filter(treatment == "WD") %>%
drop_na() %>%
ungroup() %>%
as.data.frame()
# linear mixed effect models: water capture (dry treatment) --------------------
lmm_total_ret_WD <- lmer(
total_water_capture ~
# fixed vars
scale(srl) +
scale(mean_radius_mm) +
scale(rld) +
scale(rmf) +
scale(max_root_depth_cm) +
# covariate var
scale(plant_size) +
# block random effect
(1|block),
data = EF_WD)
# check the variance component of the random effect
# five levels for block effect
summary(lmm_total_ret_WD)
# result: singularity and zero variance component
# drop the block variable and opt for a more simple model
# linear mixed effect models: water capture (wet treatment) --------------------
lmm_total_ret_WW <- lmer(
total_water_capture ~
# fixed vars
scale(srl) +
scale(mean_radius_mm) +
scale(rld) +
scale(rmf) +
scale(max_root_depth_cm) +
# covariate var
scale(plant_size) +
# block random effect
(1|block),
data = EF_WW)
# check the variance component of the random effect
# five levels for block effect
summary(lmm_total_ret_WW)
# multiple regression: water capture for wet treatment -------------------------
# model fitting
lm_total_ret_WW <- lm(
formula =
# response var
total_water_capture ~
# fixed vars
scale(srl) +
scale(mean_radius_mm) +
scale(rld) +
scale(rmf) +
scale(max_root_depth_cm) +
# covariate var
scale(plant_size),
data = EF_WW)
# get coefficients, p-values, R-squared values, degrees of freedom
summary(lm_total_ret_WW)
# check for model diagnostics
plot(lm_total_ret_WW, c(1)) # check for linearity
plot(lm_total_ret_WW, c(2)) # check for normality
plot(lm_total_ret_WW, c(3)) # check for homogeneity of variance
plot(lm_total_ret_WW, c(5)) # check for influential outliers
# check for multicollinearity
vif(lm_total_ret_WW)
# multiple regression: water capture for dry treatment -------------------------
# model fitting
lm_total_ret_WD <- lm(
formula =
# response var
total_water_capture ~
# fixed vars
scale(srl) +
scale(mean_radius_mm) +
scale(rld) +
scale(rmf) +
scale(max_root_depth_cm) +
# covariate var
scale(plant_size),
data = EF_WD)
# get coefficients, p-values, R-squared values, degrees of freedom
summary(lm_total_ret_WD)
# check for model diagnostics
plot(lm_total_ret_WD, c(1)) # check for linearity
plot(lm_total_ret_WD, c(2)) # check for normality
plot(lm_total_ret_WD, c(3)) # check for homogeneity of variance
plot(lm_total_ret_WD, c(5)) # check for influential outliers
# check for multicollinearity
vif(lm_total_ret_WD)
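# `plot_WD` and `plot_WW` are not created above; a minimal sketch, assuming the
# intended figures are coefficient plots of the two regressions (ggstatsplot
# is already loaded):
plot_WD <- ggstatsplot::ggcoefstats(lm_total_ret_WD)
plot_WW <- ggstatsplot::ggcoefstats(lm_total_ret_WW)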
# save to disk -----------------------------------------------------------------
ggsave(plot = plot_WD,
here(
"output/figures/main",
"fig1-RET_WD.png"
),
width = 7.5, height = 5.4,
device = "png")
ggsave(plot = plot_WW,
here(
"output/figures/main",
"fig1-RET_WW.png"
),
width = 7.5, height = 5.4,
device = "png")
| /src/02-03-LMM_tot_water_ret.R | no_license | garlandxie/MS_MSc_Roots | R |
t <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to Type Date
t$Date <- as.Date(t$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
t <- subset(t,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observation
t <- t[complete.cases(t), ]
## Combine Date and Time column
dateTime <- paste(t$Date, t$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
t <- t[ ,!(names(t) %in% c("Date","Time"))]
## Add DateTime column
t <- cbind(dateTime, t)
## Format dateTime Column
t$dateTime <- as.POSIXct(dateTime)
## Create Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(t, {
plot(Global_active_power~dateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~dateTime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~dateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~dateTime,col='Red')
lines(Sub_metering_3~dateTime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~dateTime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
})
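## To write the figure to a file rather than the screen device, wrap the
## plotting code above in device calls (hypothetical filename):
## png("plot4.png", width = 480, height = 480)
## ... plotting code ...
## dev.off()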
| /plot4.R | no_license | aryangupta07/ExData_Plotting1 | R |
library(data.table)
library(stringr)
library(igraph)
library(RColorBrewer)
###############
# Set options #
###############
args=(commandArgs(TRUE))
print(args)
#########################
# Significant SNP-pairs #
#########################
Pvalue_SNPs=fread(paste(args[1], "/pvalues/sign_SNPpairs.txt", sep=""))
data_SNP=Pvalue_SNPs[,c(1,2,4)]
data_SNP=unique(data_SNP)
head(Pvalue_SNPs)
##########################
# Significant gene-pairs #
##########################
Pvalue_genes=fread(paste(args[1], "/pvalues/sign_GenePairs.txt", sep=""))
genePair=Pvalue_genes[,1]
genePair=str_split_fixed(genePair$genePairs_names, " ", 2)
data=data.frame(genePair, Pvalue_genes[,2])
head(data)
########################################################
########
# SNP #
########
node1=data.frame(data_SNP[,1])
node2=data.frame(data_SNP[,2])
dim(node1)
colnames(node1)=colnames(node2)
nodes_snp=rbind(node1,node2)
nodes_snp=data.frame(nodes_snp[!duplicated(nodes_snp),])
dim(nodes_snp)
data_SNP$width=-log10(as.numeric(data_SNP$pvalue))
links_snp=data_SNP[,c(1,2,4)]
colnames(nodes_snp)=c("id")
colnames(links_snp)=c("id1","id2","width")
net_SNP <- graph_from_data_frame(d=links_snp, vertices=nodes_snp)
#Color cluster in Gene-based exhaustive and visualize them in SNP based exhaustive
comp=components(net_SNP, mode = c("weak", "strong"))
nodes_snp=data.frame(comp$membership)
nodes_snp$gene=rownames(nodes_snp)
colnames(nodes_snp)=c("group","id")
nodes_snp=data.frame(nodes_snp$id, nodes_snp$group)
colnames(nodes_snp)=c("id","group")
########
# Gene #
########
node1=data.frame(data[,1])
node2=data.frame(data[,2])
colnames(node1)=colnames(node2)
nodes_gene=rbind(node1,node2)
nodes_gene=data.frame(nodes_gene[!duplicated(nodes_gene),])
data$width=-log10(data$MinP)
links_gene=data[,c(1,2,4)]
colnames(nodes_gene)=c("id")
colnames(links_gene)=c("id1","id2","width")
net_gene <- graph_from_data_frame(d=links_gene, vertices=nodes_gene)
#Color cluster in Gene-based exhaustive and visualize them in SNP based exhaustive
#Create community for the gene-based network
comp=components(net_gene, mode = c("weak", "strong"))
nodes_gene=data.frame(comp$membership)
nodes_gene$gene=rownames(nodes_gene)
colnames(nodes_gene)=c("group","id")
nodes_gene=data.frame(nodes_gene$id, nodes_gene$group)
colnames(nodes_gene)=c("id","group")
#####################
# Visualization SNP #
#####################
V(net_SNP)$size <- 2
V(net_SNP)$frame.color <- "white"
V(net_SNP)$color <- nodes_snp$group
#V(net_SNP)$label <- ""
E(net_SNP)$arrow.mode <- 0
E(net_SNP)$width <- links_snp$width*0.7
E(net_SNP)$label <- ""
plot(net_SNP, main="Eqtl", cex.main=50, margin=c(0,0,0,0))
######################
# Visualization Gene #
######################
#Parameters
V(net_gene)$size <- 2
V(net_gene)$frame.color <- "white"
V(net_gene)$color <- nodes_gene$group
#V(net_gene)$label <- ""
E(net_gene)$arrow.mode <- 0
E(net_gene)$width <- links_gene$width*0.7
E(net_gene)$label <- ""
plot(net_gene, main="Eqtl", cex.main=50, margin=c(0,0,0,0))
############
# Analysis #
############
#Largest component
comp_snp=components(net_SNP, mode = c("weak", "strong"))
max(comp_snp$csize)
comp_gene=components(net_gene, mode = c("weak", "strong"))
max(comp_gene$csize)
#degree
degSNP=degree(net_SNP,mode="all")
averageDegreeSNP=mean(degSNP)
medDegreeSNP=median(degSNP)
averageDegreeSNP
medDegreeSNP
degGene=degree(net_gene,mode="all")
averageDegreeGene=mean(degGene)
medDegreeGene=median(degGene)
averageDegreeGene
medDegreeGene
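# Further descriptive statistics (a sketch using standard igraph helpers)
edge_density(net_SNP) # proportion of realised SNP-SNP edges
transitivity(net_gene) # global clustering coefficient of the gene network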
| /pipeline/network_visualization.R | permissive | DianeDuroux/BiologicalEpistasis | R |
## Put comments here that give an overall description of what your
## functions do
## There are two functions here.
## 1. The first function, makeCacheMatrix, creates a special kind of matrix (a CacheMatrix): one that can cache its own inverse once computed and provided. This kind of matrix
## also contains helper functions to set the data (YES, you can dynamically change its data. At which point the cached inverse is invalidated), get the data, set the inverse and get the inverse.
## 2. The Second function, cacheSolve, takes as input, a CacheMatrix. And sees if its inverse is already computed, if so it just returns the inverse. On the other hand, if the inverse
## is not there, it computes the inverse, and does two additional things.
## a. Stores the inverse back in the CacheMatrix
## b. Returns the inverse to the caller
## Write a short comment describing this function
## This is the function, 1, described above. It takes as input a standard matrix, and returns the interface (i.e., set of functions) that can be invoked on it. These being
## set(x) -> To reset the data content of the CachedMatrix, to the one that is provided. At which point the inverse is also invalidated(set to NULL).
## get() -> returns the dataContent of the CacheMatrix Object
## getInv() -> returns the STORED inverse. Note that the inverse is NOT computed. It just returns whatever is stored. And this would be null, till the time someone calls the setInv with a non
## non NULL input. This function should ONLY be called by the cacheSolve function described below. End users should NOT be calling this to get inverse.
## setInv(inv) -> Stores the given inverse, within itself
makeCacheMatrix <- function(x = matrix()) {
inv<-NULL
set <- function(y){
inv<<-NULL
x<<-y
}
get <- function(){
x
}
setInv <- function(computedInverse){
inv <<- computedInverse
}
getInv <- function(){
inv
}
# return the list of functions created. This could be called as the Interface of a CacheMatrix.
list(set=set, get=get, setInv=setInv, getInv= getInv)
}
## cacheSolve is function 2 described at the top; it is the function end users should call to
## obtain the inverse. It takes a CacheMatrix as input and looks up its inverse. If the inverse
## has already been computed, it simply returns it. Otherwise it computes the inverse and does
## two additional things:
## a. stores the inverse back in the CacheMatrix
## b. returns the inverse to the caller
cacheSolve <- function(x, ...) {
## Note that, here x is a CacheMatrix!
## Return a matrix that is the inverse of 'x'
inv<-x$getInv()
if (!is.null(inv)){
message("Getting Inverse from Cache")
return(inv)
}
mat<-x$get()
message("Inverse Not in Cache. Computing the Inverse")
inv<-solve(mat, ...)
## It is assumed that the Matrix provided is always INVERTIBLE
x$setInv(inv)
inv
}
## I just used the below function to test the code. You could uncomment this and run it, and look at the messages. This also includes the test case where the contents of a CacheMatrix are reset.
# test <- function(){
# m1<-matrix(c(1,2,3,0,1,4,5,6,0), nrow = 3, byrow = T)
# m2<-solve(m1)
#
# cm1<-makeCacheMatrix(m1)
# cacheSolve(cm1)
# cacheSolve(cm1)
#
# cm2<-makeCacheMatrix(m2)
# cacheSolve(cm1)
# cacheSolve(cm2)
# cacheSolve(cm1)
# cacheSolve(cm2)
# cacheSolve(cm1)
# cacheSolve(cm2)
# #This should invalidate the inverse
# cm1$set(m2)
# cacheSolve(cm1)
# cacheSolve(cm1)
#
# }
# ---- SteveCumming/scfmModules :: /stateVars/stateVars.R (R, no license) ----
defineModule(sim, list(
name = "stateVars",
description = "keep track of stat transitions affecting multiple moduls; also classification",
keywords = c("insert key words here"),
authors = c(person(c("First", "Middle"), "Last", email="email@example.com", role=c("aut", "cre"))),
childModules = character(),
version = numeric_version("1.1.1.9006"),
spatialExtent = raster::extent(rep(NA_real_, 4)),
timeframe = as.POSIXlt(c(NA, NA)),
timeunit = "year",
citation = list("citation.bib"),
documentation = list("README.txt", "stateVars.Rmd"),
reqdPkgs = list("raster", "data.table", "RColorBrewer"),
parameters = rbind(
#defineParameter("paramName", "paramClass", value, min, max, "parameter description")),
defineParameter("startTime", "numeric", 0, 0, NA, "Start time"),
defineParameter("returnInterval", "numeric", 1, 0, NA, "waddya think?"),
defineParameter(".plotInitialTime", "numeric", 0, NA, NA, desc="This describes the simulation time at which the first plot event should occur"),
defineParameter(".plotInterval", "numeric", 1, NA, NA, desc="This describes the simulation time at which the first plot event should occur"),
defineParameter("persistTimes", "numeric", c(40,30,30), c(0,0,0), c(100,100,100), desc= "For how many years do disturbances effect indicators?")
),
inputObjects = bind_rows(
expectsInput(objectName = "disturbanceMap", objectClass = "RasterLayer", desc = "state of burned or harvestd cells"),
expectsInput(objectName = "spreadState", objectClass = "data.table", desc = "table of active initial spread cells"),
expectsInput(objectName = "ageMap", objectClass = "RasterLayer", desc = "time since last disturbance"),
expectsInput(objectName = "cutCells", objectClass = "numeric", desc = "vector of recently cut cells")
),
outputObjects = bind_rows(
createsOutput(objectName = "heightMap", objectClass = "RasterLayer", desc = "crude height from age model"),
createsOutput(objectName = "disturbanceMap", objectClass = "RasterLayer", desc = "update for caribou adjacency"),
createsOutput(objectName = "dtMap", objectClass = "RasterLayer", desc = "timer for disturbances")
)
))
doEvent.stateVars = function(sim, eventTime, eventType) {
switch (eventType,
init = {
sim <- Init(sim)
sim <- scheduleEvent(sim, P(sim)$startTime,"stateVars","update")
sim <- scheduleEvent(sim, P(sim)$.plotInitialTime,"stateVars","plot")
},
update = {
sim <- Update(sim)
sim <- scheduleEvent(sim, time(sim) + P(sim)$returnInterval, "stateVars", "update")
},
plot = {
Plot(sim$disturbanceMap)
sim <- scheduleEvent(sim, time(sim) + P(sim)$.plotInterval, "stateVars", "plot")
},
warning(paste("Undefined event type: '", events(sim)[1, "eventType", with = FALSE],
"' in module '", events(sim)[1, "moduleName", with = FALSE], "'", sep = ""))
)
return(invisible(sim))
}
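# Crude height-age relation: height = 80 * age / (age + 100), saturating towards
# 80 as stands age and reaching half of that asymptote at age 100.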
heightFromAge <-function(sim){
x <- sim$ageMap[]/ (sim$ageMap[] + 100)
sim$heightMap[] <- 80*x
return(invisible(sim))
}
Init <- function(sim) {
#use disturbanceMap as template to copy.
#browser()
sim$disturbanceMap <- raster::raster(sim$ageMap)
sim$disturbanceMap[] <- sim$ageMap[] * 0
sim$dtMap <- raster::raster(sim$ageMap)
sim$dtMap[] <- sim$ageMap[] * 0
#raster::setValues(sim$harvestStateMap, values=0) # not used: setValues' calling convention
#is an awkward, non-orthogonal corner of the raster API, so we assign via [<- below instead.
setColors(sim$disturbanceMap,n=4) <- c("grey80", "red", "blue", "yellow")
#0 = none
#1 = burn
#2 = cut
#3 = adjacent to cut
sim$heightMap <- raster::raster(sim$disturbanceMap)
sim$heightMap[] <- sim$disturbanceMap[] * 0
setColors(sim$heightMap, n=10) <- colorRampPalette(c("white","green4"))(10)
sim<-heightFromAge(sim)
return(invisible(sim))
}
Update <- function(sim){
#browser()
#idx <- which(sim$harvestStateMap[] > 0)
#vals <- sim$harvestStateMap[idx]
#sim$harvestStateMap[idx] <- vals - 1 #let't not go all negative
#x <- which(sim$disturbanceMap[] == 2) # 2 codes for harvest, 1 for fire; 3 could code for "adjacent to harvest"
#Let's add this here
#sim$harvestStateMap[x] <- P(sim)$cutPersistanceTime
#browser()
dtidx <- which(sim$dtMap[] > 0)
dtval <- sim$dtMap[dtidx]
dtval <- dtval - 1
sim$dtMap[dtidx] <- dtval
unMark <- dtidx[which(dtval == 0)]
sim$disturbanceMap[unMark] <- 0
if (is.data.table(sim$spreadState) && nrow(sim$spreadState) > 0){ #existant and non-empty?
idx <- sim$spreadState[,indices] #then scfmSpread will define "indices"
raster::values(sim$disturbanceMap)[idx] <- 1
raster::values(sim$dtMap)[idx] <- P(sim)$persistTimes[1]
}
if (is.numeric(sim$cutCells) && length(sim$cutCells) > 0){
adjx <- raster::adjacent(sim$disturbanceMap,sim$cutCells,pairs=FALSE)
raster::values(sim$disturbanceMap)[adjx] <- 3
raster::values(sim$dtMap)[adjx] <- P(sim)$persistTimes[3]
#do these after, because overlap when cutting blocks of cells.
raster::values(sim$disturbanceMap)[sim$cutCells] <- 2
raster::values(sim$dtMap)[sim$cutCells] <- P(sim)$persistTimes[2]
#update ageMap
raster::values(sim$ageMap)[sim$cutCells] <- 0
}
sim <- heightFromAge(sim)
return(invisible(sim))
}
### add additional events as needed by copy/pasting from above
# ---- cran/BANFF :: /R/Mixture.R (R, no license) ----
# Mixture: evaluate the density of a univariate Gaussian mixture at `data`.
# `pre` is a list with components `pro` (mixing weights, not necessarily
# normalised), `mean` and `sd` (one entry per mixture component).
Mixture = function(data, pre)
{
  resultall = 0
  for (i in 1:length(pre$mean))
  {
    # add this component's density, weighted by its normalised mixing proportion
    resultall = resultall + pre$pro[i]/sum(pre$pro)*stats::dnorm(data, mean=pre$mean[i], sd=pre$sd[i])
  }
  return(resultall)
}
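# Minimal usage sketch (editor's illustration; the parameter values are made up):
# pre <- list(pro = c(1, 1), mean = c(-2, 2), sd = c(1, 1))
# Mixture(0, pre)              # mixture density at a single point
# Mixture(seq(-4, 4, 1), pre)  # dnorm() is vectorised, so vectors work too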
|
/R/Mixture.R
|
no_license
|
cran/BANFF
|
R
| false | false | 201 |
r
|
Mixture=function(data,pre)
{
resultall=0
for(i in 1:length(pre$mean))
{
resultall=resultall+pre$pro[i]/sum(pre$pro)*stats::dnorm(data,mean=pre$mean[i],sd=pre$sd[i])
}
return(resultall)
}
% ---- keittlab/rpg :: /man/path.Rd (Rd, no license) ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sql.R
\name{set_path}
\alias{set_path}
\alias{get_path}
\alias{append_path}
\alias{prepend_path}
\alias{path_contains}
\title{PostgreSQL path variable}
\usage{
set_path(..., default = FALSE)
get_path(default = FALSE)
append_path(..., default = FALSE, no_dup = TRUE)
prepend_path(..., default = FALSE, no_dup = TRUE)
path_contains(..., default = FALSE)
}
\arguments{
\item{...}{path names}
\item{default}{if \code{TRUE}, manipulate the database default}
\item{no_dup}{if \code{TRUE}, do not add a path element that is already present}
}
\description{
Manipulate the PostgreSQL path variable
}
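% Editor's illustration: these calls assume a live PostgreSQL connection
% managed by rpg, so they are wrapped in \dontrun.
\examples{
\dontrun{
get_path()                 # show the current search path
append_path("my_schema")   # add a schema at the end
path_contains("my_schema") # check membership
prepend_path("my_schema")  # with the default no_dup = TRUE, no duplicate is added
set_path("public")         # reset the path
}
}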
# ---- fmanquehual/proyecto_agua_en_R :: /mapa_de_pendiente.R (R, no license) ----
library(raster)
library(rgdal)
library(rgeos)
library(prettymapr)
library(rasterVis)
library(ggplot2)
library(cowplot)
library(grid)
rm(list=ls())
if (dev.cur() > 1) dev.off() # close any open graphics device without erroring when none is open
# Read input files ----
setwd('C:/Users/Usuario/Documents/Francisco/proyecto_agua/coberturas_FFMC')
pendiente <- raster('pendiente_marco_trabajo_Rio_Imperial_utm18s.tif')
plot(pendiente)
# setwd('C:/Users/Usuario/Documents/Francisco/proyecto_agua/') # padre las casas
setwd('C:/Users/Usuario/Documents/Francisco/proyecto_agua/coberturas_FFMC/')
nombre.archivo.cuenca <- 'poligono_cuenca_Estero_Poleco_utm18s'
cuenca <- readOGR('.', nombre.archivo.cuenca)
plot(cuenca, add=TRUE)
red.hidrica <- readOGR('.', 'linea_red_hidrografica_Estero_Poleco_utm18s')
plot(red.hidrica, add=TRUE)
orden.maximo <- max(red.hidrica@data$strahler)
id <- which(red.hidrica@data$strahler==orden.maximo)
curso.de.agua.principal <- red.hidrica[id,]
plot(curso.de.agua.principal, col='cyan', lwd=2, add=TRUE)
punto.desembocadura <- readOGR('.', 'punto_desembocadura_de_interes_en_Rio_Imperial_utm18s')
plot(punto.desembocadura, pch=16, col='red', add=TRUE)
# setwd('C:/Users/Usuario/Documents/Francisco/proyecto_agua/coberturas_FFMC/cuencas_buffer_y_clips/')
# cuenca.buffer <- readOGR('.', 'VectorCuencaPadreCasas_buffer_de_1000_m')
# plot(cuenca.buffer, add=TRUE)
# end ---
# clip ----
pendiente.clip0 <- crop(pendiente, cuenca)
pendiente.clip <- mask(pendiente.clip0, cuenca)
plot(pendiente.clip)
# pendiente.curso.de.agua.principal <- mask(pendiente, curso.de.agua.principal)
# plot(pendiente.curso.de.agua.principal)
# plot(curso.de.agua.principal, col='cyan', lwd=2, add=TRUE)
# zoom(pendiente.curso.de.agua.principal, ext=drawExtent())
# end ---
# plot ----
setwd('C:/Users/Usuario/Dropbox/Proyecto_agua/mapas/')
# map
nombre.mapa <- paste('mapa_de_pendiente_', nombre.archivo.cuenca, '.png', sep = '') ; nombre.mapa
paleta.colores <- hcl.colors(12, palette = "inferno")
myTheme <- rasterTheme(region = paleta.colores)
mapa <- levelplot(pendiente.clip,
                  margin = list(FUN = median, axis = gpar(col = 'black', fontsize = 10)),
                  par.settings = myTheme) + # the rasterTheme belongs in par.settings, not colorkey
  layer(sp.lines(red.hidrica, lwd=2, col='#0fd8ee')) +           # lines layer, so sp.lines
  layer(sp.points(punto.desembocadura, cex=1.5, pch=16, col='red')) # points layer, so sp.points
# png(nombre.mapa, width = 720, height = 720, units = "px")
mapa
dev.off()
# # histograms
# setwd('C:/Users/Usuario/Dropbox/Proyecto_agua/plots/')
#
#
# nombre.histograma <- paste('histogramas_de_pendiente_', nombre.archivo.cuenca, '.png', sep = '') ; nombre.histograma
#
# histograma.cuenca0 <- hist(pendiente.clip, breaks=30)
# histograma.cuenca1 <- data.frame(counts= histograma.cuenca0$counts,breaks = histograma.cuenca0$mids)
# histograma.cuenca <- ggplot(histograma.cuenca1, aes(x = breaks, y = counts)) +
# geom_bar(stat = "identity") +
# labs(y='Frecuencia', x='Pendiente (%)', title='Cuenca') +
# theme_bw() +
# theme(text = element_text(size=14), panel.spacing = unit(1, "lines"))
#
# histograma.curso.de.agua.principal0 <- hist(pendiente.curso.de.agua.principal, breaks=30)
# histograma.curso.de.agua.principal1 <- data.frame(counts= histograma.curso.de.agua.principal0$counts, breaks = histograma.curso.de.agua.principal0$mids)
# histograma.curso.de.agua.principal <- ggplot(histograma.curso.de.agua.principal1, aes(x = breaks, y = counts)) +
# geom_bar(stat = "identity") +
# labs(y='Frecuencia', x='Pendiente (%)', title = 'Curso de agua principal') +
# theme_bw() +
# theme(text = element_text(size=14), panel.spacing = unit(1, "lines"))
#
#
#
# histogramas <- plot_grid(histograma.cuenca,
# histograma.curso.de.agua.principal,
# ncol = 1, nrow = 2)
#
# # png(nombre.mapa, width = 720, height = 500, units = "px")
#
# histogramas
#
# dev.off()
#
# # end ---
#
#
#
#
# # others ----
#
# # plot(pendiente.clip, col=hcl.colors(12, palette = "inferno"))
# # plot(cuenca, border='red', add=TRUE)
# # plot(red.hidrica, col='#0fd8ee', add=TRUE)
# # plot(punto.desembocadura, pch=16, cex=1.5, col='#0fd8ee', add=TRUE)
# # addscalebar(style = 'ticks', linecol = 'black', label.col = 'black', pos = 'bottomleft', plotepsg = 32718)
# # addnortharrow(pos = "topright", cols = c("black", "black"), border = 'black', text.col = 'black', scale = 0.7)
# #
# # legend("bottomright", title=NULL, text.font = 2,
# # legend = c('Cuenca', 'Red hidrográfica', 'Desembocadura'),
# # fill=c('transparent', NA, NA),
# # border = c('red', NA, NA),
# # lty=c(NA, 1, NA),
# # pch = c(NA, NA, 16),
# # col = c(NA, '#0fd8ee', '#0fd8ee'),
# # merge = TRUE,
# # horiz=FALSE, cex=0.8, ncol = 1, bg='transparent')
# ---- surayaaramli/typeRrh :: /data/genthat_extracted_code/loggit/examples/setLogFile.Rd.R (R, no license) ----
library(loggit)
### Name: setLogFile
### Title: Set Log File
### Aliases: setLogFile
### ** Examples
setLogFile(file.path(tempdir(), "loggit.json"))
# ---- valentyn1boreiko/R_class :: /HW2/2. Übungsblatt-20171107/2.r (R, no license) ----
# Part 1
#a)
load("nrw17.RDATA")
length(nrw17)
attributes(nrw17)
#b)
nrw17$inhalt[1]
#c)
ww = strsplit(nrw17$inhalt[1:10], " ")
ww = sapply(ww, function(w) w[!grepl("[^a-zA-Z]", w)])
sapply(ww, "[", 2)
#d)
ww2 = nrw17$inhalt[1:100]
ww2 = ww2[!nrw17$isretweet[1:100]]
ww2 = strsplit(ww2, " ")
ww2 = sapply(ww2, function(w) w[!grepl("[^a-zA-Z]", w)])
sapply(ww2, "[", 1)
#e)
retw = nrw17$inhalt[nrw17$isretweet]
retw = strsplit(retw, " ")
retw = sapply(retw, function(w) w[!grepl("[^a-zA-Z]", w)])
retw = sapply(retw, "[", 1)
retw = strsplit(retw, " ")
sum(retw == "RT")/length(retw)
#f)
sum(regexpr("@" ,nrw17$inhalt)>0)/length(nrw17$inhalt)
#g)
bool_ = regexpr("@", nrw17$inhalt) > 0
no_at = which(bool_ == FALSE)
num_at = sapply(gregexpr("@", nrw17$inhalt),length)
gregexpr("@", nrw17$inhalt)
num_at[no_at] = 0
nrw17 = c(nrw17,list(Num_at=num_at))
table(num_at)
#Part 2
#a)
words_vec = unlist(strsplit(nrw17$inhalt," "))
words_vec
#b)
at_verw = unlist(strsplit(nrw17$inhalt[bool_], split=" "))
bool_at = grepl("@", at_verw)
words_at = at_verw[bool_at]
#c)
sorted_usr = sort(table(nrw17$name))
len = length(sorted_usr)
top_10 = rev(sorted_usr[(len-10):len])
top_10
#d)
tw_table = table(nrw17$name)
unique(sort(tw_table))
len = length(tw_table)
break_points = c(which(diff(rev(sort(tw_table))) < 0), X = len)
break_diff = c(break_points[1], diff(break_points))
names(break_diff) = rev(unique(sort(tw_table)))
rev(break_diff)
#e)
top_100 = rev(sort(table(nrw17$name)))[1:100]
rand_usr = sample(top_100 ,1, replace=FALSE)
num_ref = sum(grepl(paste0("@", names(rand_usr)), nrw17$inhalt[bool_])) # 'separate' is not a paste0 argument; drop it
num_ref
#Part 3
#a)
all_sgn = unlist(strsplit(nrw17$inhalt, split=""))
table_ = table(all_sgn)
table_
#b)
hauf_tab = table(all_sgn[grepl("[a-zA-Z]", all_sgn)])
hauf_tab
#c)
all_na = ifelse(grepl("[a-zA-Z]", all_sgn), all_sgn, "NA") # the yes-branch must align with the full vector
table_all_na = table(all_na)
table_all_na
#d)
times = nrw17$zeit
times_dot = sub(":",".", times)
times_vec = unlist(times_dot)
times_num = as.numeric(times_vec)
hrs = round(times_num)
hrs_sep = paste(hrs, "00" , sep=".")
hrs_table = sort(table(hrs_sep))
hrs_table
#e)
chisq.test(hrs_table) # No, the p-value is much smaller than the significance level; we cannot accept the null hypothesis.
#Part 4
#a)
matr = t(sapply(rep(2, 1000), sample, prob=c(0.3,0.7), replace=TRUE, size = 700))
matr[matr==2] = 0
#b)
sam = sample(1000)
ran_row = paste0("Zeile",sam)
rownames(matr) = ran_row
matr
#c)
ja_anteil = rowMeans(matr)
ja_anteil
#d)
sort(ja_anteil)
sort(ja_anteil)[25]
sort(ja_anteil)[25] == quantile(ja_anteil, probs=0.025)
sort(ja_anteil)[975] == quantile(ja_anteil, probs=0.975)
#e)
alpha = 0.05
n = 1000
sch = ja_anteil
q_n = qnorm(1 - (alpha/2))
sd = sqrt(sch*(1-sch))
untere_g = sch - q_n * sd/sqrt(n)
obere_g = sch + q_n * sd/sqrt(n)
conf_int = paste(untere_g,obere_g,sep = ",")
conf_int[975]
quantile(ja_anteil, probs=0.975)
conf_int[25]
quantile(ja_anteil, probs=0.025)
#f)
row_names = names(ja_anteil)
row_names_num = sub("Zeile", replace= "", row_names)
row_nums = (as.numeric(row_names_num))
row_nums_sort = paste0("Zeile", sort(row_nums)) # paste0 has no 'sep' argument
ja_anteil_sort = ja_anteil[row_nums_sort]
ja_anteil_sort
#g)
sam = sample(700000, 2000)
matr[sam] = NA
ja_anteil_no_na <- rowMeans(matr, na.rm=TRUE)
ja_anteil_no_na
% ---- datandrews/mosaic :: /man/aggregating.Rd (Rd, no license) ----
\name{mean}
\alias{cor}
\alias{cov}
\alias{favstats}
\alias{fivenum}
\alias{iqr}
\alias{IQR}
\alias{max}
\alias{mean}
\alias{median}
\alias{min}
\alias{prod}
\alias{range}
\alias{sd}
\alias{sum}
\alias{var}
\title{Aggregating functions}
\usage{
mean(x, ..., data, groups = NULL, ..fun.. = base::mean)
median(x, ..., data, groups = NULL,
..fun.. = stats::median)
range(x, ..., data, groups = NULL, ..fun.. = base::range)
sd(x, ..., data, groups = NULL, ..fun.. = stats::sd)
max(x, ..., data, groups = NULL, ..fun.. = base::max)
min(x, ..., data, groups = NULL, ..fun.. = base::min)
sum(x, ..., data, groups = NULL, ..fun.. = base::sum)
IQR(x, ..., data, groups = NULL, ..fun.. = stats::IQR)
fivenum(x, ..., data, groups = NULL,
..fun.. = stats::fivenum)
iqr(x, ..., data, groups = NULL, ..fun.. = stats::IQR)
prod(x, ..., data, groups = NULL, ..fun.. = base::prod)
sum(x, ..., data, groups = NULL, ..fun.. = base::sum)
favstats(x, ..., data, groups = NULL,
..fun.. = fav_stats)
var(x, ..., data, groups = NULL, ..fun.. = stats::var)
cor(x, y = NULL, ..., data = parent.frame())
cov(x, y = NULL, ..., data = parent.frame())
}
\arguments{
\item{x}{an object, often a formula}
\item{y}{an object, often a numeric vector}
\item{..fun..}{the underlying function used in the
computation}
\item{groups}{a grouping variable, typically a name of a
variable in \code{data}}
\item{data}{a data frame in which to evaluate formulas
(or bare names)}
\item{\dots}{additional arguments}
}
\description{
The \code{mosaic} package makes several summary statistic
functions (like \code{mean} and \code{sd}) formula aware.
}
\examples{
mean( HELPrct$age )
mean( ~ age, data=HELPrct )
mean( age ~ sex + substance, data=HELPrct )
mean( ~ age | sex + substance, data=HELPrct )
mean( sqrt(age), data=HELPrct )
sum( ~ age, data=HELPrct )
sd( HELPrct$age )
sd( ~ age, data=HELPrct )
sd( age ~ sex + substance, data=HELPrct )
var( HELPrct$age )
var( ~ age, data=HELPrct )
var( age ~ sex + substance, data=HELPrct )
IQR( width ~ sex, data=KidsFeet )
iqr( width ~ sex, data=KidsFeet )
favstats( width ~ sex, data=KidsFeet )
cor( length ~ width, data=KidsFeet )
cov ( length ~ width, data=KidsFeet )
}
# ---- wilsonify/ModernOptimization :: /src/chapters/Chapter3/bag-grid.R (R, permissive license) ----
### bag-grid.R file ###
source("blind.R") # load the blind search methods
source("grid.R") # load the grid search methods
source("functions.R") # load the profit function
# grid search for all bag prices, step of 100$
PTM <- proc.time() # start clock
S1 <- gsearch(rep(100, 5), rep(1, 5), rep(1000, 5), profit, "max")
sec <- (proc.time() - PTM)[3] # get seconds elapsed
cat("gsearch best s:", S1$sol, "f:", S1$eval, "time:", sec, "s\n")
# grid search 2 for all bag prices, step of 100$
PTM <- proc.time() # start clock
S2 <- gsearch2(rep(100, 5), rep(1, 5), rep(1000, 5), profit, "max")
sec <- (proc.time() - PTM)[3] # get seconds elapsed
cat("gsearch2 best s:", S2$sol, "f:", S2$eval, "time:", sec, "s\n")
# nested grid with 3 levels and initial step of 500$
PTM <- proc.time() # start clock
S3 <- ngsearch(3, rep(500, 5), rep(1, 5), rep(1000, 5), profit, "max")
sec <- (proc.time() - PTM)[3] # get seconds elapsed
cat("ngsearch best s:", S3$sol, "f:", S3$eval, "time:", sec, "s\n")
# ---- jcrb/InformatisationSU :: /functions.R (R, no license) ----
# Standardise the list of software names
standardise_logiciels <- function(liste){
a <- as.character(liste)
a[a == ""] <- NA
a[a == "autre"] <- NA
a[a == "Christalnet (module DMU)"] <- "Cristalnet"
a[a == "Clinicom (Siemens)"] <- "Clinicom"
a[a == "CORA McKesson"] <- "Cora"
a[a == "CrystalNet"] <- "Cristalnet"
a[a == "DMU (CristalNet)"] <- "Cristalnet"
a[a == "RESURGENCE"] <- "ResUrgences"
a[a == "Resurgences"] <- "ResUrgences"
a[a == "RESURGENCE (Adulte) DxCare Medasys (Pédiatrie)"] <- "ResUrgences"
a[a == "urqual"] <- "UrQual"
a[a == "UrQual (McKesson)"] <- "UrQual"
a[a == "Urqual (McKesson)"] <- "UrQual"
a[a == "Cristalnet"] <- "CristalNet"
a[a == "CRISTALNET"] <- "CristalNet"
a[a == "Cotexte"] <- "Coretexte"
a[a == "Cortexe"] <- "Coretexte"
a[a == "CROSSWAY"] <- "Crossway"
a[a == "crossway"] <- "Crossway"
a[a == "crosway"] <- "Crossway"
a[a == "CORA"] <- "Cora"
a[a == "CLINICOM (Creil)"] <- "Clinicom"
a[a == "DxCare (Medasys)"] <- "DXCare"
a[a == "DXCARE (Medasys)"] <- "DXCare"
a[a == "DxCare MEDASYS"] <- "DXCare"
a[a == "dxcare (vittel)"] <- "DXCare"
a[a == "DX CARE"] <- "DXCare"
a[a == "DxCare"] <- "DXCare"
a[a == "hopital manager"] <- "Hopital Manager"
a[a == "Osiris (Cormin)"] <- "Osiris"
a[a == "OSIRIS Evolucare"] <- "Osiris"
a[a == "Oriris (cormin)"] <- "Osiris"
a[a == "OSOFT"] <- "Osoft"
a[a == "ATALANTE Pmsi"] <- "Atalante"
a[a == "ATALANTE"] <- "Atalante"
a[a == "CORETEXTE"] <- "Cortext"
a[a == "POLIMEDIS - EQUAFILE"] <- "Polymedis"
a[a == "ᅠ"] <- NA
a[a == "cf ch lodeve"] <- NA
a[a == "Etablissement prioritaire !"] <- NA
a[a == "Pas de SAU"] <- NA
a[!is.na(a) & nchar(a) < 3] <- NA
as.factor(toupper(a))
}
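# Quick illustration (editor's example; inputs drawn from the mappings above):
# standardise_logiciels(c("urqual", "CORA McKesson", ""))
# returns a factor with the cleaned, upper-cased labels: URQUAL, CORA, <NA>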
#===========================================================================
# copyright
#===========================================================================
#'@title copyright
#'@author JcB
#'@description Places a copyright notice on a plot.
#'By default the text is written vertically along the right-hand edge of the image.
#'@param an (str) year of the copyright (default "2014")
#'@param side side on which the text is written (default = 4, the right margin)
#'@param line distance from the edge. Default = -1, just inside the plot frame
#'@param titre text of the notice (default "IGN & FEDORU")
#'@param cex text size (default 0.8)
#'@return the notice drawn in the margin, e.g. "© 2014 IGN & FEDORU"
#'@usage copyright()
#'
copyright<-function(an ="2014",side=4,line=-1,cex=0.8, titre = "IGN & FEDORU"){
titre<-paste("©", an, titre, sep=" ")
mtext(titre,side=side,line=line,cex=cex)
}
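# Editor's illustration (assumes an open base-graphics plot):
# plot(1:10)
# copyright(an = "2015", titre = "FEDORU") # writes "© 2015 FEDORU" in the right margin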
% ---- sbg/sevenbridges-r :: /man/WorkflowStep.Rd (Rd, permissive license) ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-cwl.R
\docType{class}
\name{WorkflowStepInput-class}
\alias{WorkflowStepInput-class}
\alias{WorkflowStepInput}
\alias{WorkflowStepOutput-class}
\alias{WorkflowStepOutput}
\alias{WorkflowStepInputList}
\alias{WorkflowStepInputList-class}
\alias{WorkflowStepOutputList}
\alias{WorkflowStepOutputList-class}
\alias{WorkflowStepList}
\alias{WorkflowStepList-class}
\alias{WorkflowStep-class}
\alias{WorkflowStep}
\title{WorkflowStepInputList}
\usage{
WorkflowStepInputList(...)
WorkflowStepOutputList(...)
WorkflowStepList(...)
}
\arguments{
\item{\dots}{element or list of the element.}
}
\value{
a WorkflowStep object or subclass object.
}
\description{
A workflow step is an executable element of a workflow.
It specifies the underlying process implementation (such as
CommandLineTool) in the run field and connects the input and output
parameters of the underlying process to workflow parameters.
}
\section{Fields}{
\describe{
\item{\code{id}}{[character] The unique identifier for this workflow step.}
\item{\code{inputs}}{(WorkflowStepInputList) Defines the input parameters
of the workflow step. The process is ready to run when all required
input parameters are associated with concrete values. Input
parameters include a schema for each parameter, which is used to
validate the input object; it may also be used to build a user
interface for constructing the input object.}
\item{\code{outputs}}{(WorkflowStepOutputList) Defines the parameters
representing the output of the process. May be used to generate
and/or validate the output object.}
\item{\code{requirements}}{[ProcessRequirement] Declares requirements that
apply to either the runtime environment or the workflow engine that
must be met in order to execute this workflow step. If an
implementation cannot satisfy all requirements, or a requirement is
listed which is not recognized by the implementation, it is a fatal
error and the implementation must not attempt to run the process,
unless overridden at user option.}
\item{\code{hints}}{[ANY] Declares hints applying to either the runtime
environment or the workflow engine that may be helpful in executing
this workflow step. It is not an error if an implementation cannot
satisfy all hints, however the implementation may report a warning.}
\item{\code{label}}{[character] A short, human-readable label of this
process object.}
\item{\code{description}}{[character] A long, human-readable description
of this process object.}
\item{\code{run}}{(CommandLineToolORExpressionToolORWorkflow) Specifies
the process to run.}
\item{\code{scatter}}{[character]}
\item{\code{scatterMethod}}{[ScatterMethod] Required if scatter is an array of more
than one element.}
}}
\section{WorkflowStepInput Class}{
\describe{
The input of a workflow step connects an upstream parameter (from
the workflow inputs, or the outputs of other workflows steps) with
the input parameters of the underlying process.
If the sink parameter is an array, or named in a workflow scatter
operation, there may be multiple inbound data links listed in the
connect field. The values from the input links are merged depending
on the method specified in the linkMerge field. If not specified,
the default method is merge_nested:
\item{merge_nested}{ The input shall be an array consisting of
exactly one entry for each input link. If merge_nested is specified
with a single link, the value from the link is wrapped in a
single-item list. }
\item{merge_flattened}{ 1) The source and sink parameters must be
compatible types, or the source type must be compatible with single
element from the "items" type of the destination array
parameter. 2) Source parameters which are arrays are concatenated;
source parameters which are single element types are appended as
single elements. }
Fields:
\item{\code{id}}{ (character) A unique identifier for this workflow input
parameter.}
\item{\code{source}}{[character] Specifies one or more workflow parameters
that will provide input to the underlying process parameter.}
\item{\code{linkMerge}}{[LineMergeMethod] The method to use to merge
multiple inbound links into a single array. If not specified, the
default method is merge_nested:}
\item{\code{default}}{ [ANY] The default value for this parameter if there
is no source field.}
}
}
\section{WorkflowStepOutput Class}{
\describe{
Associate an output parameter of the underlying process with a
workflow parameter. The workflow parameter (given in the id field)
may be used as a source to connect with input parameters of
other workflow steps, or with an output parameter of the process.
\item{\code{id}}{ (character) A unique identifier for this workflow output
parameter. This is the identifier to use in the source field of
WorkflowStepInput to connect the output value to downstream
parameters.}
}
}
\section{Scatter/gather}{
To use scatter/gather, ScatterFeatureRequirement must be specified
in the workflow or workflow step requirements.
A "scatter" operation specifies that the associated workflow step
or subworkflow should execute separately over a list of input
elements. Each job making up a scatter operation is independent
and may be executed concurrently.
The scatter field specifies one or more input parameters which will
be scattered. An input parameter may be listed more than once. The
declared type of each input parameter is implicitly wrapped in an
array for each time it appears in the scatter field. As a result,
upstream parameters which are connected to scattered parameters may
be arrays.
All output parameter types are also implicitly wrapped in arrays;
each job in the scatter results in an entry in the output array.
If scatter declares more than one input parameter, scatterMethod
describes how to decompose the input into a discrete set of jobs.
\itemize{
\item{dotproduct}{ specifies that the input arrays are aligned
and one element taken from each array to construct each job. It is
an error if all input arrays are not the same length.}
\item{nested_crossproduct}{specifies the cartesian product of the
inputs, producing a job for every combination of the scattered
inputs. The output must be nested arrays for each level of
scattering, in the order that the input arrays are listed in the
scatter field.}
\item{flat_crossproduct}{specifies the cartesian product of the
inputs, producing a job for every combination of the scattered
inputs. The output arrays must be flattened to a single level, but
otherwise listed in the order that the input arrays are listed in
the scatter field.}
}
}
\section{Subworkflows}{
To specify a nested workflow as part of a workflow step,
SubworkflowFeatureRequirement must be specified in the workflow or
workflow step requirements.
}
\examples{
ws <- WorkflowStepList(WorkflowStep(
id = "step1", label = "align-and-sort",
description = "align and sort",
inputs = WorkflowStepInputList(
WorkflowStepInput(id = "id1"),
WorkflowStepInput(id = "id2")
)
))
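# A scattered step (editor's sketch: field names follow the documentation
# above, but the exact constructor arguments are assumed, hence not run)
\dontrun{
ws2 <- WorkflowStep(
  id = "step2", scatter = "id1", scatterMethod = "dotproduct",
  inputs = WorkflowStepInputList(WorkflowStepInput(id = "id1"))
)
}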
}