Dataset schema (one record per source file; column, type, length range or cardinality):

content        large_string   lengths 0 to 6.46M
path           large_string   lengths 3 to 331
license_type   large_string   2 values
repo_name      large_string   lengths 5 to 125
language       large_string   1 value
is_vendor      bool           2 classes
is_generated   bool           2 classes
length_bytes   int64          4 to 6.46M
extension      large_string   75 values
text           string         lengths 0 to 6.46M (identical to content in the records shown below)
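A minimal sketch of how records with this schema might be loaded and summarised in R. The parquet file name below is a hypothetical placeholder, not something referenced by the dump itself, and the sketch assumes the records are stored as a single parquet file readable by the arrow package.

library(arrow)   # read_parquet()
library(dplyr)

# Hypothetical path to the serialized records described by the schema above
files <- read_parquet("r_code_files.parquet")

# Keep hand-written sources only (drop vendored and generated files)
# and summarise them by license and extension
files %>%
  filter(language == "R", !is_vendor, !is_generated) %>%
  count(license_type, extension, sort = TRUE)

# Locate the largest file by size in bytes
files %>%
  arrange(desc(length_bytes)) %>%
  select(repo_name, path, length_bytes) %>%
  head(1)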
library(ISLR)
library(tree)
library(OneR)
library(Hmisc)
library(gmodels)
library(rJava)
library(RWeka)
library(caret)
library(e1071)
library(MASS)
library(rpart)
library(mice)
library(VIM)
library(ggplot2)
library(statip)
library(rpart.plot)
library(tidyrules)
library(dplyr)

# Ripper (JRip rule learner)
attrition_train[, c(2:3, 5, 7:8, 11:12, 14:18, 22:23, 25:26, 28, 31, 36)] <-
  lapply(attrition_train[, c(2:3, 5, 7:8, 11:12, 14:18, 22:23, 25:26, 28, 31, 36)], as.factor)
jRip.attrition <- JRip(Attrition ~ ., data = attrition_train)
JRip.pred <- predict(jRip.attrition, attrition_test, type = "class")
t <- table(JRip.pred, attrition_test$Attrition)
(t[1] + t[4]) / (t[1] + t[2] + t[3] + t[4])  # accuracy from the confusion table
summary(jRip.attrition)
# display rules:
jRip.attrition

# rules via rpart
attrition_train$Attrition <- as.factor(attrition_train$Attrition)
attrition_test$Attrition <- as.factor(attrition_test$Attrition)
rpart.attrition <- rpart(Attrition ~ ., data = attrition_train)
rpart.pred <- predict(rpart.attrition, attrition_test, type = "class")
confusionMatrix(rpart.pred, attrition_test$Attrition)
plot(rpart.attrition)
text(rpart.attrition, cex = .6, pos = 1, offset = 0.7)
rpart.attrition
# rpart.rules(rpart.attrition)
rules_bc <- tidyRules(rpart.attrition)
rules_bc
path: /Coverage.R | repo: emirpita/Machine-Learning-Homework | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 1,208 | extension: r
#' @title Check preservation
#' @description Checks if a partial ranking is preserved in the ranking induced by `scores`.
#' @param P A partial ranking as matrix object calculated with [neighborhood_inclusion]
#'   or [positional_dominance].
#' @param scores Numeric vector containing the scores of a centrality index.
#' @details In order for a score vector to preserve a partial ranking, the following
#'   condition must be fulfilled:
#'   \code{P[u,v]==1 & scores[u]<=scores[v]}.
#' @return Logical scalar indicating whether \code{scores} preserves the relations in \code{P}.
#' @author David Schoch
#' @examples
#'
#' library(igraph)
#' # standard measures of centrality preserve the neighborhood inclusion preorder
#' g <- graph.empty(n = 11, directed = FALSE)
#' g <- add_edges(g, c(1,11, 2,4, 3,5, 3,11, 4,8, 5,9, 5,11, 6,7, 6,8,
#'                     6,10, 6,11, 7,9, 7,10, 7,11, 8,9, 8,10, 9,10))
#' P <- neighborhood_inclusion(g)
#'
#' is_preserved(P, degree(g))
#' is_preserved(P, betweenness(g))
#' is_preserved(P, closeness(g))
#' @export
is_preserved <- function(P, scores) {
  n <- nrow(P)
  preserved <- preserve(P, scores, n) == 0
  return(preserved)
}
path: /R/check.preservation.R | repo: knapply/netrankr | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 1,147 | extension: r
library(dplyr)

df <- read.csv('data/players.csv', stringsAsFactors = F)
df <- filter(df, isActive == T)

teams <- sort(unique(df$nameTeam))
players <- sort(unique(filter(df, nameTeam == "Los Angeles Lakers")$namePlayer))
path: /showcase/global.R | repo: sdgass13/nba_shiny_app | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 225 | extension: r
library(data.table)

ultimatePai = function(test, uniqueBool = T) {
  # modify incident name of testprob, test
  test$Incident.Name = gsub(x = test$Incident.Name, pattern = " ", replacement = ".")
  if (uniqueBool == T) {
    test = unique(test[, c("Week", "Incident.Name", "Weekday", "Hour", "Month", "Day",
                           "Year", "Black", "Vacant.Housing", "Mean.Income", "Income.0.10000", "Leave.to.Work.12.5am",
                           "Weekday Density", "Weekly Density", "Spatial Density", "Hourly 2d Density", "Hourly 1d Density",
                           "Weekday1dDensity", "Week1dDensity", "WeekDens", "OverallWeekDens", "Cell", "Cost")])
    testProb = readRDS("testProb2.rds")
    testProb$Incident.Name = gsub(x = testProb$Incident.Name, pattern = " ", replacement = ".")
    crimetype = colnames(testProb)[18:35]
    crimecounts = readRDS('predictionsListUniqueGBM.rds')
  } else {
    testProb = readRDS("testProb.rds")
    testProb$Incident.Name = gsub(x = testProb$Incident.Name, pattern = " ", replacement = ".")
    crimetype = colnames(testProb)[24:41]
    crimecounts = readRDS('predictionsListNonUnique.rds')
  }
  summary = data.table(Cell = unique(test$Cell))
  paitable = data.table(matrix(0, ncol = 7))
  for (i in 1:18) {
    if (uniqueBool == T) {
      dat = testProb[Incident.Name == crimetype[i], ][order(Cell), 17:35]
    } else {
      dat = testProb[Incident.Name == crimetype[i], ][order(Cell), c(22, 24:41)]
    }
    dat = data.table(Cell = dat$Cell, Prob = dat[[i + 1]])
    crimeCount = data.table(crimecounts[[i]])            # with 7 columns
    MaxCount = data.table(crimeCount[, -1] * dat$Prob)    # change column to -1 when have new data
    Cost = test[Incident.Name == crimetype[i], Cost]
    currCrimeCost = MaxCount * Cost
    currCrimeCost$Cell = dat$Cell
    name = head(colnames(currCrimeCost), -1)
    colnames(currCrimeCost) = c(paste0(crimetype[i], ".", name), "Cell")
    setcolorder(currCrimeCost, c("Cell", names(currCrimeCost)[1:length(currCrimeCost) - 1]))
    currCrimeCost2 = data.table(aggregate(currCrimeCost[, -1], by = list(Cell = currCrimeCost$Cell), FUN = sum))
    summary = merge(summary, currCrimeCost2, all.x = TRUE, by = "Cell")
    summary[is.na(summary)] = 0
  }
  colnames(paitable) = name
  # calculate PAI for each model:
  for (i in 2:8) {
    currModel = data.table(Cell = summary$Cell)
    colidx = seq(i, ncol(summary), by = 7)
    for (idx in colidx) {
      currModel = cbind(currModel, summary[[idx]])
    }
    colnames(currModel) = c("Cell", crimetype)
    currModel$TotalCost = apply(currModel[, c(2:19)], 1, sum)
    currModel = currModel[order(-TotalCost), ]
    nbflagged = ceiling(nrow(currModel) * 0.01)
    flagged_cost = sum(currModel[1:nbflagged, TotalCost])
    total_cost = sum(currModel[, TotalCost])
    fac1 = flagged_cost / total_cost
    fac2 = nrow(currModel) / nbflagged
    pai = fac1 * fac2
    paitable[[i - 1]] = pai
  }
  return(paitable)
}

# test = unique(test[, c("Week", "Incident.Name", "Hour", "Month", "Day", "Year", "Weekday", "WeekDens", "OverallWeekDens", "Weekly Density", "Spatial Density", "Hourly 2d Density", "Hourly 1d Density", "Weekday Density", "Weekday1dDensity", "Week1dDensity", "Cell", "Cost")])
# probU = readRDS("testProb.rds")
test = readRDS("test.rds")
train = readRDS('train.rds')
ultimatePai(test = test, uniqueBool = T)
path: /paiultimate.R | repo: mdinh186/Google_Queries_Time_Series | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 3,263 | extension: r
# DiagnosticPlots.r modified from: http://ucfagls.wordpress.com/2011/06/12/additive-modelling-and-the-hadcrut3v-global-mean-temperature-series/

# Load required libraries
library(mgcv)
library(nlme)
library(MASS)
library(emdbook)

script.dir <- dirname(sys.frame(1)$ofile)
setwd(script.dir)

# Load required functions
source("common functions/Bootstrapping.r")
source("common functions/DiagnosticPlots.r")
source("common functions/ParametricFunctions.r")
source("common functions/WaldParametricFunctions.r")
source("common functions/PlotIndicatorProjections.r")
source("common functions/Fit_autocorrelated_models.r")
source("common functions/writepdfsandresults.r")

# Set working directory for data
setwd("../../data/Target 11")

# Turn warnings off for attempting model fits
options(warn = -1)

output_name = "T11_Marine_PA_coverage"

# Read in and attach data
#eafs <- read.csv("WDPA_limited.csv")
#eafs <- eafs[1:(dim(eafs)[1]), c(1,3)]
eafs <- read.csv("marine_PAs.csv")

minx = min(eafs[,2])
colnames(eafs) = c("Year", "data")

# Convert from area to percent
#eafs[,2] = eafs[,2] / (359829058) * 100

# Transform data
back_transform <- function(x) {
  aa = sin(x)^2 * 100
  aa = aa + minx
}

eafs[,2] = eafs[,2] - minx
eafs[,2] = asin(sqrt(eafs[,2] / 100))

attach(eafs)

# Num data points
num_data_points = sum(!is.na(eafs[,2]))

# Get the years for which to predict values
PredictYears = c(min(Year):2020)
PredictYearsDataFrame = data.frame(Year = PredictYears)

# Years which are projected
PredictionSpan = which(PredictYears == (max(Year) + 1)):length(PredictYears)

# Target and plot values
target_value = 10
y_limit = 12
target_value_text = 10.5
ylabel = "Percentage ocean area protected"
maintitle = "Marine protected area"

#-------------------------------------------------------------------------------------
### MODELLING
m <- list()
plot(Year - min(Year) + 1, data, pch = 20)

#-------------------------------------------------------------------------------------
# MODEL 1: FIRST ORDER POLYNOMIAL
# No autocorrelation function
m[[1]] <- FitAutocorrelatedModel("poly1fun", "(a,b,Year)", 2, "eafs", "list(a =-22, b = 50)",
                                 "coef(m1)", "coef(m1)", TRUE, fitAR2 = FALSE,
                                 num_data_points = num_data_points, ylabel, "First order polynomial")

#-------------------------------------------------------------------------------------
# MODEL 2: 4.2.30 FROM RATKOWSKY
m[[2]] <- FitAutocorrelatedModel("ratk423", "(a,b,c,Year)", 3, "eafs", "list(a = 1, b = 1, c = 1)",
                                 "coef(m1)", "coef(m1)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Second order polynomial")

#-------------------------------------------------------------------------------------
# MODEL 3: 4.3.33 FROM RATKOWSKY
m[[3]] <- FitAutocorrelatedModel("ratk4333", "(a,b,c,d,Year)", 4, "eafs", "list(a = 300, b = 800, c = 670, d = 660)",
                                 "coef(m1)", "coef(m2)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Third order polynomial")

#--------------------------------------------------------------------------------------
# MODEL 4: HYPERBOLIC
m[[4]] <- FitAutocorrelatedModel("hyperfun", "(a,b,Year)", 2, "eafs", "list(a = 200, b = -1)",
                                 "coef(m1)", "coef(m2)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Hyperbolic")

#--------------------------------------------------------------------------------------
# MODEL 5: MICHAELIS-MENTEN
m[[5]] <- FitAutocorrelatedModel("mmfun", "(a,b,Year)", 2, "eafs", "list(a = 1, b = 1)",
                                 "coef(m1)", "list(a = 1.3, b = 1.3)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Michaelis-Menten")

#--------------------------------------------------------------------------------------
# MODEL 6: HOLLING TYPE III
m[[6]] <- FitAutocorrelatedModel("hiiifun", "(a,b,Year)", 2, "eafs", "list(a = 1.1, b = 1)",
                                 "coef(m1)", "list(a = 1.1, b = 0.03)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Hollings Type III")

#--------------------------------------------------------------------------------------
# MODEL 7: HOLLING TYPE IV
m[[7]] <- FitAutocorrelatedModel("hivfun", "(a,b,c,Year)", 3, "eafs", "list(a = 0.33, b = 10, c = 0.2)",
                                 "coef(m1)", "coef(m2)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Hollings Type IV", 1, 1, 1)

#--------------------------------------------------------------------------------------
# MODEL 8: RATIONAL
m[[8]] <- FitAutocorrelatedModel("rationalfun", "(a,b,d,Year)", 3, "eafs", "list(a = 1.1, b = 1, d = 1)",
                                 "coef(m1)", "list(a = 1.1, b = 0.03, d = 1)", TRUE,
                                 num_data_points = num_data_points, ylabel, "Rational")

#-------------------------------------------------------------------------------------
## MODEL 9: EXPONENTIAL
m[[9]] <- FitAutocorrelatedModel("expfun", "(a,b,Year)", 2, "eafs", "list(a = 0.2, b = 0.03)",
                                 "coef(m1)", "list(a = 0.05, b = 0.01)", TRUE,
                                 num_data_points = num_data_points, fitAR2 = FALSE, ylabel, "Exponential")

#--------------------------------------------------------------------------------------
# MODEL 10: MONOMOLECULAR
m[[10]] <- FitAutocorrelatedModel("monofun", "(a,b,Year)", 2, "eafs", "list(a = 1000, b = 0.1)",
                                  "coef(m1)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, fitAR2 = FALSE, ylabel, "Monomolecular")

#--------------------------------------------------------------------------------------
# MODEL 11: RICKER
m[[11]] <- FitAutocorrelatedModel("rickerfun", "(a,b,Year)", 2, "eafs", "list(a = 0.1, b = 0.1)",
                                  "coef(m1)", "coef(m2)", TRUE, fitAR2 = FALSE,
                                  num_data_points = num_data_points, ylabel, "Ricker")

#--------------------------------------------------------------------------------------
# MODEL 12: GOMPERTZ
m[[12]] <- FitAutocorrelatedModel("gompertzfun", "(a,b,d,Year)", 3, "eafs", "list(a = 1.1, b = 0.1, d = 0.1)",
                                  "coef(m1)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Gompertz")

#--------------------------------------------------------------------------------------
# MODEL 13: CHAPMAN-RICHARDS
m[[13]] <- FitAutocorrelatedModel("chaprichfun", "(a,b,d,Year)", 3, "eafs", "list(a = 12, b = 0.2, d = 0.02)",
                                  "list(a = 1.1, b = 0.1, d = 5)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Chapman-Richards")

#---------------------------------------------------------------------------------------
# MODEL 14: WEIBULL
m[[14]] <- FitAutocorrelatedModel("weibullfun", "(a,b,d,e,Year)", 4, "eafs", "list(a = 0.01, b = 0.33, d = 0.0001, e = 0.01)",
                                  "coef(m1)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Weibull")

#---------------------------------------------------------------------------------------
# MODEL 15: POWER-LAW
m[[15]] <- FitAutocorrelatedModel("powerfun", "(a,b,Year)", 2, "eafs", "list(a = 200, b = 0.8)",
                                  "list(a = 200, b = 0.8)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Power")

#--------------------------------------------------------------------------------------
# MODEL 16: ASYMPTOTIC
m[[16]] <- FitAutocorrelatedModel("asymptoticfun", "(a,b,d,Year)", 3, "eafs", "list(a = 1000, b = 2, d = 1)",
                                  "coef(m1)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Asymptotic")

#--------------------------------------------------------------------------------------
# MODEL 17: SHEPHERD
m[[17]] <- FitAutocorrelatedModel("shepherdfun", "(a,b,d,Year)", 3, "eafs", "list(a = 2, b = 2, d = 0.98)",
                                  "coef(m1)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Shepherd")

#--------------------------------------------------------------------------------------
# MODEL 18: HASSELL
m[[18]] <- FitAutocorrelatedModel("hassellfun", "(a,b,d,Year)", 3, "eafs", "list(a = 28, b = -0.02, d = 0.-2)",
                                  "list(a = 28, b = -0.02, d = 0.-2)", "coef(m2)", TRUE,
                                  num_data_points = num_data_points, ylabel, "Hassell")

#--------------------------------------------------------------------------------------
# MULTI-MODEL AVERAGING

# Create a data frame to hold the results
Modelnames = vector()
for (ii in 1:18) Modelnames[[ii]] = m[[ii]]$modelname

# Calculate small sample size AIC value
ssAIC <- function(model) {
  num_params = length(coef(model)) + 1 + length(unlist(model$modelStruct))
  AIC(model) + (2 * num_params * (num_params + 1)) / (num_data_points - num_params - 1)
}

ModelAICs = vector()
for (ii in 1:18) {
  if (!is.na(m[[ii]]$model[1]))
    ModelAICs[[ii]] = ssAIC(m[[ii]]$model)
  else
    ModelAICs[[ii]] = NA
}

Results = data.frame(Modelnames, ModelAICs)

# Filter out NA values
Results_noNAs = Results[!is.na(Results[,2]), ]

# Sort models by AIC value
sorted_models = sort(Results_noNAs[,2], index.return = T)$ix
sorted_models_original_list = order(Results[,2])[1:length(sorted_models)]

# Get the minimum AIC
min_AIC <- min(Results_noNAs$ModelAICs[sorted_models], na.rm = T)

# Get the delta AICs
delta_AIC <- Results_noNAs$ModelAICs[sorted_models] - min_AIC

# Keep only models for which delta AIC < 2
num_models_to_be_averaged = length(which(delta_AIC < 2))

# Calculate the Akaike weights
temp_val <- sum(exp(-delta_AIC/2), na.rm = TRUE)
akaike_weights <- exp(-delta_AIC/2) / temp_val

# Calculate normalised AIC weights
norm_akaike_weights = akaike_weights[1:num_models_to_be_averaged]
norm_akaike_weights = norm_akaike_weights / sum(norm_akaike_weights)

# Combine predictions from all models to be averaged into a matrix
modelpred = matrix(nrow = num_models_to_be_averaged, ncol = length(PredictYears))
for (ii in 1:num_models_to_be_averaged)
  modelpred[ii,] = as.vector(m[[sorted_models_original_list[ii]]]$prediction)

# Calculate model-weighted averages
model_average <- matrix(nrow = 1, ncol = length(PredictYears))
for (ii in 1:length(PredictYears)) {
  model_average[ii] <- sum(modelpred[,ii] * norm_akaike_weights)
}

# Combine standard errors from all models to be averaged into a matrix
modelses = matrix(nrow = length(sorted_models), ncol = length(PredictYears))
for (ii in 1:num_models_to_be_averaged)
  modelses[ii,] = as.vector(m[[sorted_models_original_list[ii]]]$CIs$SEs)

# Now calculate the unconditional variance for the fitted values
fitted_se <- vector(mode = "numeric", length = length(PredictYears))
for (jj in 1:length(PredictYears)) {
  for (ii in 1:num_models_to_be_averaged) {
    fitted_se[jj] <- fitted_se[jj] + norm_akaike_weights[ii] *
      sqrt(modelses[ii,jj]^2 + (modelpred[ii,jj] - model_average[jj])^2)
  }
}

nparams <- vector(mode = "numeric", length = length(num_models_to_be_averaged))
for (ii in 1:num_models_to_be_averaged) {
  nparams[ii] = length(coef(m[[sorted_models_original_list[ii]]]$model)) +
    length(unlist(m[[sorted_models_original_list[ii]]]$model$modelStruct))
}
nparams = mean(nparams)
nparams

fitted_var <- fitted_se^2
model_avg_fitted_se <- sqrt(fitted_var)
model_avg_fitted_ci <- rbind(model_average - qt(0.975, num_data_points - nparams) * model_avg_fitted_se,
                             model_average + qt(0.975, num_data_points - nparams) * model_avg_fitted_se)
model_avg_fitted_ci99 <- rbind(model_average - qt(0.995, num_data_points - nparams) * model_avg_fitted_se,
                               model_average + qt(0.995, num_data_points - nparams) * model_avg_fitted_se)

# Write out images of fit and results matrix containing fit and bootstrapped values
writepdfsandresults(output_name, 0)
path: /scripts/Multimodel averaging parametric/T11_marinePA.r | repo: mikeharfoot/Aichi-2020-analysis | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 11,275 | extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/mr_cML-methods.R
\docType{methods}
\name{mr_cML}
\alias{mr_cML}
\alias{mr_cML,MRInput-method}
\title{Constrained maximum likelihood (cML) method}
\usage{
mr_cML(
  object,
  MA = TRUE,
  DP = TRUE,
  K_vec = 0:(length(object@betaX) - 2),
  random_start = 0,
  num_pert = 200,
  random_start_pert = 0,
  maxit = 100,
  random_seed = 314,
  n,
  Alpha = 0.05
)

\S4method{mr_cML}{MRInput}(
  object,
  MA = TRUE,
  DP = TRUE,
  K_vec = 0:(length(object@betaX) - 2),
  random_start = 0,
  num_pert = 200,
  random_start_pert = 0,
  maxit = 100,
  random_seed = 314,
  n,
  Alpha = 0.05
)
}
\arguments{
\item{object}{An \code{MRInput} object.}

\item{MA}{Whether model averaging is applied or not. Default is TRUE.}

\item{DP}{Whether data perturbation is applied or not. Default is TRUE.}

\item{K_vec}{Set of candidate K's, the constraint parameter representing the number of invalid IVs. Default is from 0 to (#IV - 2).}

\item{random_start}{Number of random starting points for cML, default is 0.}

\item{num_pert}{Number of perturbations when DP is TRUE, default is 200.}

\item{random_start_pert}{Number of random starting points for cML with data perturbation, default is 0.}

\item{maxit}{Maximum number of iterations for each optimization. Default is 100.}

\item{random_seed}{Random seed, default is 314. When \code{random_seed=NULL}, no random seed will be used and the results may not be reproducible.}

\item{n}{Sample size. When sample sizes of the GWAS for exposure and outcome are different, and/or when sample sizes of different SNPs are different, the smallest sample size is recommended to get a conservative result and avoid type-I error. See reference for more discussion.}

\item{Alpha}{Significance level for the confidence interval of the estimate, default is 0.05.}
}
\value{
The output from the function is an \code{MRcML} object containing:

\item{Exposure}{A character string giving the name given to the exposure.}
\item{Outcome}{A character string giving the name given to the outcome.}
\item{Estimate}{Estimate of theta.}
\item{StdError}{Standard error of the estimate.}
\item{Pvalue}{p-value of the estimate.}
\item{BIC_invalid}{Set of selected invalid IVs if cML-BIC is performed, i.e. without MA or DP.}
\item{GOF1_p}{p-value of the first goodness-of-fit test.}
\item{GOF2_p}{p-value of the second goodness-of-fit test.}
\item{SNPs}{The number of SNPs that were used in the calculation.}
\item{Alpha}{Significance level for the confidence interval of the estimate, default is 0.05.}
\item{CILower}{Lower bound of the confidence interval for the estimate.}
\item{CIUpper}{Upper bound of the confidence interval for the estimate.}
\item{MA}{Indicator of whether model averaging is applied.}
\item{DP}{Indicator of whether data perturbation is applied.}
}
\description{
Constrained maximum likelihood (cML) based Mendelian randomization method robust to both correlated and uncorrelated pleiotropy.
}
\details{
The MRcML method selects invalid IVs with correlated and/or uncorrelated pleiotropic effects using constrained maximum likelihood. \code{cML-BIC} gives results of the selected model with the original data, while \code{cML-MA-BIC} averages over all candidate models. \code{cML-BIC-DP} and \code{cML-MA-BIC-DP} are the versions with data perturbation to account for selection uncertainty when many invalid IVs have weak pleiotropic effects.

When DP is performed, two goodness-of-fit (GOF) tests are developed to check whether the model-based and DP-based variance estimates converge to the same estimate. Small p-values of the GOF tests indicate that selection uncertainty is not ignorable, and the results from DP are more reliable. See reference for more details.

As the constrained maximum likelihood function is non-convex, multiple random starting points can be used to find a global minimum. For some starting points the algorithm may not converge and a warning message will be prompted; typically this does not affect the results.
}
\examples{
# Perform cML-MA-BIC-DP:
mr_cML(mr_input(bx = ldlc, bxse = ldlcse, by = chdlodds, byse = chdloddsse),
       num_pert = 5, MA = TRUE, DP = TRUE, n = 17723)
# num_pert is set to 5 to reduce computational time;
# the default value of 200 is recommended in practice

# Perform cML-BIC-DP:
mr_cML(mr_input(bx = ldlc, bxse = ldlcse, by = chdlodds, byse = chdloddsse),
       MA = TRUE, DP = FALSE, n = 17723)
}
\references{
Xue, H., Shen, X., & Pan, W. (2021). Constrained maximum likelihood-based Mendelian randomization robust to both correlated and uncorrelated pleiotropic effects. The American Journal of Human Genetics, 108(7), 1251-1269.
}
path: /man/mr_cML.Rd | repo: cran/MendelianRandomization | license: no_license | language: R | is_vendor: false | is_generated: true | length_bytes: 4,692 | extension: rd
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 3303
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 3293
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 3293
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/RankingFunctions/rankfunc60_signed_32.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1306
c no.of clauses 3303
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 3293
c
c QBFLIB/Wintersteiger/RankingFunctions/rankfunc60_signed_32.qdimacs 1306 3303 E1 [289 290 387 388 716 717 910 911 1104 1105] 0 192 1102 3293 RED
path: /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Wintersteiger/RankingFunctions/rankfunc60_signed_32/rankfunc60_signed_32.R | repo: arey0pushpa/dcnf-autarky | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 791 | extension: r
library(readr)
library(dplyr)
library(ggplot2)
library(highcharter)

bsg = read_rds("~/Downloads/bsg.rds")

bsg %>% select(bsmmat01, bsmmat02, bsmmat03, bsmmat04, bsmmat05, bsbg07a, bsbg07b) -> scores
scores %>% mutate(nomre = round((bsmmat01 + bsmmat02 + bsmmat03 + bsmmat04 + bsmmat05) / 5, 0)) -> scores
scores %>% filter(bsbg07a != 8 & bsbg07b != 8) -> scores
scores %>% mutate(edu = round((bsbg07a + bsbg07b) / 5, 0)) -> fives
fives %>% select(nomre, edu) -> p
p = na.omit(p)
p %>% mutate(education = as.character(edu)) -> p

ggplot(data = p) +
  geom_density(aes(x = as.numeric(nomre), fill = education), alpha = 0.3)

aov(nomre ~ edu, data = p) -> fit
summary(fit)

scores %>% mutate(edu = round((bsbg07a + bsbg07b) / 10, 0)) -> two
two %>% filter(edu == 0) -> yek
two %>% filter(edu == 1) -> dow

hchart(density(yek$nomre), type = "area", name = "low educated") %>%
  hc_add_series(density(dow$nomre), type = "area", name = "high educated")

t.test(dow$nomre, yek$nomre, alt = "greater")
path: /HW4/HW4-2.R | repo: praal/data_analysis_course | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 997 | extension: r
library(reshape2)

##### Download Data Files & Extract From Archive If The UCI Directory Doesn't Already Exist #####
# I look for the existence of the phone data directory - if not present, download & extract
if (!file.exists("UCI HAR Dataset")) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, dest = "./dataset.zip", method = "curl")
  unzip("./dataset.zip")
}

# Set this as my working directory
setwd("./UCI HAR Dataset")

##### 1. Merge Test & Training Data #####
# get the data files required for this analysis - initially x_test & x_train.
# .. later I add the activities & subjects
test_data  <- read.table("./test/X_test.txt")    # test data
train_data <- read.table("./train/X_train.txt")  # training data
features   <- read.table("./features.txt")       # use features.txt file to get colnames

cols <- as.data.frame(features$V2)           # assign df to column 2 of features file
cols <- t(cols)                              # transpose names into column names
merged_data <- rbind(test_data, train_data)  # rbind test & training data into one
colnames(merged_data) <- cols                # assign colnames from cols

# get subject data
test_subjects  <- read.table("./test/subject_test.txt")
train_subjects <- read.table("./train/subject_train.txt")
merged_subjects <- rbind(test_subjects, train_subjects)
colnames(merged_subjects) <- c("Subject")

##### 2. Extract Mean & SD #####
# I'm using grep to weed out all data with colnames containing either 'mean()' or 'std()'.
merged_data <- merged_data[grep("mean\\(\\)|std\\(\\)", names(merged_data))]

##### 3. Add Descriptive Activity Names #####
# first get the files associated with activity
test_acts  <- read.table("./test/y_test.txt")    # test activity ids
train_acts <- read.table("./train/y_train.txt")  # training activity ids
act_names  <- read.table("./activity_labels.txt")  # ids2names file
merged_activities <- rbind(test_acts, train_acts)  # merge both id sets
colnames(merged_activities) <- c("Activity")        # Assign a colname

# Use factor to relabel Activity IDs into Names using the act_names df from above
merged_activities$Activity <- factor(merged_activities$Activity, levels = act_names$V1, labels = act_names$V2)

# Now, I finally merge all 3 components (data, activity & subject) into one DF.
merged_data <- cbind(merged_data, merged_activities, merged_subjects)

##### 4. Label Dataset With Descriptive Variable Names #####
new_cols <- gsub("^f(.*?)", "FrequencyOf\\1", names(merged_data))  # replace 'f' at start of name with full description
new_cols <- gsub("^t(.*?)", "TimeOf\\1", new_cols)                 # same with 't'
new_cols <- gsub("[\\(\\)\\-]", "", new_cols)                      # remove all unwanted characters "()-"
new_cols <- gsub("BodyBody", "Body", new_cols)                     # replace duplicate entries
new_cols <- gsub("mean", "Mean", new_cols)                         # .. general
new_cols <- gsub("std", "StandardDeviation", new_cols)             # .. tidying
new_cols <- gsub("Acc", "Acceleration", new_cols)                  # .. up
new_cols <- gsub("Mag", "Magnitude", new_cols)                     # .. of
new_cols <- gsub("Gyro", "Gyroscope", new_cols)                    # .. names
new_cols <- gsub("([XYZ])", "ForThe\\1Axis", new_cols)             # Give the axes some literal description
new_cols <- gsub("([A-Z])", " \\1", new_cols)                      # Finally place a space before uppercase letters
new_cols <- gsub("^\\s+|\\s+$", "", new_cols)                      # Remove leading & trailing whitespace
colnames(merged_data) <- new_cols                                  # Assign new changes to the colnames

##### 5. Tidy Data #####
# I'm using the melt & dcast functions from the reshape2 library to tidy by Subject and Activity.
melt_data <- melt(merged_data, id.vars = c("Subject", "Activity"))
tidy <- dcast(melt_data, Subject + Activity ~ variable, fun = mean)

# Finally write out the finished table - woohoo!!
write.table(tidy, file = "./tidy_data.txt", row.names = FALSE)
path: /run_analysis.R | repo: limrock1/Getting_Cleaning_Data | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 4,002 | extension: r
var_mlm <- function(df = pheno,
                    nullmodel = "~ (1 | GCA1.all) + (1 | GCA2.all)",
                    fullmodel = "~ (1 | GCA1.all) + (1 | GCA2.all) + (1 | SCA.all)") {

  # Structures to hold results
  r2marg <- data.frame(trait = vector(), r2marg = vector(), r2cond = vector())
  sig <- data.frame(trait = vector(), chisq = vector(), df = vector(), p.value = vector())

  # Construct MLMs for each of the 61 time points
  nms <- names(moon)
  idx1 <- which(nms == "V1")
  idx2 <- which(nms == "V61")
  for (i in idx1:idx2) {
    trait <- names(df)[2]

    # Construct the null model
    null.formula <- as.formula(paste(trait, nullmodel, sep = " "))
    model.null <- lmer(null.formula, data = df, REML = FALSE)

    # Construct the model
    full.formula <- as.formula(paste(trait, fullmodel, sep = " "))
    model.full <- lmer(full.formula, data = df, REML = FALSE)

    # Retrieve the p-value
    inter1a <- anova(model.null, model.full)[2, 6:8]
    inter1b <- data.frame(trait = trait, chisq = inter1a[1, 1], df = inter1a[1, 2], p.value = inter1a[1, 3])
    sig <- rbind(sig, inter1b)

    # Calculate the marginal R-squared
    inter2a <- rsquared.glmm(model.full)
    inter2b <- data.frame(trait = trait, r2marg = inter2a[1, 4], r2cond = inter2a[1, 5])
    r2marg <- rbind(r2marg, inter2b)
  }

  # Calculate q-values
  sig$q.value <- p.adjust(sig[, "p.value"], method = "fdr")

  time.points <- seq(0, 180, 3)
  res <- data.frame(time = time.points, sig = sig, r2 = r2marg)
  return(res)
}
path: /Rcodes/var_mlm.R | repo: yangjl/zmSNPtools | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 1,581 | extension: r
# Introduction ## ══════════════ # • Learning objectives: ## • Learn the R formula interface ## • Specify factor contrasts to test specific hypotheses ## • Perform model comparisons ## • Run and interpret variety of regression models in R ## Set working directory ## ───────────────────────── ## It is often helpful to start your R session by setting your working ## directory so you don't have to type the full path names to your data ## and other files # set the working directory # setwd("~/Desktop/Rstatistics") # setwd("C:/Users/dataclass/Desktop/Rstatistics") setwd("/Users/katherineangell/Documents/Course_Work /Springboard/Data/linear_regression") ## You might also start by listing the files in your working directory getwd() # where am I? list.files("dataSets") # files in the dataSets folder ## Load the states data ## ──────────────────────── # read the states data states.data <- readRDS("dataSets/states.rds") #get labels states.info <- data.frame(attributes(states.data)[c("names", "var.labels")]) #look at last few labels tail(states.info, 8) ## Linear regression ## ═══════════════════ ## Examine the data before fitting models ## ────────────────────────────────────────── str(states.data) ## Start by examining the data to check for problems. # summary of expense and csat columns, all rows sts.ex.sat <- subset(states.data, select = c("expense", "csat")) summary(sts.ex.sat) # correlation between expense and csat cor(sts.ex.sat) ## Plot the data before fitting models ## ─────────────────────────────────────── ## Plot the data to look for multivariate outliers, non-linear ## relationships etc. # scatter plot of expense vs csat plot(sts.ex.sat) ## Linear regression example ## ───────────────────────────── ## • Linear regression models can be fit with the `lm()' function ## • For example, we can use `lm' to predict SAT scores based on ## per-pupal expenditures: # Fit our regression model sat.mod <- lm(csat ~ expense, # regression formula data=states.data) # data set # Summarize and print the results summary(sat.mod) # show regression coefficients table ## Why is the association between expense and SAT scores /negative/? ## ───────────────────────────────────────────────────────────────────── ## Many people find it surprising that the per-capita expenditure on ## students is negatively related to SAT scores. The beauty of multiple ## regression is that we can try to pull these apart. What would the ## association between expense and SAT scores be if there were no ## difference among the states in the percentage of students taking the ## SAT? summary(lm(csat ~ expense + percent, data = states.data)) ## The lm class and methods ## ──────────────────────────── ## OK, we fit our model. Now what? ## • Examine the model object: class(sat.mod) names(sat.mod) methods(class = class(sat.mod))[1:9] ## • Use function methods to get more information about the fit confint(sat.mod) hist(residuals(sat.mod)) ## Linear Regression Assumptions ## ───────────────────────────────── ## • Ordinary least squares regression relies on several assumptions, ## including that the residuals are normally distributed and ## homoscedastic, the errors are independent and the relationships are ## linear. ## • Investigate these assumptions visually by plotting your model: par(mar = c(4, 4, 2, 2), mfrow = c(1, 2)) #optional plot(sat.mod, which = c(1, 2)) # "which" argument optional ## Comparing models ## ──────────────────── ## Do congressional voting patterns predict SAT scores over and above ## expense? 
Fit two models and compare them: # fit another model, adding house and senate as predictors sat.voting.mod <- lm(csat ~ expense + house + senate, data = na.omit(states.data)) sat.mod <- update(sat.mod, data=na.omit(states.data)) # compare using the anova() function anova(sat.mod, sat.voting.mod) coef(summary(sat.voting.mod)) ## Exercise: least squares regression ## ──────────────────────────────────────── ## Use the /states.rds/ data set. Fit a model predicting energy consumed ## per capita (energy) from the percentage of residents living in ## metropolitan areas (metro). Be sure to ## 1. Examine/plot the data before fitting the model plot(states.data) model1<- lm(energy~metro, data=states.data) ## 2. Print and interpret the model `summary' summary(model1) ## 3. `plot' the model to look for deviations from modeling assumptions plot(model1) ## Select one or more additional predictors to add to your model and ## repeat steps 1-3. Is this model significantly better than the model ## with /metro/ as the only predictor? model2<- lm(energy~metro+income+green, data=states.data) summary(model2) plot(model2) #Yes, model 2 with factors income and greenhouse gas significantly improves the gression model than in comparison to the single predictor model. Adjusted R-squared for the first model is .097, while the adjusted R-squared for the second model with more factors is .6242, indicating this regression line is a much better fit for the data and has less residuals. ## Interactions and factors ## ══════════════════════════ ## Modeling interactions ## ───────────────────────── ## Interactions allow us assess the extent to which the association ## between one predictor and the outcome depends on a second predictor. ## For example: Does the association between expense and SAT scores ## depend on the median income in the state? #Add the interaction to the model sat.expense.by.percent <- lm(csat ~ expense*income, data=states.data) #Show the results coef(summary(sat.expense.by.percent)) # show regression coefficients table ## Regression with categorical predictors ## ────────────────────────────────────────── ## Let's try to predict SAT scores from region, a categorical variable. ## Note that you must make sure R does not think your categorical ## variable is numeric. # make sure R knows region is categorical str(states.data$region) states.data$region <- factor(states.data$region) #Add region to the model sat.region <- lm(csat ~ region, data=states.data) #Show the results coef(summary(sat.region)) # show regression coefficients table anova(sat.region) # show ANOVA table ## Again, *make sure to tell R which variables are categorical by ## converting them to factors!* ## Setting factor reference groups and contrasts ## ───────────────────────────────────────────────── ## In the previous example we use the default contrasts for region. The ## default in R is treatment contrasts, with the first level as the ## reference. We can change the reference group or use another coding ## scheme using the `C' function. # print default contrasts contrasts(states.data$region) # change the reference group coef(summary(lm(csat ~ C(region, base=4), data=states.data))) # change the coding scheme coef(summary(lm(csat ~ C(region, contr.helmert), data=states.data))) ## See also `?contrasts', `?contr.treatment', and `?relevel'. ## Exercise: interactions and factors ## ──────────────────────────────────────── ## Use the states data set. ## 1. 
Add on to the regression equation that you created in exercise 1 by
##   generating an interaction term and testing the interaction.

model3 <- lm(energy ~ metro + green + income + metro*income, data=states.data)
summary(model3)

##   2. Try adding region to the model. Are there significant differences
##      across the four regions?

energy.region <- lm(energy ~ region, data=states.data)
coef(summary(energy.region))

# Yes, the estimated effects on energy consumption differ substantially across the regions relative to the reference group. For example, states in the Northeast are estimated to consume 156 BTU less than the reference group, states in the South 25 BTU less, and states in the Midwest 61 BTU less. I believe the reference group is the West.
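## A formal follow-up to the adjusted-R-squared comparison in the earlier exercise (a
## sketch only; the exercise does not require it). Both models are refit on the same
## complete cases so that the nested-model F-test is valid.
states.sub <- na.omit(subset(states.data, select = c("energy", "metro", "income", "green")))
m.small <- lm(energy ~ metro, data = states.sub)
m.large <- lm(energy ~ metro + income + green, data = states.sub)
anova(m.small, m.large)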
/LinearRegProject.R
no_license
Kangell1/Springboard_Coursework
R
false
false
9,318
r
##%######################################################%##
#                                                          #
####    Script for the analysis of the Star Wars data   ####
#                                                          #
##%######################################################%##

# Load the data
library(dplyr)
library(ggplot2)  # needed for the scatterplot at the end of the script
data(starwars)

# We want the number of individuals, the mean height and the mean weight for each
# combination of species, home planet and gender.
# We exclude individuals that are missing any of the required data or that have no
# starship assigned.
starwars %>%
  group_by(species, homeworld, gender) %>%
  filter(!is.na(species), !is.na(homeworld), !is.na(gender),
         !is.na(height), !is.na(mass), !is.na(starships)) %>%
  summarise(count = n(),
            av_height = mean(height),
            av_weight = mean(mass))

# The authorities of Naboo are looking for a hidden Sith lord who sabotaged the punch
# at the planet's last Jedi meeting. Several attendees agree on the description of a
# subject to be found in the database: dark skin, black hair, brown eyes, more than
# 180 cm tall and more than 70 kg in weight. Who is the main suspect?
starwars %>%
  filter(skin_color == "dark", hair_color == "black", eye_color == "brown",
         height > 180, mass > 70) %>%
  select(name, height) %>%
  arrange(desc(height))

# Princess Leia Organa of Alderaan is putting together her security guard. She needs
# a group of humans who are taller than 165 cm and, in addition, blond.
# For a later selection, these humans must be grouped first by their home planet and
# then by their height and weight.
# The three tallest and heaviest will be chosen.
# These requirements are due to the destination planet having very low light
# intensity and gravity.
starwars %>%
  filter(species == "Human", height > 165, hair_color %in% c("blond", "blonde")) %>%
  group_by(homeworld, height, mass) %>%
  select(name, height, mass, homeworld, gender) %>%
  arrange(desc(height), desc(mass)) %>%
  top_n(n = 5, height)

# Is there any relationship between the characters' height and weight?
ggplot(data = starwars, aes(x = height, y = mass)) +
  geom_point() +
  geom_smooth()
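# A possible refinement (not part of the original exercise): one extremely heavy
# character stretches the y-axis, so the height-weight trend is easier to see after
# removing missing values and dropping masses above 1000 kg.
starwars %>%
  filter(!is.na(height), !is.na(mass), mass < 1000) %>%
  ggplot(aes(x = height, y = mass)) +
  geom_point() +
  geom_smooth()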
/20190926_Octava_reunion/script_base.R
no_license
RMalagaGroup/Presentaciones
R
false
false
2,568
r
## Many cases handled here are simple replacements, e.g. ^ to pow. ## These could easily be done elsewhere, but it works cleanly to do them here. #################################### ## System to processSpecificCalls ## #################################### compile_simpleTransformations <- function(code, symTab, auxEnv, opInfoName = "simpleTransformations", handlerEnv = simpleTransformationsEnv) { nErrorEnv$stateInfo <- paste0("handling ", opInfoName, " for ", code$name, ".") if(code$isName) return(invisible()) if(code$isCall) { for(i in seq_along(code$args)) { if(inherits(code$args[[i]], 'exprClass')) { compile_simpleTransformations(code$args[[i]], symTab, auxEnv, opInfoName, handlerEnv) } } opInfo <- operatorDefEnv[[code$name]] if(!is.null(opInfo)) { handlingInfo <- opInfo[[opInfoName]] if(!is.null(handlingInfo)) { handler <- handlingInfo[['handler']] if(!is.null(handler)) eval(call(handler, code, symTab, auxEnv, handlingInfo), envir = handlerEnv) } } } nErrorEnv$stateInfo <- character() invisible(NULL) } simpleTransformationsEnv <- new.env() simpleTransformationsEnv$.debug <- FALSE ## for min(V), no change. for min(v1, v2), change to pairmin(v1, v2) simpleTransformationsEnv$minMax <- function(code, symTab, auxEnv, info) { if(length(code$args) == 2) code$name <- paste0('pair',code$name) } simpleTransformationsEnv$replace <- function(code, symTab, auxEnv, info) { repl <- info$replacement if(is.null(repl)) stop(paste0("No valid replacement for ", code$name), call. = FALSE) code$name <- repl }
/nCompiler/R/compile_simpleTransformations.R
permissive
nimble-dev/nCompiler
R
false
false
2,027
r
#===========================================================================================
# Statistical test (Vrd) for within-participant AR comparisons as proposed by Vatavu & Wobbrock 2015
# Author: Theophanis Tsandilas
#======================================================================================

# Requires the installation of the coin package, which provides an implementation of Cochran's Q test:
# https://cran.r-project.org/web/packages/coin/coin.pdf
# -> install.packages("coin")

rm(list=ls()) # Clean up R's memory

library("coin")

# data is a matrix containing the signs proposed for each referent (columns) by each participant (rows)
# returns a binary matrix with agreement values (1 = agreement, 0 = disagreement) for all the pairs of participants (rows) for each referent (columns)
agreementPairs <- function(data){
    nrows <- nrow(data)
    ncols <- ncol(data)

    mat <- matrix(, nrow = nrows * (nrows - 1) / 2, ncol = ncols)

    index <- 1
    for(i1 in 1:(nrows - 1)){
        for(i2 in (i1 + 1):nrows){
            for(r in 1:ncols){
                mat[index, r] <- as.integer(data[i1, r] == data[i2, r])
            }
            index <- index + 1
        }
    }

    mat
}

# coin implementation : https://cran.r-project.org/web/packages/coin/coin.pdf
vrd <- function(pairs){
    nrefs <- ncol(pairs)
    npairs <- nrow(pairs)

    # vectorize the pairs
    response <- c(t(pairs))

    fact <- gl(nrefs, 1, nrefs*npairs)
    block <- gl(npairs, nrefs)

    symmetry_test(response~fact|block, teststat = "quad")
}

############## Correct generation of pairs for Cochran's Q test by only considering the n - 1 independent pairs
######################################################################################
agreementPairs.independent <- function(data){
    nrows <- nrow(data)
    ncols <- ncol(data)

    mat <- matrix(, nrow = nrows - 1, ncol = ncols)

    for(i in 1:(nrows - 1)){
        for(r in 1:ncols){
            mat[i, r] <- as.integer(data[i, r] == data[i + 1, r])
        }
    }

    mat
}
##############################################################################################

######################################################################################
######################################################################################
# Vatavu's implementation for 2 referents (k = 2)
# Note that their equation 9 (k > 2) seems to be incorrect
# Vatavu et al.'s equations are unnecessary. There are numerous statistical packages that calculate Cochran's Q test (see above)

# agreement
ar <- function(v){
    sum(v)/length(v)
}

# co-agreement
cr <- function(v1, v2){
    sum(v1*v2)/length(v1)
}

vrd.vatavu <- function(pairs){
    n <- nrow(pairs)

    ar.vector <- apply(pairs, 2, ar)
    cr.total <- cr(pairs[,1], pairs[,2])

    n*(ar.vector[1] - ar.vector[2])^2 / (ar.vector[1] + ar.vector[2] - 2*cr.total)
}
######################################################################################
######################################################################################
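######################################################################################
# A small usage sketch (the simulated proposals below are an assumption for
# illustration only): 8 participants propose one of three signs for each of
# 2 referents, and the Vrd test is applied to the resulting agreement pairs.
set.seed(42)
proposals <- matrix(sample(c("swipe", "tap", "pinch"), 8 * 2, replace = TRUE),
                    nrow = 8, ncol = 2)
pairs <- agreementPairs(proposals)
vrd(pairs)          # Cochran's Q / symmetry test from the coin package
vrd.vatavu(pairs)   # closed-form statistic for k = 2 referents
######################################################################################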
/section6.within.vatavu2015.Vrd.R
no_license
raduvro/TOCHI-supplementary
R
false
false
3,231
r
######################################## # Shoo the Flu evaluation # Analysis of student influenza vaccination coverage # Create figures showing vaccination coverage by education ######################################## rm(list=ls()) # define directories, load libraries source(here::here("0-config.R")) load(vax_results_2017_path) load(vax_results_2018_path) vx.y1.o.edu$yr="2014-15" vx.y2.o.edu$yr="2015-16" vx.y3.o.edu$yr="2016-17" vx.y4.o.edu$yr="2017-18" vx.y1.w.edu$yr="2014-15" vx.y2.w.edu$yr="2015-16" vx.y3.w.edu$yr="2016-17" vx.y4.w.edu$yr="2017-18" vx.y1.o.edu$dist="OUSD" vx.y2.o.edu$dist="OUSD" vx.y3.o.edu$dist="OUSD" vx.y4.o.edu$dist="OUSD" vx.y1.w.edu$dist="WCCUSD" vx.y2.w.edu$dist="WCCUSD" vx.y3.w.edu$dist="WCCUSD" vx.y4.w.edu$dist="WCCUSD" vx.y1.o.edu$edu=rownames(vx.y1.o.edu) vx.y2.o.edu$edu=rownames(vx.y2.o.edu) vx.y3.o.edu$edu=rownames(vx.y3.o.edu) vx.y4.o.edu$edu=rownames(vx.y4.o.edu) vx.y1.w.edu$edu=rownames(vx.y1.w.edu) vx.y2.w.edu$edu=rownames(vx.y2.w.edu) vx.y3.w.edu$edu=rownames(vx.y3.w.edu) vx.y4.w.edu$edu=rownames(vx.y4.w.edu) vx.edu=rbind(vx.y1.o.edu,vx.y2.o.edu,vx.y3.o.edu,vx.y4.o.edu, vx.y1.w.edu,vx.y2.w.edu,vx.y3.w.edu,vx.y4.w.edu) vx.edu$yr=as.factor(vx.edu$yr) vx.edu$Mean=vx.edu$Mean*100 vx.edu$Mean.f=sprintf("%0.0f",vx.edu$Mean) vx.edu$lower=vx.edu$LowerCI*100 vx.edu$upper=vx.edu$UpperCI*100 # drop group with too few data vx.edu=vx.edu[!vx.edu$edu %in% c("Error"),] vx.edu=vx.edu[!vx.edu$edu %in% c("Missing"),] vx.edu$edu=factor(vx.edu$edu,levels=c("Less than high school", "High school","Associate/College","Postgrad")) # levels(vx.edu$edu)[1]="Less than\nhigh school" # levels(vx.edu$edu)[3]="Associate/\nCollege degree" # pdf(file=paste0(plot_path,"fig-vxcov-edu-y4.pdf"),width=15,height=3) # ggplot(vx.edu,aes(x=edu,y=Mean,group=dist))+ # geom_point(aes(shape=dist,col=dist),position=position_dodge(width=0.5),size=2)+ # geom_errorbar(aes(ymin=lower,ymax=upper,col=dist), # position=position_dodge(width=0.5), width=0.15,size=.7)+ # ylab("Percent")+xlab("")+ # scale_y_continuous(limits=c(40,90),labels=seq(40,90,5),breaks=seq(40,90,5))+ # scale_color_manual("",values=c("#2185c5","#ff9715"))+ # scale_shape_manual("",values=c(16,17))+ # geom_text(mapping=aes(x=edu,y=Mean,label=Mean.f,col=dist), # position=position_dodge(width=0.5),hjust=-0.4,show.legend=FALSE,size=2.5)+ # theme_complete_bw()+facet_grid(~yr) # dev.off() # levels(vx.edu$edu)[1]="Less than high school" # levels(vx.edu$edu)[3]="Associate/College degree" pdf(file=paste0(plot_path,"fig-vxcov-edu-time.pdf"),width=13,height=4) ggplot(vx.edu,aes(x=yr,y=Mean,group=dist))+ geom_linerange(aes(ymin=lower,ymax=upper,col=dist),size=.7, alpha = 0.7)+ geom_line(aes(col=dist), size = 1.5)+ ylab("Percent of students vaccinated for influenza")+xlab("")+ scale_y_continuous(limits=c(45,80),labels=seq(45,80,5),breaks=seq(45,80,5))+ scale_color_manual("",values=c("#2185c5","#ff9715"))+ scale_shape_manual("",values=c(16,17))+ facet_grid(~edu)+ theme_complete_bw()+ theme(strip.text.x = element_text(size = 14)) + theme(legend.position="bottom") dev.off()
/2-tab-fig/4-fig-vxcov-edu.R
no_license
jadebc/vax-cov-public
R
false
false
3,171
r
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(9.92835148599348e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, NaN, 9.70419048763886e-101, 9.70418696071228e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.69721082575941e-101, 9.70418706718012e-101, 2.85876263996662e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(grattan::IncomeTax,testlist) str(result)
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051557-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
609
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getbacon.R
\name{getbacon}
\alias{getbacon}
\title{getbacon}
\usage{
getbacon(width, height)
}
\arguments{
\item{width}{numeric}

\item{height}{numeric}
}
\value{
A character string containing the link.
}
\description{
Returns a link to a random picture of bacon with the specified width and height.
}
\examples{
getbacon(300, 400)
}
/man/getbacon.Rd
no_license
bdacunha/randompic
R
false
true
390
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pass_recieve.R \name{Pass} \alias{Pass} \title{Simple way to send objects from one script to another.} \usage{ Pass(x, description = "results") } \arguments{ \item{x}{R object to send} \item{description}{String describing R object stored in tree} } \value{ Value of Write. } \description{ Simple way to send objects from one script to another. } \details{ Adds '.rda' to object name and writes to Result file. Works with 'Receive'. } \examples{ \dontrun{ # Requires internet connection to access GitHub. Pass(myObject,"great result") } }
/man/Pass.Rd
no_license
gelfondjal/adapr
R
false
true
618
rd
library(cec2005benchmark) ### Name: cec2005benchmark1 ### Title: Function Number 1 ### Aliases: cec2005benchmark1 ### ** Examples cec2005benchmark1(runif(10, -100, 100))
/data/genthat_extracted_code/cec2005benchmark/examples/cec2005benchmark1.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
177
r
#' plot an intensity surface on the sphere plot.sphppm <- function(x, eye=place("nedlands"), top=place("northpole"), w, ..., eps=NULL, dimyx=NULL, main="", action=c("image", "contour", "imagecontour"), col.image=NULL, col.lines=NULL) { action <- match.arg(action) if(inherits(x, "sphppm")) { #' make grid of points for function evaluation lonlat <- expand.grid(lon=0:359, lat=asin(seq(-0.999, 0.999, length=200)) * 180/pi) lonlat <- rbind(lonlat, data.frame(lon=0, lat=c(90, -90))) lon <- lonlat$lon lat <- lonlat$lat phi <- (lon/180)*pi theta <- ((90-lat)/180)*pi Xgrid <- sp2(cbind(theta=theta,phi=phi)) #' restrict to window if(missing(w)) w <- x$X$win if(!is.null(w)) { inside <- in.W(Xgrid, w) Xgrid$X <- Xgrid$X[inside,,drop=FALSE] Xgrid$win <- w } #' compute fitted intensity values <- predict(x, newdata=Xgrid) df <- cbind(Convert.globe(Xgrid$X), data.frame(values=values)) } else if(is.data.frame(x)) { stopifnot(ncol(x) == 3) if(is.null(cnames <- colnames(x)) || !identical(cnames[1:2], c("lon", "lat"))) warning(paste("Interpreting the first two columns of data as", "longitude and latitude respectively, in degrees"), call.=FALSE) df <- x if(missing(w)) w <- NULL } else stop("x should be a fitted model (sphppm)") if(!is.globe.point(eye)) eye <- Convert.globe(eye) if(!is.globe.point(top)) eye <- Convert.globe(top) eye3 <- ensure3d(eye) top3 <- ensure3d(top) spos <- spatialpos(df[,1], df[,2]) mpos <- orthogproj(eye3, top3, spos) xx <- mpos[,1] yy <- mpos[,2] ok <- (mpos[,3] < 0) D <- disc(1) W <- disc(1, mask=TRUE, eps=eps, dimyx=dimyx) if(!is.null(w)) { wow <- sphwin2owin(w, eye=eye, top=top) W <- intersect.owin(W, wow) } X <- ppp(xx[ok], yy[ok], marks=df[ok,3], window=W, check=FALSE) ## sigma <- 2 * mean(nndist(X)) sigma <- 0.0125 Y <- Smooth(X, sigma=sigma, eps=eps, dimyx=dimyx) switch(action, image = { do.call(plot.im, resolve.defaults(list(x=Y, col=col.image), list(...), list(main=main, box=FALSE), .MatchNull=FALSE, .StripNull=TRUE)) plot(D, add=TRUE) }, contour = { plot(D, main=main) do.call(contour.im, resolve.defaults(list(x=Y, col=col.lines, add=TRUE), list(...), .MatchNull=FALSE, .StripNull=TRUE)) }, imagecontour = { do.call.matched(plot.im, resolve.defaults(list(x=Y, col=col.image), list(...), list(main=main, box=FALSE), .MatchNull=FALSE, .StripNull=TRUE), extrargs="box") extrargs <- setdiff(names(formals(contour.default)), union(c("x", "y", "z", "..."), names(formals(contour.im)))) do.call.matched(contour.im, resolve.defaults(list(x=Y, col=col.lines, add=TRUE), list(...), .MatchNull=FALSE, .StripNull=TRUE), extrargs=extrargs) plot(D, add=TRUE) }) if(!is.null(w)) plot(wow, add=TRUE) return(invisible(NULL)) }
/R/plot.sphppm.R
no_license
baddstats/spherstat
R
false
false
3,778
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/dimred_sample_page.R
\name{add_dimred_sample_page}
\alias{add_dimred_sample_page}
\alias{add_dimred_sample_page,i2dashboard,missing-method}
\alias{add_dimred_sample_page,i2dashboard,SingleCellExperiment-method}
\alias{add_dimred_sample_page,i2dashboard,Seurat-method}
\title{Characterize and visualize dimension reductions and sample groupings / metadata.}
\usage{
add_dimred_sample_page(dashboard, object, ...)

\S4method{add_dimred_sample_page}{i2dashboard,missing}(
  dashboard,
  use_dimred,
  sample_metadata,
  group_by,
  page = "dimred_sample_page",
  title = "Dim. reduction & sample metadata",
  labels = rownames(use_dimred),
  show_group_sizes = TRUE,
  show_silhouette = FALSE,
  menu = NULL
)

\S4method{add_dimred_sample_page}{i2dashboard,SingleCellExperiment}(dashboard, object, use_dimred, sample_metadata, ...)

\S4method{add_dimred_sample_page}{i2dashboard,Seurat}(dashboard, object, use_dimred, sample_metadata, ...)
}
\arguments{
\item{dashboard}{A \linkS4class{i2dashboard}.}

\item{object}{An object of class \linkS4class{Seurat} or \linkS4class{SingleCellExperiment}.}

\item{...}{Further parameters passed to the core function.}

\item{use_dimred}{Data containing coordinates of the reduced dimensions or a string indicating a dimension reduction from "reductions" of a Seurat \code{object}. Rownames are used as sample labels.}

\item{sample_metadata}{Sample metadata in columns and samples in rows (see Details).}

\item{group_by}{A string indicating a column in \code{sample_metadata} that is used to group observations.}

\item{page}{The name of the page to be added.}

\item{title}{The title of the page.}

\item{labels}{An optional vector with sample labels.}

\item{show_group_sizes}{A logical value indicating if a barplot showing the number of observations from \code{group_by} will be created (default \code{TRUE}).}

\item{show_silhouette}{A logical value indicating if a silhouette plot should be shown (default \code{FALSE}).}

\item{menu}{The name of the menu, under which the page should appear.}
}
\value{
An object of class \linkS4class{i2dashboard}.
}
\description{
Creates a page with up to four different linked components, including a scatterplot for dimension reductions, a bar plot showing numbers of observations by group, and a silhouette plot to assess grouping consistency. Additional sample metadata is visualized using boxplots and barplots, depending on the data type of the underlying variable.
}
\details{
The parameters \code{use_dimred}, \code{sample_metadata} (or \code{assay}) and \code{group_by} take different arguments depending on the class of \code{object}.
In case no object is supplied (\emph{i2dashboard,missing}-method), the parameters are expected to be of class \code{data.frame} or \code{matrix}.
In case a \linkS4class{SingleCellExperiment} or \linkS4class{Seurat} object is supplied, the parameters are expected to be of class \code{character}, containing
\itemize{
\item the name of an item in \code{reducedDims(object)} or \code{object@reductions},
\item a valid assay name from \code{assayNames(object)} or \code{names(object@assays)},
\item column names of \code{colData(object)} or \code{object@meta.data}.
}
}
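% A minimal usage sketch; the objects `dashboard` and `sce`, the "UMAP" reduction and
% the colData columns used below are illustrative assumptions, not package data.
\examples{
\dontrun{
dashboard <- add_dimred_sample_page(
  dashboard,
  object = sce,
  use_dimred = "UMAP",
  sample_metadata = c("cluster", "total_counts"),
  group_by = "cluster",
  title = "UMAP & sample metadata"
)
}
}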
/man/dimred-sample-page.Rd
permissive
loosolab/i2dash.scrnaseq
R
false
true
3,305
rd
#' Generates a CUSUM person chart for a specified ID.
#'
#' @param cu.object an object returned from cusum or cusum.poly
#' @param ID a numeric ID.
#' @return Returns a plot of the CUSUM person chart for the specified ID.
#' @export
#' @import graphics
cusum.plot <- function(cu.object, ID){
  cu.dat <- cu.object$CusumChart
  c.max <- cu.dat[seq(1,nrow(cu.dat),by=2),]
  c.min <- cu.dat[seq(2,nrow(cu.dat),by=2),]
  plot(0:ncol(cu.dat),c(0,c.max[ID,]),type = "l", col= "green",
       xlim=c(0, ncol(cu.dat)), ylim=c(min(c.max[ID,])-.5, max(c.max[ID,])+.5),
       ylab= "CUSUM", xlab = "Item", main = paste("Chart Number",ID))
  lines(0:ncol(cu.dat),c(0,c.min[ID,]),type = "l",col = "red")
}
/R/cusum.plot.R
no_license
cran/PsyControl
R
false
false
665
r
# ================================================== # project: Data for Jan # Author: Andres Castaneda # Dependencies: The World Bank # ---------------------------------------------------- # Creation Date: 2020-05-21 # Modification Date: # Script version: 01 # References: # # # Output: csv files # ================================================== #---------------------------------------------------------- # Load libraries #---------------------------------------------------------- library("tidyverse") library("data.table") library("janitor") library("pins") #---------------------------------------------------------- # subfunctions #---------------------------------------------------------- source("R/utils.R") board_register("rsconnect", server = "http://w0lxopshyprd1b.worldbank.org:3939/") #---------------------------------------------------------- # hundred people #---------------------------------------------------------- source("R/SDG_10_hundred_people.R") cf <- cf[, .(welfare, population = weight, countrycode)] write_csv(cf, file = "data/SDG10_daily_income.csv", col_names = TRUE, na = "") pe <- pe[, .( percentile = hcf, headcount = CSy, welfare, cumm_income = qc, share_income = Sy )] write_csv(pe, file = "data/SDG10_perfect_equality.csv", col_names = TRUE, na = "") df <- pin_get("acastanedaa/percentiles_country_povcalnet", board = "rsconnect") df <- df[, .( countrycode, year, percentile = quantile, headcount, share_income = Sy, welfare, cumm_income = qc )] write_csv(df, file = "data/SDG10_share_income.csv", col_names = TRUE, na = "") df2 <- df[countrycode %chin% c("BRA", "FIN") & year %in% c(1993, 2017)] write_csv(df2, file = "data/SDG10_share_income_BRA_FIN.csv", col_names = TRUE, na = "") #---------------------------------------------------------- # Change in Gini #---------------------------------------------------------- source("R/SDG_10_Gini_Change.R") df_g <- df_g %>% filter(gini_1 != gini_2) %>% rename(gini_2000 = gini_1, gini_2018 = gini_2) write_csv(df_g, file = "data/SDG10_change_gini.csv", col_names = TRUE, na = "") #---------------------------------------------------------- # P10 P50 and P90 charts #---------------------------------------------------------- source("R/SDG_10_p10p90p50.R") dfc_2c <- dfc_2c %>% rename(ratio_90_10 = r9010) %>% select(-c(lending, fcountrycode, countryx)) write_csv(dfc_2c, file = "data/SDG10_p10p90p50_two_countries.csv", col_names = TRUE, na = "") dfc_1 <- dfc_1 %>% rename(ratio_90_10 = r9010) %>% select(-c(lending, fcountrycode)) write_csv(dfc_1, file = "data/SDG10_p10p90p50_ALL_countries.csv", col_names = TRUE, na = "") #---------------------------------------------------------- # Inequality between countries #---------------------------------------------------------- source("R/SDG_10_global_ineq_between_countries.R") p50d_15 <- p50d_15[, .(countrycode, meadian = p50, decile = qp50)] write_csv(p50d_15, file = "data/SDG10_medians_2015.csv", col_names = TRUE, na = "") pr <- pr[, .(year, palma)] write_csv(pr, file = "data/SDG10_palma_ratio.csv", col_names = TRUE, na = "") dfq <- dfq %>% filter(!is.na(p50)) %>% select(countrycode, year, median = p50) write_csv(dfq, file = "data/SDG10_medians_overTime.csv", col_names = TRUE, na = "") #---------------------------------------------------------- # All percentiles #---------------------------------------------------------- dfc <- read_rds("data/dfc.rds") names <- read_rds("data/cty_regs_names.rds") setDT(names) qtile <- function(x) { nq <- 10 N <- length(x) csw <- 1:N qp <- floor(csw / ((N + 1) / nq)) 
+ 1
  return(qp)
}

# set data.table
DT <- as.data.table(dfc)

oldn <- c("threshold", "goal")
newn <- c("percentile_value", "percentile")

setnames(DT, oldn, newn, skip_absent = TRUE)
setorder(DT, year, percentile, percentile_value)  # Sort

DT <- DT[# remove years before 1990
  year >= 1990
][# drop negative or missing values (which we should not have)
  percentile_value > 0 & !is.na(percentile_value)
][, # multiply by 100
  percentile := 100 * percentile
][, # Create deciles in each percentile
  decile_within_percentile := qtile(percentile_value),
  by = .(year, percentile)
][,
  headcount := NULL
]

# Add countryname and region
DT[names,
   on = .(countrycode),
   `:=`(
     countryname = i.countryname,
     region      = i.region
   )
]

write_csv(DT, file = "data/SDG10_percentiles_overTime.csv",
          col_names = TRUE, na = "")

#----------------------------------------------------------
#   Non-monetary measures
#----------------------------------------------------------
source("R/SDG_10_non_monetary_Growth.R")

# Social protection coverage
coverd <- coverd %>%
  select(Year, countrycode, countryname, region, incomegroup,
         quintile, value, growth, val) %>%
  rename(mean2007 = val)  # This mean is used for sorting in the graph

write_csv(coverd, file = "data/SDG10_social_protection_cover.csv",
          col_names = TRUE, na = "")

# Social Protection: CPI social protection rating
CPIss <- CPIss %>%
  select(
    Year, countrycode, countryname, region, incomegroup,
    value, v2007, growth, win, loss, initial
  ) %>%
  rename(value2007 = v2007)  # for sorting

write_csv(CPIss, file = "data/SDG10_social_protection_rating.csv",
          col_names = TRUE, na = "")

# Remittances
remit_from <- remit_from %>%
  select(year, countrycode, countryname, region, incomegroup,
         value, growth, span) %>%
  rename(time_span = span)  # For graphs of growth I keep those w/ a span greater than 5 years

write_csv(remit_from, file = "data/SDG10_remittances_origin.csv",
          col_names = TRUE, na = "")

remit_to <- remit_to %>%
  select(year, countrycode, countryname, region, incomegroup,
         value, growth, span) %>%
  rename(time_span = span)  # For graphs of growth I keep those w/ a span greater than 5 years

write_csv(remit_to, file = "data/SDG10_remittances_desstination.csv",
          col_names = TRUE, na = "")

# policies for social inclusion/equity
CPIinc <- CPIinc %>%
  select(
    Year, countrycode, countryname, region, incomegroup,
    value, v2007, growth, win, loss, initial
  ) %>%
  rename(value2007 = v2007)  # for sorting

write_csv(CPIinc, file = "data/SDG10_social_inclusion_rating.csv",
          col_names = TRUE, na = "")

# AID
AID <- AID %>%
  filter(incomegroup != "High income") %>%
  group_by(countrycode) %>%
  filter(row_number() == n()) %>%
  select(countrycode, countryname, region, incomegroup,
         v2007, v2017, change, growth) %>%
  rename(value2007 = v2007,
         value2017 = v2017)

write_csv(AID, file = "data/SDG10_aid.csv",
          col_names = TRUE, na = "")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#---------   Bottom 40   ---------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
source("R/SDG_10_B40_simple.R")

write_csv(df, file = "data/SDG10_B40_simple.csv",
          col_names = TRUE, na = "")

cb40 <- read_csv("data/PEB_b40_profiling.csv") %>%
  janitor::clean_names() %>%
  select(region,
         countrycode    = code,
         countryname,
         characteristic = precase,
         incomegroup,
         year,
         share_b40      = b40,
         share_t60      = t60
  )

write_csv(cb40, file = "data/SDG10_B40_characteristics.csv",
          col_names = TRUE, na = "")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#---------   Share below 50% of median   ---------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
source(here("R", "SDG_10_50percent_median.R"))

write_csv(d45, file = "data/SDG10_50percent_median_2000-2017.csv",
          col_names = TRUE, na = "")
/R/sdg_10_dataJan.R
no_license
randrescastaneda/SDG_2020
R
false
false
8,596
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_and_plot.R
\name{plot_model_set}
\alias{plot_model_set}
\title{Plot several causal hypotheses at once.}
\usage{
plot_model_set(
  model_set,
  labels = NULL,
  algorithm = "kk",
  manual_layout = NULL,
  text_size = 5,
  box_x = 12,
  box_y = 10,
  edge_width = 1,
  curvature = 0.05,
  rotation = 0,
  flip_x = FALSE,
  flip_y = FALSE,
  nrow = NULL,
  arrow = grid::arrow(type = "closed", 15, grid::unit(10, "points"))
)
}
\arguments{
\item{model_set}{A list of \code{DAG} objects, usually created with
\code{\link[=define_model_set]{define_model_set()}}.}

\item{labels}{An optional set of labels to use for the nodes. This should be a
named vector, of the form \code{c(var1 = "label1", var2 = "label2")}. If left at
\code{NULL}, the variable names of the DAGs are used.}

\item{algorithm}{A layout algorithm from \code{igraph}, see
\code{\link[ggraph:create_layout]{ggraph::create_layout()}}. By default, uses the
Kamada-Kawai layout algorithm. Another good option is \code{"sugiyama"}, which is
designed to minimize edge crossing in DAGs. However, it can often plot nodes too
close together.}

\item{manual_layout}{Alternatively, precisely define the layout yourself, by
providing a \code{data.frame} that at least has a column \code{name} with all
variable names, and columns \code{x} and \code{y} with positions to be plotted.
Setting this parameter overrides \code{algorithm}, but other changes, such as
\code{rotation} and \code{flip}s, will still be applied.}

\item{text_size}{Size of the node label text.}

\item{box_x}{To avoid the arrows colliding with the nodes, specify the rectangular
dimensions of an invisible box around each node. If you have long labels, you need
to increase this.}

\item{box_y}{To avoid the arrows colliding with the nodes, specify the rectangular
dimensions of an invisible box around each node. If you have multi-line labels, you
need to increase this.}

\item{edge_width}{Width of the edges.}

\item{curvature}{Curvature of the edges. A slight curvature can look pretty.}

\item{rotation}{Supply the degrees you want to rotate the layout by. This is useful
in order to rotate your upstream nodes towards the top if needed.}

\item{flip_x}{Whether to flip the node positions horizontally.}

\item{flip_y}{Whether to flip the node positions vertically.}

\item{nrow}{Number of rows to display the models on.}

\item{arrow}{A \code{grid::arrow} object, specifying the shape and size of the
arrowheads. The order of facets is taken from the ordering of the list, with the
facet labels coming from the names of the list. If the list is unnamed, sequential
lettering is used.}
}
\value{
A \code{ggplot} object.
}
\description{
Plot several causal hypotheses at once.
}
\examples{
m <- list(one = DAG(a ~ b + c + d), two = DAG(a ~ b, b ~ c, d ~ d))
plot_model_set(m)
plot_model_set(m, algorithm = "sugiyama")
}
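% A hedged sketch of the manual_layout argument documented above. The model list
% mirrors the example in this file; the coordinates are illustrative only.
%   m   <- list(one = DAG(a ~ b + c + d), two = DAG(a ~ b, b ~ c, d ~ d))
%   lay <- data.frame(name = c("a", "b", "c", "d"),
%                     x    = c(2, 1, 2, 3),
%                     y    = c(1, 2, 3, 2))
%   plot_model_set(m, manual_layout = lay)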
/man/plot_model_set.Rd
no_license
achazhardenberg/phylopath
R
false
true
2,930
rd
#' all_intersect
#'
#' @param geom A list of sf/sfc geometries to intersect.
#'
#' @return The geometry containing the intersection of all geometries in `geom`.
#' @import sf
#'
all_intersect <- function(geom){
  # Fold st_intersection over the whole list so any number of geometries is handled
  geom_intersect = Reduce(sf::st_intersection, geom)
  return(geom_intersect)
}
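# Illustrative usage sketch (toy geometries, not from this repo): two overlapping
# buffers around points, whose common area is returned by all_intersect().
# library(sf)
# g1 <- st_buffer(st_sfc(st_point(c(0, 0))), dist = 1)
# g2 <- st_buffer(st_sfc(st_point(c(1, 0))), dist = 1)
# overlap <- all_intersect(list(g1, g2))
# plot(overlap)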
/R/all_intersect.R
no_license
Caoimhyn/OuVivre
R
false
false
263
r
library(NoiseFiltersR)

### Name: ModeFilter
### Title: Mode Filter
### Aliases: ModeFilter ModeFilter.default ModeFilter.formula

### ** Examples

# Next example is not run because in some cases it can be rather slow
## Not run: 
##D data(iris)
##D out <- ModeFilter(Species~., data = iris, type = "classical", noiseAction = "remove")
##D print(out)
##D identical(out$cleanData, iris[setdiff(1:nrow(iris),out$remIdx),])
## End(Not run)
/data/genthat_extracted_code/NoiseFiltersR/examples/ModeFilter.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
453
r
##
## A bunch of handy functions.
##

#' Load a list of libraries, stopping at the first one that fails to attach.
#'
#' @param librariesList Character vector of package names to load.
#'
#' @return TRUE if every package was attached; otherwise the name of the first
#'   package that failed to load.
#'
loadLibraries <- function(librariesList){
    result <- TRUE
    for (lib in librariesList) {
        # logical.return = TRUE makes library() return FALSE instead of erroring
        if (!library(lib, character.only = TRUE, logical.return = TRUE)) {
            result <- lib
            break
        }
    }
    return(result)
}
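# Illustrative usage sketch (package names below are placeholders, not required
# by this repo): stop early and report which package could not be loaded.
# loaded <- loadLibraries(c("datasets", "utils"))
# if (!isTRUE(loaded)) {
#     stop("Could not load package: ", loaded)
# }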
/helpers.R
no_license
valieg/ExData_Plotting1
R
false
false
450
r
library(readxl)
library(dplyr)

# setwd("~/Desktop/Competition/original data")
# july = read_excel('ActiveSoybeanContractsfor2020.CSV.xlsx')

# weather
setwd("~/Desktop/Competition/new_weather")
july = read.csv('may_contract.csv')
arkansas = read.csv('arkansas_temp.csv')
illinois = read.csv('illinois_temp.csv')
indiana = read.csv('indiana_temp.csv')
lowa = read.csv('iowa_temp.csv')
minnesota = read.csv('minnesota_temp.csv')
missouri = read.csv('missouri_temp.csv')
north_dakota = read.csv('north_temp.csv')
ohio = read.csv('ohio_temp.csv')
south_dakota = read.csv('south_temp.csv')
nebraska = read.csv('nebraska_temp.csv')

july$Time = as.Date(july$Time)
arkansas$YEARMODA = as.Date(arkansas$YEARMODA)
illinois$YEARMODA = as.Date(illinois$YEARMODA)
indiana$YEARMODA = as.Date(indiana$YEARMODA)
lowa$YEARMODA = as.Date(lowa$YEARMODA)
minnesota$YEARMODA = as.Date(minnesota$YEARMODA)
missouri$YEARMODA = as.Date(missouri$YEARMODA)
north_dakota$YEARMODA = as.Date(north_dakota$YEARMODA)
ohio$YEARMODA = as.Date(ohio$YEARMODA)
south_dakota$YEARMODA = as.Date(south_dakota$YEARMODA)
nebraska$YEARMODA = as.Date(nebraska$YEARMODA)

minnesota_t = minnesota %>% group_by(YEARMODA) %>% summarize(minnesota_temp = mean(TEMP, na.rm = TRUE))
illinois_t = illinois %>% group_by(YEARMODA) %>% summarize(illinois_temp = mean(TEMP, na.rm = TRUE))
lowa_t = lowa %>% group_by(YEARMODA) %>% summarize(lowa_temp = mean(TEMP, na.rm = TRUE))
nebraska_t = nebraska %>% group_by(YEARMODA) %>% summarize(nebraska_temp = mean(TEMP, na.rm = TRUE))
ohio_t = ohio %>% group_by(YEARMODA) %>% summarize(ohio_temp = mean(TEMP, na.rm = TRUE))
arkansas_t = arkansas %>% group_by(YEARMODA) %>% summarize(arkansas_temp = mean(TEMP, na.rm = TRUE))
indiana_t = indiana %>% group_by(YEARMODA) %>% summarize(indiana_temp = mean(TEMP, na.rm = TRUE))
missouri_t = missouri %>% group_by(YEARMODA) %>% summarize(missouri_temp = mean(TEMP, na.rm = TRUE))
north_dakota_t = north_dakota %>% group_by(YEARMODA) %>% summarize(northdakota_temp = mean(TEMP, na.rm = TRUE))
south_dakota_t = south_dakota %>% group_by(YEARMODA) %>% summarize(southdakota_temp = mean(TEMP, na.rm = TRUE))

# arkansas_t = arkansas %>% select(YEARMODA,arkansas_temp = TEMP)
# illinois_t = illinois %>% select(YEARMODA,illinois_temp = TEMP)
# indiana_t = indiana %>% select(YEARMODA,indiana_temp = TEMP)
# lowa_t = lowa %>% select(YEARMODA,lowa_temp = TEMP)
# minnesota_t = minnesota %>% select(YEARMODA,minnesota_temp = TEMP)
# missouri_t = missouri %>% select(YEARMODA,missouri_temp = TEMP)
# north_dakota_t = north_dakota %>% select(YEARMODA,north_dakota_temp = TEMP)
# ohio_t = ohio %>% select(YEARMODA,ohio_temp = TEMP)
# south_dakota_t = south_dakota %>% select(YEARMODA,south_dakota_temp = TEMP)
# nebraska_t = nebraska %>% select(YEARMODA,nebraska_temp = TEMP)

weather_10 = merge(july,arkansas_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,illinois_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,indiana_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,lowa_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,minnesota_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,missouri_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,north_dakota_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,ohio_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,south_dakota_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)
weather_10 = merge(weather_10,nebraska_t,by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE)

#weather_10$avg_temp = rowMeans(weather_10[,6:15], na.rm = TRUE)

out_temp = weather_10 %>% select(arkansas_temp:nebraska_temp)
write.csv(out_temp, 'temp.csv', row.names = FALSE)

# ccf1 = ccf(weather_10$Close, weather_10$avg_temp)
# noem_data = as.data.frame(scale(weather_10[,-1]))
# ccf2 = ccf(noem_data$Close, weather_10$avg_temp)
# lm1 = lm(Close ~ .-Open-High-Low, noem_data)
# summary(lm1)

###################################rainfall###################################
# setwd("~/Desktop/Competition/rainfall")
# minnesota_r = read.csv('MinnesotaDailyWeather_09-2017_10-2019.csv')
# illinois_r = read.csv('IllinoisDailyWeather_09-2017_10-2019.csv')
# lowa_r = read.csv('IowaDailyWeather_09-2017_10-2019.csv')
# nebraska_r = read.csv('NebraskaDailyWeather_09-2017_10-2019.csv')
# ohio_r = read.csv('OhioDailyWeather_09-2017_10-2019.csv')
# arkansas_r = read.csv('ArkansasDailyWeather_09-2017_10-2019.csv')
# indiana_r = read.csv('IndianaDailyWeather_09-2017_10-2019.csv')
# missouri_r = read.csv('MissouriDailyWeather_09-2017_10-2019.csv')
# northdakota_r = read.csv('NorthDakotaDailyWeather_09-2017_10-2019.csv')
# southdakota_r = read.csv('SouthDakotaDailyWeather_09-2017_10-2019.csv')

arkansas_r = read.csv('Arkansas_Weather.csv')
illinois_r = read.csv('Illinois_Weather.csv')
indiana_r = read.csv('Indiana_Weather.csv')
lowa_r = read.csv('Iowa_Weather.csv')
minnesota_r = read.csv('Minnesota_Weather.csv')
missouri_r = read.csv('Missouri_Weather.csv')
northdakota_r = read.csv('North_Dakota_Weather.csv')
ohio_r = read.csv('Ohio_Weather.csv')
southdakota_r = read.csv('South_Dakota_Weather.csv')
nebraska_r = read.csv('Nebraska_Weather.csv')

nrow(distinct(ohio_r))

minnesota_rs = minnesota_r %>% group_by(DATE) %>% summarize(minnesota_prcp = mean(PRCP, na.rm = TRUE))
illinois_rs = illinois_r %>% group_by(DATE) %>% summarize(illinois_prcp = mean(PRCP, na.rm = TRUE))
lowa_rs = lowa_r %>% group_by(DATE) %>% summarize(lowa_prcp = mean(PRCP, na.rm = TRUE))
nebraska_rs = nebraska_r %>% group_by(DATE) %>% summarize(nebraska_prcp = mean(PRCP, na.rm = TRUE))
ohio_rs = ohio_r %>% group_by(DATE) %>% summarize(ohio_prcp = mean(PRCP, na.rm = TRUE))
arkansas_rs = arkansas_r %>% group_by(DATE) %>% summarize(arkansas_prcp = mean(PRCP, na.rm = TRUE))
indiana_rs = indiana_r %>% group_by(DATE) %>% summarize(indiana_prcp = mean(PRCP, na.rm = TRUE))
missouri_rs = missouri_r %>% group_by(DATE) %>% summarize(missouri_prcp = mean(PRCP, na.rm = TRUE))
northdakota_rs = northdakota_r %>% group_by(DATE) %>% summarize(northdakota_prcp = mean(PRCP, na.rm = TRUE))
southdakota_rs = southdakota_r %>% group_by(DATE) %>% summarize(southdakota_prcp = mean(PRCP, na.rm = TRUE))

# minnesota_rs = minnesota_r %>% filter(STATION == levels(minnesota_r$STATION)[4]) %>% select(DATE, minnesota_prcp = PRCP)
# illinois_rs = illinois_r %>% filter(STATION == levels(illinois_r$STATION)[7]) %>% select(DATE, illinois_prcp = PRCP)
# lowa_rs = lowa_r %>% filter(STATION == levels(lowa_r$STATION)[2]) %>% select(DATE, lowa_prcp = PRCP)
# nebraska_rs = nebraska_r %>% filter(STATION == levels(nebraska_r$STATION)[3]) %>% select(DATE, nebraska_prcp = PRCP)
# ohio_rs = ohio_r %>% filter(STATION == levels(ohio_r$STATION)[1]) %>% select(DATE, ohio_prcp = PRCP)

minnesota_rs$DATE = as.Date(minnesota_rs$DATE)
illinois_rs$DATE = as.Date(illinois_rs$DATE)
lowa_rs$DATE = as.Date(lowa_rs$DATE)
nebraska_rs$DATE = as.Date(nebraska_rs$DATE)
ohio_rs$DATE = as.Date(ohio_rs$DATE)
arkansas_rs$DATE = as.Date(arkansas_rs$DATE)
indiana_rs$DATE = as.Date(indiana_rs$DATE)
missouri_rs$DATE = as.Date(missouri_rs$DATE)
northdakota_rs$DATE = as.Date(northdakota_rs$DATE)
southdakota_rs$DATE = as.Date(southdakota_rs$DATE)

weather_15 = merge(weather_10,minnesota_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,illinois_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,lowa_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,nebraska_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,ohio_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,arkansas_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,indiana_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,missouri_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,northdakota_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)
weather_15 = merge(weather_15,southdakota_rs,by.x = 'Time', by.y = 'DATE', all.x = TRUE)

out_prep = weather_15 %>% select(minnesota_prcp:southdakota_prcp)
write.csv(out_prep, 'prep.csv', row.names = FALSE)

colnames(weather_15)
weather_15$avg_temp = rowMeans(weather_15[,11:20], na.rm = TRUE)
weather_15$avg_prcp = rowMeans(weather_15[,21:30], na.rm = TRUE)

out = weather_15 %>% select(Time:sent_agri, avg_temp, avg_prcp)
write.csv(out,'MayContract_latest.csv', row.names = FALSE)

lm1 = lm(Close ~ .-Date-avg_temp, out)
summary(lm1)
lm2 = lm(Close ~ .-Date-avg_prcp, out)
summary(lm2)
lm3 = lm(Close ~ .-Date, out)
summary(lm3)

#####################
corn = read.csv('May_Super_Final.csv')
weather = read.csv('MayContract_latest.csv')

corn = corn %>% select(Time = Date, Soybean_Imports:Fertilizer_Price)
corn$Time = as.Date(corn$Time,format = '%m/%d/%y')
weather$Time = as.Date(weather$Time)

final = merge(weather, corn, by = 'Time')
write.csv(final, '5Contract_Latest.csv',row.names = FALSE)

latest = read.csv('MayContract_latest.csv')
str(latest)
latest$soyoil_imports = as.numeric(latest$soyoil_imports)
latest$soyoil_exports = as.numeric(latest$soyoil_exports)
latest$US_Dollar_Index = as.numeric(latest$US_Dollar_Index)
latest$SunflowerPrice = as.numeric(latest$SunflowerPrice)
latest$CottonseedPrice = as.numeric(latest$CottonseedPrice)
latest[is.na(latest)] = 0
summary(latest)
write.csv(latest,'5contrat.csv',row.names = FALSE)
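# Illustrative alternative (a sketch, assuming the *_t summaries built earlier in
# this file): the ten state-by-state merges could be folded into a single Reduce()
# call. weather_10_alt is a hypothetical name; it should match weather_10 above.
# temp_list <- list(arkansas_t, illinois_t, indiana_t, lowa_t, minnesota_t,
#                   missouri_t, north_dakota_t, ohio_t, south_dakota_t, nebraska_t)
# weather_10_alt <- Reduce(
#   function(acc, tbl) merge(acc, tbl, by.x = 'Time', by.y = 'YEARMODA', all.x = TRUE),
#   temp_list,
#   init = july
# )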
/weather_selection.R
permissive
PatrickJamesHoban/soybean-market-price-forecasting
R
false
false
9,674
r
#Script transforming simuPOP csv into BayEnv input file

#Set working directory
setwd('some directory')

#Setting up some variables
#Range of the simulations for the "for loop"
range_sim=1:100
#Prefix for the CSV file names
prefCSV='sim'
#Number of populations
nb_pop=16
#Number of individuals sampled per population
nb_ind=20
#Folder for the output
foldout='BayEnv'

#Beginning a loop over the 100 simulations
for (k in range_sim) {
  print(paste('Simulation number ',k,sep=''))
  #Reading data
  data<-read.csv(paste(prefCSV,k,'.csv',sep=''),header=FALSE)
  #Creating a vector for env values: note that 1:nb_pop*nb_ind is
  #(1:nb_pop)*nb_ind, i.e. one row (the last sampled individual) per population
  env<-data[,2]
  env<-env[1:nb_pop*nb_ind]
  env<-(env-mean(env))/sd(env)
  #Creating fake environmental values
  fake_env<-runif(nb_pop,-10,10)
  fake_env<-(fake_env-mean(fake_env))/sd(fake_env)
  #Saving env variables into a file for BayEnv
  write.table(rbind(env,fake_env),file=paste('./',foldout,'/Env/sim',k,'.env',sep=''),
              row.names=FALSE,col.names=FALSE,sep='\t',eol='\t\n')
}
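#Clarifying sketch (not part of the original script): the subscript above relies on
#R operator precedence, where : binds tighter than *. An equivalent, more explicit
#index can be built with seq().
# idx_implicit <- 1:nb_pop * nb_ind
# idx_explicit <- seq(from = nb_ind, by = nb_ind, length.out = nb_pop)
# stopifnot(all(idx_implicit == idx_explicit))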
/MathieuGautier/ScriptsR/scriptBayEnv_env.R
no_license
cayek/Data2016_2017
R
false
false
988
r
#NFL team stats

# Section 0: load packages ----
library(dplyr)
library(ggplot2)
library(ggthemes)
library(rvest)
library(lubridate)
library(knitr)
library(rmarkdown)
library(reshape2)
library(extrafont)

#
#
#

# Section 1: OFFENSE ----

#scrape offense data using rvest package
url.offense.2015 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2015&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2014 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2014&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2013 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2013&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2012 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2012&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2011 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2011&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2010 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2010&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2009 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2009&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2008 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2008&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2007 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2007&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2006 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2006&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"
url.offense.2005 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&offensiveStatisticCategory=GAME_STATS&conference=ALL&role=TM&season=2005&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1"

#builds data frames from tables
offense.2015 <- as.data.frame(url.offense.2015 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2014 <- as.data.frame(url.offense.2014 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2013 <- as.data.frame(url.offense.2013 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2012 <- as.data.frame(url.offense.2012 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2011 <- as.data.frame(url.offense.2011 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2010 <- as.data.frame(url.offense.2010 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2009 <- as.data.frame(url.offense.2009 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2008 <- as.data.frame(url.offense.2008 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2007 <- as.data.frame(url.offense.2007 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2006 <- as.data.frame(url.offense.2006 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
offense.2005 <- as.data.frame(url.offense.2005 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())

#adds year column to each dataframe
offense.2015$year <- "2015"
offense.2014$year <- "2014"
offense.2013$year <- "2013"
offense.2012$year <- "2012"
offense.2011$year <- "2011"
offense.2010$year <- "2010"
offense.2009$year <- "2009"
offense.2008$year <- "2008"
offense.2007$year <- "2007"
offense.2006$year <- "2006"
offense.2005$year <- "2005"

#combines and formats offensive data frames
offense <- rbind(offense.2015, offense.2014, offense.2013, offense.2012, offense.2011, offense.2010,
                 offense.2009, offense.2008, offense.2007, offense.2006, offense.2005)

names(offense) <- tolower(names(offense))
names(offense)[1] <- "rank"
names(offense)[3] <- "games"
names(offense)[4] <- "pts.game"
names(offense)[5] <- "tot.pts"
names(offense)[9] <- "firstdowns.game"
names(offense)[10] <- "conv.third"
names(offense)[11] <- "att.third"
names(offense)[12] <- "third.rate"
names(offense)[13] <- "conv.fourth"
names(offense)[14] <- "att.fourth"
names(offense)[15] <- "fourth.rate"
names(offense)[18] <- "timepos.game"
names(offense)[21] <- "turnovers"

offense$rank <- as.factor(offense$rank)
offense$team <- as.factor(offense$team)
offense$year <- as.factor(offense$year)

#adds PHI flag, useful for colors when plotting
offense$flag <- "Other"
offense$flag[offense$team == "Philadelphia Eagles"] <- "PHI"
offense$flag <- as.factor(offense$flag)

#removes comma separator and formats as.numeric
offense$scrm.plys <- as.numeric(gsub(',', '', offense$scrm.plys))
offense$pen.yds <- as.numeric(gsub(',', '', offense$pen.yds))

#formats time of possession using lubridate package
offense$timepos.game <- ms(offense$timepos.game)

#removes rank vector
offense$rank <- NULL

#checks structure
str(offense)

#
#
#

# Section 2: DEFENSE ----

#scrape defense data using rvest package
url.defense.2015 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2015&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2014 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2014&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2013 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2013&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2012 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2012&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2011 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2011&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2010 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2010&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2009 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2009&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2008 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2008&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2007 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2007&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2006 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2006&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"
url.defense.2005 <- "http://www.nfl.com/stats/categorystats?tabSeq=2&defensiveStatisticCategory=GAME_STATS&conference=ALL&role=OPP&season=2005&seasonType=REG&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1"

#builds data frames from tables
defense.2015 <- as.data.frame(url.defense.2015 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2014 <- as.data.frame(url.defense.2014 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2013 <- as.data.frame(url.defense.2013 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2012 <- as.data.frame(url.defense.2012 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2011 <- as.data.frame(url.defense.2011 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2010 <- as.data.frame(url.defense.2010 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2009 <- as.data.frame(url.defense.2009 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2008 <- as.data.frame(url.defense.2008 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2007 <- as.data.frame(url.defense.2007 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2006 <- as.data.frame(url.defense.2006 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
defense.2005 <- as.data.frame(url.defense.2005 %>% html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())

#adds year column to each dataframe
defense.2015$year <- "2015"
defense.2014$year <- "2014"
defense.2013$year <- "2013"
defense.2012$year <- "2012"
defense.2011$year <- "2011"
defense.2010$year <- "2010"
defense.2009$year <- "2009"
defense.2008$year <- "2008"
defense.2007$year <- "2007"
defense.2006$year <- "2006"
defense.2005$year <- "2005"

#combines and formats defensive data frames
defense <- rbind(defense.2015, defense.2014, defense.2013, defense.2012, defense.2011, defense.2010,
                 defense.2009, defense.2008, defense.2007, defense.2006, defense.2005)

names(defense) <- tolower(names(defense))
names(defense)[3] <- "def.games"
names(defense)[4] <- "pts.game.allowed"
names(defense)[5] <- "tot.pts.allowed"
names(defense)[6] <- "def.scrm.plys"
names(defense)[7] <- "def.yds.g"
names(defense)[8] <- "def.yds.p"
names(defense)[9] <- "firstdowns.game.allowed"
names(defense)[10] <- "conv.third.allowed"
names(defense)[11] <- "att.third.allowed"
names(defense)[12] <- "third.rate.allowed"
names(defense)[13] <- "conv.fourth.allowed"
names(defense)[14] <- "att.fourth.allowed"
names(defense)[15] <- "fourth.rate.allowed"
names(defense)[16] <- "def.pen"
names(defense)[17] <- "def.pen.yds"
names(defense)[18] <- "time.on.field"
names(defense)[19] <- "def.fum"
names(defense)[20] <- "def.fum.lost"

defense$team <- as.factor(defense$team)
defense$year <- as.factor(defense$year)

#removes comma separator and formats as.numeric
defense$def.scrm.plys <- as.numeric(gsub(',', '', defense$def.scrm.plys))
defense$def.pen.yds <- as.numeric(gsub(',', '', defense$def.pen.yds))

#formats time of possession using lubridate package
defense$time.on.field <- ms(defense$time.on.field)

#removes rank vector - don't need it
defense$rk <- NULL

#checks structure
str(defense)

#
#
#

#merges offense and defense data
stats <- left_join(offense, defense, by = c("team", "year"))
View(stats)

#
#
#

# Section 3: ggplot theme ----

#Global theme options - to easily update all plots at once
font.type <- "Garamond"
background.color <- "#f1f1f1"
line.color <- "#d8d8d8"
title.color <- "#3C3C3C"
title.size <- 22
axis.color <- "#535353"
axis.size <- 14
transparency <- .7 #for alpha
line.size <- 1.6 #for geom_line()
point.size <- 3 #for geom_point()

theme_bg <- theme(panel.background=element_rect(fill=background.color)) +
  theme(plot.background=element_rect(fill=background.color)) +
  theme(panel.grid.major=element_line(colour=line.color,size=.60)) +
  theme(panel.grid.minor=element_line(colour=line.color,size=.05)) +
  theme(axis.ticks=element_blank()) +
  theme(plot.title=element_text(face="bold",vjust=2, hjust=-.07, colour=title.color,size=title.size)) +
  theme(axis.text.x=element_text(size=axis.size,colour=axis.color)) +
  theme(axis.text.y=element_text(size=axis.size,colour=axis.color)) +
  theme(axis.title.y=element_text(size=axis.size,colour=axis.color,vjust=1.5)) +
  theme(axis.title.x=element_text(size=axis.size,colour=axis.color,vjust=-.5)) +
  theme(text=element_text(family=font.type))

#Fonts to plug into font.type variable:
# "Lucida Sans", "Gill Sans MT", "Verdana", "Trebuchet MS", "Georgia", "Garamond"

# Section 4: Stuff for R Markdown doc ----

#table of avg points per game by team for 2005-2015
team.pts <- stats %>%
  group_by(team) %>%
  summarise(mean.points = mean(pts.game)) %>%
  ungroup() %>%
  arrange(desc(mean.points))

names(team.pts)[1] <- "Team"
names(team.pts)[2] <- "Avg. Points per Game: (2005-2015)"

#table of avg points per game by team for 2015
team.pts.2015 <- stats %>%
  filter(year == "2015") %>%
  group_by(team) %>%
  summarise(mean.points = mean(pts.game)) %>%
  ungroup() %>%
  arrange(desc(mean.points))

#plots
(pts.vs.pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=pen.yds/games, y=pts.game)) +
   geom_point(size=3, alpha=.6) +
   labs(x="\nPenalty Yards per Game", y="Points per Game\n",
        title = "Points per Game vs. Penalty Yards per Game (2015)\n") +
   theme_bg)

(pts.vs.pen <- ggplot(subset(stats, year == "2015"), aes(x=pen/games, y=pts.game)) +
   geom_point(size=3, alpha=.6) +
   labs(x="\nPenalties per Game", y="Points per Game\n",
        title = "Points per Game vs. Penalties per Game (2015)\n") +
   theme_bg)

(def.pts.2015 <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team,-pts.game.allowed), y=pts.game.allowed, fill=flag)) +
   geom_bar(stat="identity", alpha=.6) +
   labs(x="", y="Points per Game Allowed\n", title = "Points Allowed: 2015\n") +
   guides(fill=FALSE) +
   scale_fill_manual(values = c("gray37", "darkgreen")) +
   theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
   theme_bg)

(pts.vs.defpen <- ggplot(subset(stats, year == "2015"), aes(x=def.pen/games, y=pts.game.allowed)) +
   geom_point(size=3, alpha=.6) +
   labs(x="\nDefensive Penalties per Game", y="Points per Game Allowed\n",
        title = "Points per Game Allowed vs. Defensive Penalties per Game\n(2015)\n") +
   theme_bg)

(pts.vs.defpenyds <- ggplot(subset(stats, year == "2015"), aes(x=def.pen.yds/games, y=pts.game.allowed)) +
   geom_point(size=3, alpha=.6) +
   labs(x="\nDefensive Penalty Yards per Game", y="Points per Game Allowed\n",
        title = "Points per Game Allowed vs. Defensive Penalty Yards per Game\n(2015)\n") +
   theme_bg)

(first.downs <- ggplot(stats, aes(x=year, y=firstdowns.game)) +
   geom_boxplot(alpha=.6) +
   labs(x="", y="First Downs per Game", title = "Distribution of 1st Downs") +
   theme_bg)

(points <- ggplot(stats, aes(x=year, y=pts.game)) +
   geom_boxplot(alpha=.6) +
   labs(x="", y="Points per Game", title = "Distribution of Points") +
   theme_bg)

(third.downs <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -third.rate.allowed), y=third.rate.allowed, fill = flag)) +
   geom_bar(stat="identity", alpha=transparency) +
   theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) +
   theme_bg +
   guides(fill=FALSE) +
   labs(x="", y="Percentage of 3rd-Down Conversions Allowed by Defense", title="3rd Downs Allowed in 2015") +
   scale_fill_manual(values = c("gray37","darkgreen")))

(def.pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -def.pen.yds/def.games), y=def.pen.yds/def.games, fill = flag)) +
   geom_bar(stat="identity", alpha=transparency) +
   theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) +
   theme_bg +
   guides(fill=FALSE) +
   labs(x="", y="Yards", title="Defensive Penalty Yards Per Game in 2015") +
   scale_fill_manual(values = c("gray37","darkgreen")))

(off.pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -pen.yds/def.games), y=pen.yds/def.games, fill = flag)) +
   geom_bar(stat="identity", alpha=transparency) +
   theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) +
   theme_bg +
   guides(fill=FALSE) +
   labs(x="", y="Yards", title="Offensive Penalty Yards Per Game in 2015") +
   scale_fill_manual(values = c("gray37","darkgreen")))

(pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=pen.yds, y=def.pen.yds)) +
   geom_point(size=point.size, alpha=transparency) +
   labs(x="Offensive Penalty Yards", y="Defensive Penalty Yards", title = "Defensive vs Offensive Penalty Yards") +
   theme_bg)

(first.downs.allowed <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -firstdowns.game.allowed), y=firstdowns.game.allowed, fill = flag)) +
   geom_bar(stat="identity", alpha=transparency) +
   theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) +
   theme_bg +
   guides(fill=FALSE) +
   labs(x="", y="First Downs", title="First Downs Allowed Per Game in 2015") +
   scale_fill_manual(values = c("gray37","darkgreen")))
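# Illustrative refactor sketch (not part of the original analysis): the per-year
# scrapes above could be generated by one helper. It assumes the same nfl.com URL
# pattern and result structure already used in this script; scrape_team_stats is
# a hypothetical name. Query-parameter order differs but the parameters are the same.
# scrape_team_stats <- function(year, role = c("offense", "defense")) {
#   role <- match.arg(role)
#   base <- "http://www.nfl.com/stats/categorystats?tabSeq=2&conference=ALL&seasonType=REG"
#   url <- if (role == "offense") {
#     paste0(base, "&offensiveStatisticCategory=GAME_STATS&role=TM&season=", year,
#            "&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=2&d-447263-n=1")
#   } else {
#     paste0(base, "&defensiveStatisticCategory=GAME_STATS&role=OPP&season=", year,
#            "&d-447263-s=TOTAL_YARDS_GAME_AVG&d-447263-o=1&d-447263-n=1")
#   }
#   out <- as.data.frame(url %>% read_html() %>% html_nodes(xpath = '//*[@id="result"]') %>% html_table())
#   out$year <- as.character(year)
#   out
# }
# offense <- do.call(rbind, lapply(2015:2005, scrape_team_stats, role = "offense"))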
/nfl_team_stats.R
no_license
brndngrhm/nfl_team_stats
R
false
false
17,118
r
names(defense)[16] <- "def.pen" names(defense)[17] <- "def.pen.yds" names(defense)[18] <- "time.on.field" names(defense)[19] <- "def.fum" names(defense)[20] <- "def.fum.lost" defense$team <- as.factor(defense$team) defense$year <- as.factor(defense$year) #removes comma separator and formats as.numeric defense$def.scrm.plys <- as.numeric(gsub(',', '', defense$def.scrm.plys)) defense$def.pen.yds <- as.numeric(gsub(',', '', defense$def.pen.yds)) #formats time of possession using lubridate package defense$time.on.field <- ms(defense$time.on.field) #removes rank vector - dont need it defense$rk <- NULL #checks structure str(defense) # # # #merges offense and defense data stats <- left_join(offense, defense, by = c("team", "year")) View(stats) # # # # Section 3: ggplot theme ---- #Global theme options - to easily all plots at once font.type <- "Garamond" background.color <- "#f1f1f1" line.color <- "#d8d8d8" title.color <- "#3C3C3C" title.size <- 22 axis.color <- "#535353" axis.size <- 14 transparency <- .7 #for alpha line.size <- 1.6 #for geom_line() point.size <- 3 #for geom_point() theme_bg <-theme(panel.background=element_rect(fill=background.color)) + theme(plot.background=element_rect(fill=background.color)) + theme(panel.grid.major=element_line(colour=line.color,size=.60)) + theme(panel.grid.minor=element_line(colour=line.color,size=.05)) + theme(axis.ticks=element_blank()) + theme(plot.title=element_text(face="bold",vjust=2, hjust=-.07, colour=title.color,size=title.size)) + theme(axis.text.x=element_text(size=axis.size,colour=axis.color)) + theme(axis.text.y=element_text(size=axis.size,colour=axis.color)) + theme(axis.title.y=element_text(size=axis.size,colour=axis.color,vjust=1.5)) + theme(axis.title.x=element_text(size=axis.size,colour=axis.color,vjust=-.5)) + theme(text=element_text(family=font.type)) #Fonts to plug into font.type variable "Lucida Sans" "Gil Sans MT" "Verdana" "Trebuchet MS" "Georgia" "Garamond" # Section 4: Stuff for R Markdown doc ---- #table of avg points per game by team for 2005-2015 team.pts <- stats %>% group_by(team) %>% summarise(mean.points = mean(pts.game)) %>% ungroup() %>% arrange(desc(mean.points)) names(team.pts)[1] <- "Team" names(team.pts)[2] <- "Avg. Points per Game: (2005-2015)" #table of avg points per game by team for 2015 team.pts.2015 <- stats %>% filter(year == "2015") %>% group_by(team) %>% summarise(mean.points = mean(pts.game)) %>% ungroup() %>% arrange(desc(mean.points)) #plots (pts.vs.pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=pen.yds/games, y=pts.game)) + geom_point(size=3, alpha=.6) + labs(x="\nPenalty Yards per Game", y="Points per Game\n", title = "Points per Game vs. Penalty Yards per Game (2015)\n") + theme_bg) (pts.vs.pen <- ggplot(subset(stats, year == "2015"), aes(x=pen/games, y=pts.game)) + geom_point(size=3, alpha=.6) + labs(x="\nPenalties per Game", y="Points per Game\n", title = "Points per Game vs. 
Penalties per Game (2015)\n") + theme_bg) (def.pts.2015 <- ggplot(subset(stats,year == "2015"), aes(x=reorder(team,-pts.game.allowed), y=pts.game.allowed, fill=flag)) + geom_bar(stat="identity", alpha=.6) + labs(x="", y="Points per Game Allowed\n", title = "Points Allowed: 2005-2015\n") + guides(fill=FALSE) + scale_fill_manual(values = c("gray37", "darkgreen")) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + theme_bg) (pts.vs.defpen <- ggplot(subset(stats, year == "2015"), aes(x=def.pen/games, y=pts.game.allowed)) + geom_point(size=3, alpha=.6) + labs(x="\n Defensive Penalties per Game", y="Points per GameAllowed\n", title = "Points per Game Allowed vs. Defensive Penalties per Game\n(2015)\n") + theme_bg) (pts.vs.defpenyds <- ggplot(subset(stats, year == "2015"), aes(x=def.pen.yds/games, y=pts.game.allowed)) + geom_point(size=3, alpha=.6) + labs(x="\n Defensive Penalties per Game", y="Points per GameAllowed\n", title = "Points per Game Allowed vs. Defensive Penalties per Game\n(2015)\n") + theme_bg) (first.downs <- ggplot(stats, aes(x=year, y=firstdowns.game)) + geom_boxplot(alpha=.6) + labs(x="", y="First Downs per Game", title = "Distribution of 1st Downs") + theme_bg) (points <- ggplot(stats, aes(x=year, y=pts.game)) + geom_boxplot(alpha=.6) + labs(x="", y="Points per Game", title = "Distribution of Points") + theme_bg) (third.downs <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -third.rate.allowed), y=third.rate.allowed, fill = flag)) + geom_bar(stat="identity", alpha=transparency) + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) + theme_bg + guides(fill=FALSE) + labs(x="", y="Percentage of 3rd-Down Conversionas Allowed by Defense", title="3rd Downs Allowed in 2015") + scale_fill_manual(values = c("gray37","darkgreen"))) (def.pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -def.pen.yds/def.games), y=def.pen.yds/def.games, fill = flag)) + geom_bar(stat="identity", alpha=transparency) + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) + theme_bg + guides(fill=FALSE) + labs(x="", y="Yards", title="Defensive Penalty Yards Per Game in 2015") + scale_fill_manual(values = c("gray37","darkgreen"))) (off.pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -pen.yds/def.games), y=pen.yds/def.games, fill = flag)) + geom_bar(stat="identity", alpha=transparency) + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) + theme_bg + guides(fill=FALSE) + labs(x="", y="Yards", title="Offensive Penalty Yards Per Game in 2015") + scale_fill_manual(values = c("gray37","darkgreen"))) (pen.yds <- ggplot(subset(stats, year == "2015"), aes(x=pen.yds, y=def.pen.yds)) + geom_point(size=point.size, alpha=transparency) + labs(x="Offensive Penalty Yards", y="Defensive Penalty Yards", title = "Defensive vs Offensive Penalty Yards") + theme_bg) (first.downs.allowed <- ggplot(subset(stats, year == "2015"), aes(x=reorder(team, -firstdowns.game.allowed), y=firstdowns.game.allowed, fill = flag)) + geom_bar(stat="identity", alpha=transparency) + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=.2)) + theme_bg + guides(fill=FALSE) + labs(x="", y="First Downs", title="First Downs Allowed Per Game in 2015") + scale_fill_manual(values = c("gray37","darkgreen")))
require(bio.base)
require(bio.lobster)
p = bio.lobster::load.environment()
require(bio.polygons)
p$libs = NULL

require(PBSmapping)
require(bio.lobster)
require(bio.utilities)
require(dplyr)
require(bio.survey)
p = bio.lobster::load.environment()
p$libs = NULL
fp = file.path(project.datadirectory('bio.lobster'),"analysis")
la()
p$yrs = 1947:p$current.assessment.year
load_all('~/GitHub/bio.survey/')

###DATA IMPORT###

windows=T # run in windows environment

if(windows){
  lobster.db( DS = "logs41.redo", p=p)            # Offshore logs monitoring documents
  lobster.db( DS = "atSea.redo", p=p)             # at Sea sampling from materialized view
  lobster.db( DS = "cris.redo", p=p)              # CRIS database
  lobster.db( DS = 'annual.landings.redo', p=p)   # static annual landings table, needs to be updated by CDenton
  lobster.db( DS = 'seasonal.landings.redo', p=p) # static seasonal landings table, needs to be updated by CDenton

  inf = nefsc.db( DS = 'usinf.redo.odbc',fn.root = NULL,p=p)
  ca  = nefsc.db( DS = 'uscat.redo.odbc',fn.root = NULL,p=p)
  de  = nefsc.db( DS = 'usdet.redo.odbc',fn.root = NULL,p=p)

  inf = nefsc.db( DS = 'usinf.clean.redo',fn.root = NULL,p=p)
  de  = nefsc.db( DS = 'usdet.clean.redo',fn.root = NULL,p=p)
  ca  = nefsc.db( DS = 'uscat.clean.redo',fn.root = NULL,p=p)

  lobster.db(DS = 'lfa41.observer.samples.redo')
}

# load .RData objects
lobster.db( DS="logs41", p=p)              # Offshore logs monitoring documents
lobster.db( DS="atSea", p=p)               # at Sea sampling from materialized view
lobster.db( DS="cris", p=p)                # CRIS database
lobster.db( DS = 'annual.landings', p=p)   # static annual landings table, needs to be updated by CDenton
lobster.db( DS = 'seasonal.landings', p=p) # static seasonal landings table, needs to be updated by CDenton

# American Trawl Survey Results
inf = nefsc.db( DS = 'usinf.clean.redo',fn.root = NULL,p=p)
ca  = nefsc.db( DS = 'uscat.clean.redo',fn.root = NULL,p=p)
de  = nefsc.db( DS = 'usdet.clean.redo',fn.root = NULL,p=p)
nefsc.db(DS = 'usstrata.area.redo')

source("C:/Users/Howsevj/Documents/LFA41_UpdateMD/2.stratifiedAnalysis.r")
source("C:/Users/Howsevj/Documents/LFA41_UpdateMD/2d.stratifiedAnalysisCommercial.r")
source("C:/Users/Howsevj/Documents/LFA41_UpdateMD/2e.femalestratifiedSizeFrequencies.r")
source("C:/Users/Howsevj/Documents/LFA41_UpdateMD/3.reproductivePotential.r")
/inst/Updates/LFA41/LFA41_UpdateMD/LFA41_analysis.R
no_license
LobsterScience/bio.lobster
R
false
false
2,370
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_FIA.R
\name{read_ref_table}
\alias{read_ref_table}
\title{Read FIA reference table}
\usage{
read_ref_table(table)
}
\arguments{
\item{table}{name of reference table e.g. "REF_SPECIES"}
}
\value{
dataframe of reference table
}
\description{
Read FIA reference table
}
/man/read_ref_table.Rd
no_license
General-Directorate-Forestry/tidyFIA
R
false
true
355
rd
%% File Name: systime.Rd
%% File Version: 0.15
\name{systime}
\alias{systime}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
\R Utilities: Various Strings Representing System Time
}
\description{
This function generates system time strings in several formats.
}
\usage{
systime()
}
%- maybe also 'usage' for other objects documented here.
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
A vector with entries of system time (see Examples).
}
%\references{
%% ~put references to the literature/web site here ~
%}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
#############################################################################
# EXAMPLE 1: Output of systime
#############################################################################

systime()
##
## > miceadds::systime()
## [1] "2016-02-29 10:25:44"
## [2] "2016-02-29"
## [3] "20160229"
## [4] "2016-02-29_1025"
## [5] "2016-02-29_1000"
## [6] "20160229_102544"
## [7] "20160229102544"
## [8] "IPNERZW-C014_20160229102544"
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%% \keyword{R utilities}
%%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/systime.Rd
no_license
alexanderrobitzsch/miceadds
R
false
false
1,386
rd
\name{hemorrhage}
\alias{hemorrhage}
\docType{data}
\title{
Hemorrhage data from Dupont.
}
\description{
Hemorrhage data from Dupont.
}
\usage{data(hemorrhage)}
\format{
  A data frame with 71 observations on the following 3 variables.
  \describe{
    \item{\code{genotype}}{a numeric vector}
    \item{\code{time}}{a numeric vector}
    \item{\code{recur}}{a numeric vector}
  }
}
\references{
Dupont
}
\examples{
data(hemorrhage)
## maybe str(hemorrhage) ; plot(hemorrhage) ...
}
\keyword{datasets}
/man/hemorrhage.Rd
no_license
kloke/npsm
R
false
false
502
rd
# Minus pseudo-log-likelihood of the X and Y samples for a fixed reliability value.
# VecPar = c(ScaleX, Shape); the Y scale is implied by the reliability constraint,
# and both samples share the same shape and threshold.
FunctionMinusPseudoLogLikeReliabilityXY <- function(VecPar, datosX, datosY, precision, Reliability, Threshold)
{
  ScaleX <- VecPar[1]
  Shape  <- VecPar[2]
  # scale of Y implied by the reliability constraint
  ScaleY <- ScaleX * (1 - Reliability) / Reliability
  # discretized minus log-likelihood of each sample (FunctionMinusLogLike defined elsewhere)
  MenosLogVeroX <- FunctionMinusLogLike(c(Threshold, ScaleX, Shape), datosX, precision)
  MenosLogVeroY <- FunctionMinusLogLike(c(Threshold, ScaleY, Shape), datosY, precision)
  MenosLogVero  <- MenosLogVeroX + MenosLogVeroY
  return(MenosLogVero)
}
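# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how this objective might be minimised with optim() for one
# reliability value. The data vectors, starting values, precision and threshold
# below are assumptions, and FunctionMinusLogLike must already be sourced.
# fit <- optim(par = c(1, 0.1),
#              fn  = FunctionMinusPseudoLogLikeReliabilityXY,
#              datosX = datosX, datosY = datosY,
#              precision = 0.5, Reliability = 0.5, Threshold = 0)
# fit$value   # minus pseudo-log-likelihood at the optimum for this reliability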
/R_Programs_Discretized_Likelihood/02ProfileLikelihoodApproach/FunctionMinusPseudoLogLikeReliabilityXY.R
no_license
gudeliafp/Programs-Discretized-Likelihood
R
false
false
423
r
# Clear variables and load r packages used
rm(list=ls(all=TRUE))
library(graphics)
library(data.table)

# Read all data in
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";")

# Modify Date to be of Date class and subset only the data from 2007-02-01 and 2007-02-02
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
subdata <- subset(data, data$Date >= date1 & data$Date <= date2)

# Add DateTime column and use strptime to make it of class POSIXlt
subdata["DateTime"] <- paste(subdata$Date, subdata$Time)
subdata$DateTime <- strptime(subdata$DateTime, format="%Y-%m-%d %H:%M:%S")

# Prepare data for plotting: convert the measurement columns (3 to ncol-1) to numeric
col <- colnames(subdata)[3:(ncol(subdata) - 1)]
subdata[col] <- sapply(sapply(subdata[col], as.character), as.numeric)

# Create and plot to plot2.png
png("plot2.png", width = 480, height = 480)
par(bg="white")
with(subdata, plot(DateTime, Global_active_power, type="l",
                   xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
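# --- Illustrative alternative (not part of the original file) ---
# data.table is loaded above but never used; a hedged sketch of how the same
# read-and-subset step could be done with data.table::fread, assuming the same
# semicolon-separated file layout:
# dt <- data.table::fread("household_power_consumption.txt", sep = ";")
# dt[, Date := as.Date(Date, format = "%d/%m/%Y")]
# subdt <- dt[Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02")]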
/plot2.R
no_license
iacooana/ExData_Plotting1
R
false
false
1,039
r
# Session 1 Assignment 2

# 1. What should be the output of the following Script?
v <- c(2, 5.5, 6)
t <- c(8, 3, 4)
print(v %/% t)   # element-wise integer division: 0 1 1

#======================================================================================
# 2. You have 25 excel files with names as xx_1.xlsx, xx_2.xlsx, ..., xx_25.xlsx in a dir.
#    Write a program to extract the contents of each excel sheet and make it one df.

library(readxl)
library(dplyr)

setwd("Path containing your excel files")
files <- list.files(pattern = "\\.xlsx$")

# read every file into a named list of data frames
data_list <- list()
for (i in seq_along(files)) {
  filename <- files[i]
  data_list[[filename]] <- read_xlsx(filename)
}

# Suppose the columns are the same for each file,
# you can bind them together in one dataframe with bind_rows from dplyr:
df <- bind_rows(data_list, .id = "id")

# one more option is as follows
df <- lapply(files, read_xlsx) %>% bind_rows()

#==================================================================================
# 3. If the above 25 files were csv files, what would be your script to read?

setwd("Path containing your csv files")
files <- list.files(pattern = "\\.csv$")

data_list <- list()
for (i in seq_along(files)) {
  filename <- files[i]
  data_list[[filename]] <- read.csv(filename, header = TRUE)
}

# bind them together in one dataframe with bind_rows from dplyr:
df <- bind_rows(data_list, .id = "id")

# one more option is as follows
df <- lapply(files, read.csv) %>% bind_rows()
/Session 1- Assignment 2.R
no_license
ayushyuvraj/Assignment1.2
R
false
false
1,516
r
complete <- function(directory, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  cwd <- getwd()
  filename <- function(dir, n) {
    sprintf("%s/%s/%03d%s", cwd, dir, n, ".csv")
  }

  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  dframe <- NULL
  for (i in id) {
    fname <- filename(directory, i)
    data <- read.csv(fname, header = TRUE, sep = ",")
    comp_case <- complete.cases(data$sulfate, data$nitrate)
    number <- nrow(data[comp_case, ])
    dframe <- rbind(dframe, c(i, number))
  }

  ## Return a data frame of the form:
  ## id nobs
  ## 1  117
  ## 2  1041
  ## ...
  ## where 'id' is the monitor ID number and 'nobs' is the
  ## number of complete cases
  dframe <- as.data.frame(dframe)
  names(dframe) <- c("id", "nobs")
  dframe
}
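# --- Illustrative usage (not part of the original file) ---
# A minimal sketch assuming a directory of monitor CSV files (here called
# "specdata") sits in the working directory; the directory name and IDs are
# assumptions, not taken from the source.
# complete("specdata", 1)        # complete cases for monitor 001
# complete("specdata", c(2, 4))  # complete cases for monitors 002 and 004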
/R_Programming/PA1/complete.R
no_license
chenxiao4/DataScienceCoursera
R
false
false
1,061
r
context("calibration") #------------------------------------------------ test_that("calibrate particle works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_named(out, c("output","parameters","model","interventions","scan_results","replicate_parameters")) expect_s3_class(plot(out$scan_results, log = TRUE), "gg") expect_s3_class(plot(out$scan_results), "gg") expect_s3_class(plot(out$scan_results, what = "probability", log = FALSE), "gg") expect_s3_class(plot(out$scan_results, what = "probability", log = TRUE), "gg") expect_warning(expect_s3_class(plot(out, what = "cases", particle_fit = TRUE), "gg")) expect_warning(expect_s3_class(plot(out, what = "deaths", particle_fit = TRUE), "gg")) expect_error(plot(out, what = "rubbish", particle_fit = TRUE),"must be one of") # DATE CHECKS DATE_R0 expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = "2020-03-01", day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 )) expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = last_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 )) # DATE CHECKS DATE_CONTACT expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, contact_matrix_set = list(contact_matrices[[1]]), date_contact_matrix_set_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ), "baseline_contact_matrix can") out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_contact_matrix = contact_matrices[[1]], contact_matrix_set = list(contact_matrices[[1]]), date_contact_matrix_set_change = 
date_R0_change[1], replicates = replicates, country = country, forecast = 0 ) expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, hosp_bed_capacity = 10, date_hosp_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ), "baseline_hosp_bed_capacity can") out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_hosp_bed_capacity = 5, hosp_bed_capacity = 10, date_hosp_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ) # DATE CHECKS DATE_ICU expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_ICU_bed_capacity = 10, ICU_bed_capacity = 100, date_ICU_bed_capacity_change = "2020-05-10", replicates = replicates, country = country, forecast = 0 )) expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, ICU_bed_capacity = 10, date_ICU_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ), "baseline_ICU_bed_capacity can") out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_ICU_bed_capacity = 5, ICU_bed_capacity = 10, date_ICU_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ) }) #------------------------------------------------ test_that("calibrate non future works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 0.00001 R0_max = 0.00001 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 Sys.setenv("SQUIRE_PARALLEL_DEBUG"=TRUE) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) 
}) #------------------------------------------------ test_that("calibrate deterministic", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = deterministic_model(), pars_obs = pars_obs, n_particles = 2, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_true(inherits(out$scan_results$inputs$squire_model, "deterministic")) expect_error(get <- projections(out), "projections needs either time") get <- projections(out, time_period = 5) }) test_that("calibrate user pop and contact", { pop <- get_population("Nigeria") mat <- get_mixing_matrix("Nigeria")*0.9 data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_contact_matrix = mat, replicates = replicates, population = pop$n, forecast = 0 ) expect_true( identical(out$scan_results$inputs$model_params$contact_matrix_set[[1]], mat) ) }) #------------------------------------------------ test_that("calibrate dt not playing with day stepping well", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, replicates = replicates, country = "Algeria", forecast = 0, dt = 0.61 ), "must result in an integer") }) #------------------------------------------------ 
test_that("calibrate 3d particle works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.1 Meff_max = 1.8 Meff_step = 0.8 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 Sys.setenv("SQUIRE_PARALLEL_DEBUG"=FALSE) set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = Meff_min, Meff_max = Meff_max, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, Rt_func = function(R0_change, R0, Meff) {R0_change*Meff*R0}, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_s3_class(plot(out$scan_results), "gg") expect_s3_class(plot(out$scan_results, what = "probability"), "gg") expect_s3_class(plot(out$scan_results, what = "probability", show = c(1,3)), "gg") expect_s3_class(plot(out$scan_results, what = "probability", show = c(2,3)), "gg") expect_s3_class(plot(out$scan_results, show = c(2,3)), "gg") expect_true(which.max(apply(out$scan_results$mat_log_ll,3,sum))==2) }) #------------------------------------------------ test_that("calibrate 3d non future works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.5 Meff_max = 1 Meff_step = 0.5 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 set.seed(93L) Sys.setenv("SQUIRE_PARALLEL_DEBUG"=TRUE) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = Meff_min, Meff_max = Meff_max, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_is(out,"squire_simulation") Sys.setenv("SQUIRE_PARALLEL_DEBUG"=FALSE) }) #------------------------------------------------ test_that("R0 and Meff arge checking", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.5 Meff_max = 1 Meff_step = 0.5 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) 
date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 set.seed(93L) expect_error(out <- calibrate( data = data, R0_min = 2, R0_max = 1.5, R0_step = R0_step, Meff_min = Meff_min, Meff_max = Meff_max, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ), "must be greater") expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 2, Meff_max = 1, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ), "must be greater") }) #------------------------------------------------ test_that("reporting fraction into pars_obs", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs <- list(phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 2, exp_noise = 1e6) n_particles = 5 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 0.1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) set.seed(93L) out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_true(all(out2$replicate_parameters$R0 == out$replicate_parameters$R0)) }) #------------------------------------------------ test_that("date_changes before last start date", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) data <- data[which(data$deaths>0)[1]:nrow(data),] interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-03-04" last_start_date = "2020-03-05" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_R0_change[1] <- date_R0_change[1] - 8 date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 5 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, 
first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 0.1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) date_R0_change[2] <- "2020-03-05" out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 0.1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) }) #------------------------------------------------ test_that("R0_prior", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) data <- data[which(data$deaths>0)[1]:nrow(data),] interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) country = "Algeria" first_start_date = "2020-02-01" last_start_date = "2020-02-02" replicates = 2 R0_min = 1.5 R0_max = 4.5 R0_step = 1 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 5 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, R0_prior = list("func" = dnorm, "args"=list("mean"=2.5,"sd"=0.5,"log"=TRUE)), first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, R0_prior = list("func" = dnorm, "args"=list("mean"=0.1,"sd"=0.01,"log"=TRUE)), first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out3 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 0.9, Meff_max = 1, Meff_step = 0.1, R0_prior = list("func" = dnorm, "args"=list("mean"=0.1,"sd"=0.01,"log"=TRUE)), first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) get <- lapply(list(out,out2, out3), format_output, "deaths") expect_true(max(get[[1]]$y,na.rm=TRUE) > max(get[[2]]$y,na.rm=TRUE)) expect_true(max(get[[1]]$y,na.rm=TRUE) > max(get[[3]]$y,na.rm=TRUE)) }) #------------------------------------------------ test_that("Rt_func", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.6 Meff_max = 0.7 Meff_step = 0.05 day_step = 1 R0_change = int_unique$change date_R0_change = 
as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 10 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 1.5, Meff_max = 2.5, Meff_step = 0.5, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 1.5, Meff_max = 2.5, Meff_step = 0.5, Rt_func = function(R0_change, R0, Meff){R0_change*Meff*R0}, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = list(phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 30, exp_noise = 1e6), n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out3 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 1, Meff_max = 1.1, Meff_step = Meff_step, Rt_func = function(R0_change, R0, Meff){R0_change*Meff*R0}, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = list(phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 30, exp_noise = 1e6), n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_gt(mean(out$scan_results$mat_log_ll),mean(out2$scan_results$mat_log_ll)) expect_gt(mean(out3$scan_results$mat_log_ll),mean(out2$scan_results$mat_log_ll)) })
/tests/testthat/test-calibration.R
permissive
mrc-ide/squire
R
false
false
26,177
r
context("calibration") #------------------------------------------------ test_that("calibrate particle works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_named(out, c("output","parameters","model","interventions","scan_results","replicate_parameters")) expect_s3_class(plot(out$scan_results, log = TRUE), "gg") expect_s3_class(plot(out$scan_results), "gg") expect_s3_class(plot(out$scan_results, what = "probability", log = FALSE), "gg") expect_s3_class(plot(out$scan_results, what = "probability", log = TRUE), "gg") expect_warning(expect_s3_class(plot(out, what = "cases", particle_fit = TRUE), "gg")) expect_warning(expect_s3_class(plot(out, what = "deaths", particle_fit = TRUE), "gg")) expect_error(plot(out, what = "rubbish", particle_fit = TRUE),"must be one of") # DATE CHECKS DATE_R0 expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = "2020-03-01", day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 )) expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = last_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 )) # DATE CHECKS DATE_CONTACT expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, contact_matrix_set = list(contact_matrices[[1]]), date_contact_matrix_set_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ), "baseline_contact_matrix can") out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_contact_matrix = contact_matrices[[1]], contact_matrix_set = list(contact_matrices[[1]]), date_contact_matrix_set_change = 
date_R0_change[1], replicates = replicates, country = country, forecast = 0 ) expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, hosp_bed_capacity = 10, date_hosp_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ), "baseline_hosp_bed_capacity can") out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_hosp_bed_capacity = 5, hosp_bed_capacity = 10, date_hosp_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ) # DATE CHECKS DATE_ICU expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_ICU_bed_capacity = 10, ICU_bed_capacity = 100, date_ICU_bed_capacity_change = "2020-05-10", replicates = replicates, country = country, forecast = 0 )) expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, ICU_bed_capacity = 10, date_ICU_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ), "baseline_ICU_bed_capacity can") out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_ICU_bed_capacity = 5, ICU_bed_capacity = 10, date_ICU_bed_capacity_change = date_R0_change[1], replicates = replicates, country = country, forecast = 0 ) }) #------------------------------------------------ test_that("calibrate non future works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 0.00001 R0_max = 0.00001 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 Sys.setenv("SQUIRE_PARALLEL_DEBUG"=TRUE) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) 
}) #------------------------------------------------ test_that("calibrate deterministic", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = deterministic_model(), pars_obs = pars_obs, n_particles = 2, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_true(inherits(out$scan_results$inputs$squire_model, "deterministic")) expect_error(get <- projections(out), "projections needs either time") get <- projections(out, time_period = 5) }) test_that("calibrate user pop and contact", { pop <- get_population("Nigeria") mat <- get_mixing_matrix("Nigeria")*0.9 data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, baseline_contact_matrix = mat, replicates = replicates, population = pop$n, forecast = 0 ) expect_true( identical(out$scan_results$inputs$model_params$contact_matrix_set[[1]], mat) ) }) #------------------------------------------------ test_that("calibrate dt not playing with day stepping well", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, replicates = replicates, country = "Algeria", forecast = 0, dt = 0.61 ), "must result in an integer") }) #------------------------------------------------ 
test_that("calibrate 3d particle works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.1 Meff_max = 1.8 Meff_step = 0.8 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 Sys.setenv("SQUIRE_PARALLEL_DEBUG"=FALSE) set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = Meff_min, Meff_max = Meff_max, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, Rt_func = function(R0_change, R0, Meff) {R0_change*Meff*R0}, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_s3_class(plot(out$scan_results), "gg") expect_s3_class(plot(out$scan_results, what = "probability"), "gg") expect_s3_class(plot(out$scan_results, what = "probability", show = c(1,3)), "gg") expect_s3_class(plot(out$scan_results, what = "probability", show = c(2,3)), "gg") expect_s3_class(plot(out$scan_results, show = c(2,3)), "gg") expect_true(which.max(apply(out$scan_results$mat_log_ll,3,sum))==2) }) #------------------------------------------------ test_that("calibrate 3d non future works", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.5 Meff_max = 1 Meff_step = 0.5 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 set.seed(93L) Sys.setenv("SQUIRE_PARALLEL_DEBUG"=TRUE) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = Meff_min, Meff_max = Meff_max, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_is(out,"squire_simulation") Sys.setenv("SQUIRE_PARALLEL_DEBUG"=FALSE) }) #------------------------------------------------ test_that("R0 and Meff arge checking", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.5 Meff_max = 1 Meff_step = 0.5 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) 
date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 2 set.seed(93L) expect_error(out <- calibrate( data = data, R0_min = 2, R0_max = 1.5, R0_step = R0_step, Meff_min = Meff_min, Meff_max = Meff_max, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ), "must be greater") expect_error(out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 2, Meff_max = 1, Meff_step = Meff_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ), "must be greater") }) #------------------------------------------------ test_that("reporting fraction into pars_obs", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs <- list(phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 2, exp_noise = 1e6) n_particles = 5 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 0.1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) set.seed(93L) out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_true(all(out2$replicate_parameters$R0 == out$replicate_parameters$R0)) }) #------------------------------------------------ test_that("date_changes before last start date", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) data <- data[which(data$deaths>0)[1]:nrow(data),] interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.6 R0_step = 0.1 first_start_date = "2020-03-04" last_start_date = "2020-03-05" day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_R0_change[1] <- date_R0_change[1] - 8 date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 5 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, 
first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 0.1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) date_R0_change[2] <- "2020-03-05" out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = 0.1, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) }) #------------------------------------------------ test_that("R0_prior", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) data <- data[which(data$deaths>0)[1]:nrow(data),] interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) country = "Algeria" first_start_date = "2020-02-01" last_start_date = "2020-02-02" replicates = 2 R0_min = 1.5 R0_max = 4.5 R0_step = 1 day_step = 1 R0_change = int_unique$change date_R0_change = as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 5 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, R0_prior = list("func" = dnorm, "args"=list("mean"=2.5,"sd"=0.5,"log"=TRUE)), first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, R0_prior = list("func" = dnorm, "args"=list("mean"=0.1,"sd"=0.01,"log"=TRUE)), first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out3 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 0.9, Meff_max = 1, Meff_step = 0.1, R0_prior = list("func" = dnorm, "args"=list("mean"=0.1,"sd"=0.01,"log"=TRUE)), first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) get <- lapply(list(out,out2, out3), format_output, "deaths") expect_true(max(get[[1]]$y,na.rm=TRUE) > max(get[[2]]$y,na.rm=TRUE)) expect_true(max(get[[1]]$y,na.rm=TRUE) > max(get[[3]]$y,na.rm=TRUE)) }) #------------------------------------------------ test_that("Rt_func", { data <- read.csv(squire_file("extdata/example.csv"),stringsAsFactors = FALSE) interventions <- read.csv(squire_file("extdata/example_intervention.csv")) int_unique <- interventions_unique(interventions) reporting_fraction = 1 country = "Algeria" replicates = 2 R0_min = 2.6 R0_max = 2.7 R0_step = 0.1 first_start_date = "2020-02-01" last_start_date = "2020-02-02" Meff_min = 0.6 Meff_max = 0.7 Meff_step = 0.05 day_step = 1 R0_change = int_unique$change date_R0_change = 
as.Date(int_unique$dates_change) date_contact_matrix_set_change = NULL squire_model = explicit_model() pars_obs = NULL n_particles = 10 set.seed(93L) out <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 1.5, Meff_max = 2.5, Meff_step = 0.5, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = pars_obs, n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out2 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 1.5, Meff_max = 2.5, Meff_step = 0.5, Rt_func = function(R0_change, R0, Meff){R0_change*Meff*R0}, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = list(phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 30, exp_noise = 1e6), n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) out3 <- calibrate( data = data, R0_min = R0_min, R0_max = R0_max, R0_step = R0_step, Meff_min = 1, Meff_max = 1.1, Meff_step = Meff_step, Rt_func = function(R0_change, R0, Meff){R0_change*Meff*R0}, first_start_date = first_start_date, last_start_date = last_start_date, day_step = day_step, squire_model = squire_model, pars_obs = list(phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 30, exp_noise = 1e6), n_particles = n_particles, reporting_fraction = reporting_fraction, R0_change = R0_change, date_R0_change = date_R0_change, replicates = replicates, country = country, forecast = 0 ) expect_gt(mean(out$scan_results$mat_log_ll),mean(out2$scan_results$mat_log_ll)) expect_gt(mean(out3$scan_results$mat_log_ll),mean(out2$scan_results$mat_log_ll)) })
#!/usr/bin/env Rscript

#### SET-UP --------------------------------------------------------------------
cat('\n\n#### vcf2loci_filterloci.R: Starting script.\n\n')

## Libraries:
if(!'pacman' %in% rownames(installed.packages())) install.packages('pacman')
library(pacman)
packages <- c('valr', 'tidyverse')
p_load(char = packages, install = TRUE)

## Command-line args:
options(warn = 1)
args <- commandArgs(trailingOnly = TRUE)
infile_locusstats <- args[1]
infile_LD <- args[2]
maxmiss <- as.integer(args[3])
mindist <- as.integer(args[4])
maxLD <- as.numeric(args[5])
indir_fasta <- args[6]
outdir_fasta <- args[7]

cat('\n#### vcf2loci_filterloci.R: Input file with locus stats:', infile_locusstats, '\n')
cat('#### vcf2loci_filterloci.R: Input file with LD stats:', infile_LD, '\n')
cat('#### vcf2loci_filterloci.R: Maximum prop of missing data (maxmiss):', maxmiss, '\n')
cat('#### vcf2loci_filterloci.R: Minimum distance (bp) between loci (mindist):', mindist, '\n')
cat('#### vcf2loci_filterloci.R: Maximum LD (r2) between loci (maxLD):', maxLD, '\n')
cat('#### vcf2loci_filterloci.R: Fasta input dir:', indir_fasta, '\n')
cat('#### vcf2loci_filterloci.R: Fasta output dir:', outdir_fasta, '\n')

## Other vars:
if(!dir.exists(outdir_fasta)) dir.create(outdir_fasta, recursive = TRUE)
nrfiles <- length(list.files(indir_fasta))
cat("\n#### vcf2loci_filterloci.R: Number of files in indir_fasta:", nrfiles, '\n')

#### PROCESS INPUT FILES -------------------------------------------------------
## Locus stats:
lstats <- read.delim(infile_locusstats, as.is = TRUE) %>%
  select(1:12)
colnames(lstats) <- c('locus.full', 'nInd', 'bp', 'nCells', 'nN', 'pN',
                      'nvar', 'pvar', 'nPars', 'pPars', 'AT', 'GC')
lstats <- lstats %>%
  mutate(locus = gsub('.fa', '', locus.full)) %>%
  separate(locus, into = c('scaffold', 'pos'), sep = ':') %>%
  separate(pos, into = c('start', 'end'), sep = '-') %>%
  mutate(start = as.integer(start), end = as.integer(end)) %>%
  select(locus.full, scaffold, start, end, nInd, bp, nN, pN,
         nvar, pvar, nPars, pPars, AT, GC) %>%
  #filter(scaffold != 'Super_Scaffold0') %>%
  arrange(scaffold, start) %>%
  group_by(scaffold) %>%
  mutate(distToNext = lead(start) - (start + bp), # Include distance-to-next locus
         calledSites = round((bp * nInd) * (100 - pN)))

lstats.bed <- lstats %>%
  dplyr::select(scaffold, start, end, locus.full) %>%
  dplyr::rename(chrom = scaffold)

## LD stats:
LD <- read.delim(gzfile(infile_LD), as.is = TRUE, row.names = NULL) %>%
  dplyr::rename(scaffold = X.chr, site1 = Site1, site2 = Site2, r2 = r.2, dist = Dist) %>%
  dplyr::mutate(pair = as.character(paste0(scaffold, ':', site1, '-', site2))) %>%
  dplyr::arrange(dist, site1)

cat('\n#### vcf2loci_filterloci.R: Quantiles of locus length:\n')
print(quantile(lstats$bp))

#### FILTER - MISSING DATA -----------------------------------------------------
## Nr of loci with certain amount of missing data:
nrow.filter <- function(threshold) {
  lstats %>% filter(pN < threshold) %>% nrow()
}
cat('\n#### vcf2loci_filterloci.R: Nr of loci:', nrow(lstats), '\n')
cat('#### vcf2loci_filterloci.R: Nr of loci with <10% N:', nrow.filter(10), '\n')
cat('#### vcf2loci_filterloci.R: Nr of loci with <5% N:', nrow.filter(5), '\n')
cat('#### vcf2loci_filterloci.R: Nr of loci with <1% N:', nrow.filter(1), '\n')
cat('#### vcf2loci_filterloci.R: Nr of loci with no Ns:', nrow.filter(0.001), '\n')

cat('\n#### vcf2loci_filterloci.R: Quantiles of missing data:\n')
quantile(lstats$pN)

missHi.rm <- lstats$locus.full[lstats$pN > maxmiss]

#### FILTER - HIGH LD ----------------------------------------------------------
LDhi <- LD %>% filter(dist > mindist & r2 > maxLD)

site1 <- LDhi %>%
  mutate(start = as.integer(site1 - 1), end = site1) %>%
  rename(chrom = scaffold) %>%
  select(chrom, start, end, pair)
site1.ovl <- bed_intersect(site1, lstats.bed)

site2 <- LDhi %>%
  mutate(start = as.integer(site2 - 1), end = site2) %>%
  rename(chrom = scaffold) %>%
  select(chrom, start, end, pair)
site2.ovl <- bed_intersect(site2, lstats.bed)

LDhi$locus1 <- site1.ovl$locus.full.y[match(LDhi$pair, site1.ovl$pair.x)]
LDhi$locus2 <- site2.ovl$locus.full.y[match(LDhi$pair, site2.ovl$pair.x)]

LDhi <- LDhi %>%
  filter(!is.na(locus1), !is.na(locus2)) %>%
  mutate(locus.comb = paste0(locus1, '_', locus2)) %>%
  select(-pair)
LDhi <- LDhi %>% distinct(locus.comb, .keep_all = TRUE)

LDhi$calledSites1 <- lstats$calledSites[match(LDhi$locus1, lstats$locus.full)]
LDhi$calledSites2 <- lstats$calledSites[match(LDhi$locus2, lstats$locus.full)]

LDhi.rm <- unique(c(LDhi$locus2[which(LDhi$calledSites1 >= LDhi$calledSites2)],
                    LDhi$locus1[which(LDhi$calledSites2 > LDhi$calledSites1)]))
cat("\n#### vcf2loci_filterloci.R: Number of loci to remove due to LD:", length(LDhi.rm), '\n')

#### FILTER - CLOSE PROXIMITY --------------------------------------------------
tooClose.locus1.index <- which(lstats$distToNext < mindist)
tooClose.locus2.index <- tooClose.locus1.index + 1
tooClose <- cbind(lstats[tooClose.locus1.index, c("locus.full", "calledSites")],
                  lstats[tooClose.locus2.index, c("locus.full", "calledSites")])
colnames(tooClose) <- c('locus1', 'calledSites1', 'locus2', 'calledSites2')
tooClose.rm <- unique(c(tooClose$locus2[which(tooClose$calledSites1 >= tooClose$calledSites2)],
                        tooClose$locus1[which(tooClose$calledSites2 > tooClose$calledSites1)]))
cat("\n#### vcf2loci_filterloci.R: Number of loci to remove due to close proximity:", length(tooClose.rm), '\n')

#### COPY GOOD LOCI ------------------------------------------------------------
badloci <- unique(c(LDhi.rm, tooClose.rm, missHi.rm))
cat("\n#### vcf2loci_filterloci.R: Total number of loci to remove:", length(badloci), '\n')

if(length(badloci) > 0) loci.ok <- lstats %>% filter(! locus.full %in% badloci) %>% pull(locus.full)
if(length(badloci) == 0) loci.ok <- lstats %>% pull(locus.full)
cat("\n#### vcf2loci_filterloci.R: Nr of loci selected:", length(loci.ok), '\n')
cat("\n#### vcf2loci_filterloci.R: First 10 loci:\n")
print(head(loci.ok))

loci.ok.files <- paste0(indir_fasta, '/', loci.ok)
nrfiles.found <- sum(file.exists(loci.ok.files))
cat("\n#### vcf2loci_filterloci.R: Nr of files found:", nrfiles.found, '\n')

cat("\n#### vcf2loci_filterloci.R: Copying files to final dir...\n")
loci.copied.files <- paste0(outdir_fasta, '/', loci.ok)
file.copy(from = loci.ok.files, to = loci.copied.files, overwrite = TRUE)

#### REPORT --------------------------------------------------------------------
nrfiles <- length(list.files(outdir_fasta))
cat("\n#### vcf2loci_filterloci.R: Number of files in indir_fasta:", nrfiles, '\n')

cat('\n\n#### vcf2loci_filterloci.R: Done with script.\n')
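## Example invocation (an added sketch; file names and threshold values are hypothetical placeholders).
## The seven positional arguments map, in order, to: locus-stats file, LD file (gzipped), maxmiss,
## mindist, maxLD, fasta input dir, fasta output dir:
# Rscript vcf2loci_filterloci.R locusstats.txt ldstats.geno.ld.gz 5 10000 0.4 fasta_in/ fasta_filtered/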
/geno/vcf2loci/vcf2loci_filterloci.R
no_license
jelmerp/genomics
R
false
false
6,798
r
# Authors: Traci Lim, Willian Skinner, Yi Luo

# Dataset Description:
# -> This data contains airline data from the US Bureau of Transportation for the period of January 2017
# -> We intend to collate all 12 months of data and attempt to predict airline delays using multiclass classification.
# -> Some information about features:
#    OriginAirportID: Identification number assigned by US DOT to identify a unique airport.
#    DepTime: Actual Departure Time (local time: hhmm)
#    ArrDelay: how early/late the plane was at its final destination, in minutes
#    CRSDep: Scheduled Departure Time (local time: hhmm)
#    CRSElapsedTime: the scheduled difference between departure and arrival
#    FLDATE: contains date of flight (for time-series analysis)
#    CARRIER_DELAY, WEATHER_DELAY, NAS_DELAY, SECURITY_DELAY, LATE_AIRCRAFT_DELAY: reasons for delay (in mins)

# SOME NOTES:
# -> "# ->" indicates personal comments/intuition
# "#" indicates the main objective of the code

# -> ***Changelog.v1*** Date: 19/03/2018
# -> Added CARRIER_DELAY, WEATHER_DELAY, NAS_DELAY, SECURITY_DELAY, LATE_AIRCRAFT_DELAY features:
#    Yi's suggestion made a lot of sense; we can use this to gather more information about what causes delays.
#    Although there is a lot of missing data in these features, there are at least 50k usable rows, which is enough for a sound analysis.
# -> Added FLTIME feature: we converted it to date format, which can potentially lead to simple time-series plots
# -> Added DELAY_GROUPS feature: as a response variable for multiclass classification (justification below)
# -> Added code for plotting latitude and longitude data
# -> Please use the data I uploaded; I used a vlookup to fill in the longitude and latitude from a new data source.
#    It is different from the one Bill did: apparently that latlong information is not accurate.
# -> Sections 1 to 6 can be run smoothly.

# -> ***Changelog.v4*** Date: 03/04/2018
# -> Changed section 5.0: Feature Engineering
# -> Changed section 1.0: added another data file, Airport_Lookup.csv, for the section 12.0 function implementation
# -> Added section 8: Evaluate Algorithms: 5 models added
# -> Added section 9: Compare Algorithms
# -> Added section 12: Function Implementation: nearly done with a working R function that implements predictions.
# -> Tasks left: download the full 2017 dataset, re-train models on the full dataset,
#    and change the metric to area under the ROC curve and re-train models,
#    because after selecting the model that gave the best accuracy, I got spurious predictions.
# 1.0 Importing Data ===========================================================================================
setwd("C:/Users/longwind48/Google Drive/Programming/Projects/Airline Delay")
setwd("C:/Users/Bill/Google Drive/MA429_Shared/3 Final_Project")
setwd("C:/Users/longwind48/Google Drive/Programming/Projects/Airline Delay Gitlab/LSEMA429")
setwd("C:/Users/luoyi/Desktop/LSEMA429")

# Import csv file
#dataset <- read.csv("data/Airline_Delay_Edited_19032018.csv", header = TRUE, stringsAsFactors = FALSE)
#load("data/Flight_Data_2017.RData")
load("data/Flight_Data_2017_Sampled.RData")
dataset <- data_2017_100000
dataset$X <- NULL
#airport_lookupDF <- read.csv("data/Airport_Lookup.csv", header = TRUE, stringsAsFactors = FALSE)
#airports.raw <- read.csv("data/airports.csv", header=TRUE, stringsAsFactors = FALSE)
#airports.raw$X <- NULL
#airports <- airports.raw

# 1.1 Load libraries
load.libraries <- c('dplyr', 'Hmisc', 'ggplot2', 'ggmap', 'igraph', 'ggplot2', 'lattice', 'OpenStreetMap',
                    'GoodmanKruskal', 'caret', 'ROCR', 'corrplot', 'ggthemes', 'tictoc', 'MASS', 'caTools',
                    'xgboost', 'Matrix', 'MLmetrics', 'doParallel', 'bst', 'RSNNS', 'caret', 'caretEnsemble',
                    'ROCR', "timeDate", "gridExtra")
sapply(load.libraries, require, character = TRUE)

# 1.1.5 Install missing packages function
ipak <- function(pkg){
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(new.pkg))
    install.packages(new.pkg, dependencies = TRUE)
  sapply(pkg, require, character.only = TRUE)
}
ipak(load.libraries)

# 1.2 Create multiclass categorical response variable from numerical variable ARR_DELAY
# Check the distribution of ARR_DELAY
#hist(dataset$ARR_DELAY, xlim = c(-100,500))
hist(dataset$ARR_DELAY, 2000, xlim = c(-60,120))
# -> The number of flights with ARR_DELAY > 100 mins is much smaller than in the -100 to 100 mins range.
# -> Mean: 6.954, Median: -5.
# -> We could settle for 15-min blocks because the majority of the delays are between -100 and 100 mins.

# Convert numerical ARR_DELAY into discrete categories, for multiclass classification
dataset$DELAY_GROUPS <- cut(dataset$ARR_DELAY,
                            breaks = c(-Inf, 1, 16, 31, 46, 61, 121, Inf),
                            labels = c("no_delay", "delay.1.to.15.mins", "delay.16.to.30.mins",
                                       "delay.31.to.45.mins", "delay.46.to.60.mins",
                                       "delay.61.to.120.mins", "delay.121.mins.or.more"),
                            right = FALSE)
# -> Any value below 1 min is grouped as 'no_delay', and larger values are grouped in roughly 15-min intervals.
# -> DELAY_GROUPS has 7 classes.
# -> This will be our response variable.
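# -> A quick sanity check on the new response variable (a small added sketch; 'dataset' and
#    DELAY_GROUPS are as created above): table() gives the class counts and prop.table() the
#    class shares, which makes the class imbalance visible before modelling.
table(dataset$DELAY_GROUPS)
round(prop.table(table(dataset$DELAY_GROUPS)), 3)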
# 1.3 Preprocessing
# Convert FL_DATE feature from string type to date type
dataset$FL_DATE <- as.Date(dataset$FL_DATE, "%d/%m/%Y")

# Convert features to categorical type
#dataset$CARRIER <- factor(dataset$CARRIER)
dataset$ORIGIN_AIRPORT_ID <- factor(dataset$ORIGIN_AIRPORT_ID)
dataset$DEST_AIRPORT_ID <- factor(dataset$DEST_AIRPORT_ID)
dataset$MONTH <- factor(dataset$MONTH)
dataset$DAY_OF_MONTH <- factor(dataset$DAY_OF_MONTH)
dataset$DAY_OF_WEEK <- factor(dataset$DAY_OF_WEEK)

dataset$DEP_TIME_BINS <- cut(dataset$CRS_DEP_TIME,
                             breaks = c(1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200,
                                        1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300),
                             right = FALSE)
levels(dataset$DEP_TIME_BINS) <- make.names(c("0000.to.0100", "0100.to.0159", "0200.to.0259", "0300.to.0359",
                                              "0400.to.0459", "0500.to.0559", "0600.to.0659", "0700.to.0759",
                                              "0800.to.0859", "0900.to.0959", "1000.to.1059", "1100.to.1159",
                                              "1200.to.1259", "1300.to.1359", "1400.to.1459", "1500.to.1559",
                                              "1600.to.1659", "1700.to.1759", "1800.to.1859", "1900.to.1959",
                                              "2000.to.2059", "2100.to.2159", "2200.to.2259", "2300.to.2359"))
dataset$ARR_TIME_BINS <- cut(dataset$CRS_ARR_TIME,
                             breaks = c(1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200,
                                        1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300),
                             right = FALSE)
levels(dataset$ARR_TIME_BINS) <- make.names(c("0000.to.0100", "0100.to.0159", "0200.to.0259", "0300.to.0359",
                                              "0400.to.0459", "0500.to.0559", "0600.to.0659", "0700.to.0759",
                                              "0800.to.0859", "0900.to.0959", "1000.to.1059", "1100.to.1159",
                                              "1200.to.1259", "1300.to.1359", "1400.to.1459", "1500.to.1559",
                                              "1600.to.1659", "1700.to.1759", "1800.to.1859", "1900.to.1959",
                                              "2000.to.2059", "2100.to.2159", "2200.to.2259", "2300.to.2359"))

# Add lat long into dataset (run section 12.3 before this)
ORIGIN_AIRPORT_LAT <- as.numeric(lookupLatitude(dataset$ORIGIN_AIRPORT_ID))
ORIGIN_AIRPORT_LONG <- as.numeric(lookupLongitude(dataset$ORIGIN_AIRPORT_ID))
DEST_AIRPORT_LAT <- as.numeric(lookupLatitude(dataset$DEST_AIRPORT_ID))
DEST_AIRPORT_LONG <- as.numeric(lookupLongitude(dataset$DEST_AIRPORT_ID))
dataset <- cbind(dataset, ORIGIN_AIRPORT_LAT, ORIGIN_AIRPORT_LONG, DEST_AIRPORT_LAT, DEST_AIRPORT_LONG)

# 1.4 Analyzing Causes of Delay ======================================================================================
attach(dataset)
delayedFlightsWithKnownReason <- subset(dataset, CARRIER_DELAY > 0 | NAS_DELAY > 0 | WEATHER_DELAY > 0 |
                                          SECURITY_DELAY > 0 | LATE_AIRCRAFT_DELAY > 0)
delayedFlightsWithWeatherAndAircraftDelay <- subset(dataset, WEATHER_DELAY > 0 & LATE_AIRCRAFT_DELAY > 0)
totalNumDelayedFlights <- length(na.omit(DELAY_GROUPS[DELAY_GROUPS!="no delay"]))
numDelayedFlightsWithReason <- length(na.omit(delayedFlightsWithKnownReason$DELAY_GROUPS))
numDelayedFlightsWithUnknownReason <- totalNumDelayedFlights - numDelayedFlightsWithReason
reason1 <- length(na.omit(CARRIER_DELAY[CARRIER_DELAY>0]))
reason2 <- length(na.omit(NAS_DELAY[NAS_DELAY>0]))
reason3 <- length(na.omit(WEATHER_DELAY[WEATHER_DELAY>0]))
reason4 <- length(na.omit(SECURITY_DELAY[SECURITY_DELAY>0]))
reason5 <- length(na.omit(LATE_AIRCRAFT_DELAY[LATE_AIRCRAFT_DELAY>0]))

par(mai = c(1,2,0.5,0.5))
barplot(c(numDelayedFlightsWithUnknownReason,reason1,reason2,reason3,reason4,reason5),
        horiz = TRUE, las = 1,
        main = "Reasons for Flight Delays",
        xlab = "Number of Delayed Flights",
        names.arg = c("Reason Not Given","Carrier","National Air Service","Weather","Security","Late Aircraft"),
        cex.names = 1.0)
par(mai = c(0.5,0.5,0.5,0.5))
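# -> The per-reason counts used for the barplot above can also be computed in one step; a minimal
#    added sketch, assuming only the five *_DELAY columns already named in this section:
delay.cols <- c("CARRIER_DELAY", "NAS_DELAY", "WEATHER_DELAY", "SECURITY_DELAY", "LATE_AIRCRAFT_DELAY")
sapply(dataset[delay.cols], function(x) sum(x > 0, na.rm = TRUE))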
hist(na.omit(CARRIER_DELAY[CARRIER_DELAY>0]), breaks = 1000, xlim = c(-60,240))
hist(na.omit(NAS_DELAY[NAS_DELAY>0]), breaks = 1000, xlim = c(-10,60))
hist(na.omit(WEATHER_DELAY[WEATHER_DELAY>0]), breaks = 1000, xlim = c(-60,240))
hist(na.omit(SECURITY_DELAY[SECURITY_DELAY>0]), breaks = 1000, xlim = c(-60,240))
hist(na.omit(LATE_AIRCRAFT_DELAY[LATE_AIRCRAFT_DELAY>0]), breaks = 1000, xlim = c(-60,240))

d1_carrierDelayedFlighs <- subset(delayedFlights, CARRIER_DELAY > 0)
d2_nasDelayedFlights <- subset(delayedFlights, NAS_DELAY > 0)
d3_weatherDelayedFlights <- subset(delayedFlights, WEATHER_DELAY > 0)
d4_securityDelayedFlights <- subset(delayedFlights, SECURITY_DELAY > 0)
d5_lateAircraftDelayedFlights <- subset(delayedFlights, LATE_AIRCRAFT_DELAY > 0)
d6_lateFlights <- subset(dataset, DELAY_GROUPS != "no delay")

percentageAllFlightsWeatherDelayed <- 5661/450018
percentageAllFlightsWeatherDelayed
percentageDelayedFlightsWithKnownCauseWeatherDelayed <- 5661/97699
percentageDelayedFlightsWithKnownCauseWeatherDelayed

delayedFlights <- subset(dataset, ARR_DELAY > 0)

# Create box plots for categorical feature correlations, since TKTau has too many question marks
boxplot(delayedFlights$ARR_DELAY~delayedFlights$DAY_OF_WEEK, data=delayedFlights,
        main="Delay on each weekday", xlab="Day of Week", ylab="Delay", outline = F)
boxplot(delayedFlights$CRS_DEP_TIME~delayedFlights$DELAY_GROUPS, data=delayedFlights,
        main="Delay group and departure time", xlab="Delay Group", ylab="Departure Time", outline = F)

# dataset$NAS_DELAY
# dataset$WEATHER_DELAY
# dataset$SECURITY_DELAY
# dataset$LATE_AIRCRAFT_DELAY

# 1.5 Plotting Latitude and Longitude ================================================================================
# -> Reference: https://rstudio-pubs-static.s3.amazonaws.com/254085_f70afc81201b40c8989091a3e6173dc1.html
# Define Map parameters
bbox2 <- make_bbox(dataset$DEST_AIRPORT_LONG, dataset$DEST_AIRPORT_LAT, f = .00001)
# Create Map
map <- openmap(c(bbox2[4], bbox2[1]), c(bbox2[2], bbox2[3]), type = "esri")
map <- openproj(map)

# All Airports
# Plot
p0 <- autoplot(map)
# Plot Points
p <- p0 + geom_point(aes(x = DEST_AIRPORT_LONG, y = DEST_AIRPORT_LAT),
                     data = dataset, alpha = 1, size = 0.5) +
  ggtitle("Airports in the USA")
p1 <- p + geom_point(aes(x = DEST_AIRPORT_LONG, y = DEST_AIRPORT_LAT, size = ARR_DELAY),
                     colour = I(alpha("black", 5/10)), data = dataset) +
  scale_size(name = "Arrival\nDelays",
             breaks = c(15, 30, 45, 60, 120),
             labels = c("<15 min", "15 to 30min", "30 to 45min", "45 to 60min", ">120mins"),
             limits = c(0,1000))
p1
# -> Need to increase the resolution of the map,
# -> and suggest how else we want to make use of longitude and latitude
# -> Spent way too much time on plotting this map

# 1.9 Splitting the dataset ============================================================================
# -> Data is split into 2 different sets: training and testing sets

# Shrink the dataset to train our models faster, 10%
# cutoff = round(0.1*nrow(dataset))
# dataset <- dataset[1:cutoff,]

# Create a list of 70% of the rows in the original dataset we can use for training
set.seed(7)
test.index <- createDataPartition(dataset$DELAY_GROUPS, p=0.7, list=FALSE)
# Select 30% of the data for testing
testdf.unprocessed <- dataset[-test.index,]
# Use the remaining 70% of data for creating the training dataset
traindf.unprocessed <- dataset[test.index,]

# 2.0 Analyse Data ============================================================================================
head(traindf.unprocessed)
dim(traindf.unprocessed)
# -> We have 315016 instances to work with and 22 features/attributes.
str(traindf.unprocessed)

# Look at what type of features we have to work with
sapply(traindf.unprocessed, class)
# -> Class types of some features have to be converted to factors

# Look at which features have missing values
describe(traindf.unprocessed)
# -> The ARR_DELAY feature has 10372 missing values, CRS_ELAPSED_TIME has 4 missing values, and
#    CARRIER_DELAY, WEATHER_DELAY, NAS_DELAY, SECURITY_DELAY, LATE_AIRCRAFT_DELAY have 352318 missing values.

# 3.0 Visualize traindf.unprocessed ================================================================================
# Creating functions for plotting
# Reference: https://www.kaggle.com/notaapple/detailed-exploratory-data-analysis-using-r
plotHist <- function(data_in, i) {
  data <- data.frame(x=data_in[[i]])
  p <- ggplot(data=data, aes(x=factor(x))) + stat_count() + xlab(colnames(data_in)[i]) +
    theme_light() + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  return(p)
}
plotDen <- function(data_in, i){
  data <- data.frame(x=data_in[[i]], delay_groups = data_in$DELAY_GROUPS)
  p <- ggplot(data= data) + geom_line(aes(x = x), stat = 'density', size = 1, alpha = 1.0) +
    xlab(paste0((colnames(data_in)[i]), '\n', 'Skewness: ', round(skewness(data_in[[i]], na.rm = TRUE), 2))) +
    theme_light()
  return(p)
}
doPlots <- function(data_in, fun, ii, ncol=3) {
  pp <- list()
  for (i in ii) {
    p <- fun(data_in=data_in, i=i)
    pp <- c(pp, list(p))
  }
  do.call("grid.arrange", c(pp, ncol=ncol))
}

# 3.1 Create Histograms and Kernel density plots for each numerical feature
doPlots(traindf.unprocessed, fun = plotDen, ii = 1:3, ncol = 2)
doPlots(traindf.unprocessed, fun = plotDen, ii = 8:12, ncol = 2)
doPlots(traindf.unprocessed, fun = plotDen, ii = 13:17, ncol = 2)
doPlots(traindf.unprocessed, fun = plotHist, ii = 8:9, ncol = 1)

# 3.2 Create barplots for each categorical feature
doPlots(traindf.unprocessed, fun = plotHist, ii = 5, ncol = 1)
doPlots(traindf.unprocessed, fun = plotHist, ii = 20, ncol = 1)
# -> Looking at the frequencies for our response variable, its distribution follows a right skew.

# 4.0 Cleaning Data ============================================================================================
# 4.1 Handling missing data
colSums(sapply(dataset, is.na))
# -> There are 10372 rows with missing ARR_DELAY values.
# -> Action? Since 10372 rows is just 2.3% of the total number of rows, we can remove the rows with missing values.

# Visualisation of missing data
plot_Missing <- function(data_in, title = NULL){
  temp_df <- as.data.frame(ifelse(is.na(data_in), 0, 1))
  temp_df <- temp_df[,order(colSums(temp_df))]
  data_temp <- expand.grid(list(x = 1:nrow(temp_df), y = colnames(temp_df)))
  data_temp$m <- as.vector(as.matrix(temp_df))
  data_temp <- data.frame(x = unlist(data_temp$x), y = unlist(data_temp$y), m = unlist(data_temp$m))
  ggplot(data_temp) + geom_tile(aes(x=x, y=y, fill=factor(m))) +
    scale_fill_manual(values=c("white", "black"), name="Missing\n(0=Yes, 1=No)") +
    theme_light() + ylab("") + xlab("") + ggtitle(title)
}
plot_Missing(dataset[,(colSums(is.na(dataset)) > 0)], title = "Missing Values")

# 4.2 Remove rows with missing ARR_DELAY and CRS_ELAPSED_TIME values
dataset <- dataset %>%
  filter(!is.na(ARR_DELAY)) %>%
  filter(!is.na(DEP_TIME_BINS)) %>%
  filter(!is.na(ARR_TIME_BINS))
describe(dataset)

# 5.0 Feature Engineering ==========================================================
# One-Hot-Encode categorical features
# -> We use OHE to perform "binarization" of the categories and include them as features to train the model.
# -> OHE transforms categorical features to a format that works better with classification and regression algorithms.
# -> However, algorithms like random forests handle categorical features natively, so OHE is not necessary for them.
# -> Before we OHE, we must convert our response variable to numerical,
#    because the following code transforms all factor type (categorical) features into OHE format.
#    We don't want to OHE our response variable; we want it to stay as a factor type for modelling purposes.
included.features <- c("DELAY_GROUPS", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "CARRIER",
                       "ORIGIN_AIRPORT_ID", "DEST_AIRPORT_ID", "CRS_DEP_TIME", "CRS_ARR_TIME", "DISTANCE",
                       "ORIGIN_AIRPORT_LAT", "ORIGIN_AIRPORT_LONG", "DEST_AIRPORT_LAT", "DEST_AIRPORT_LONG")
included.features <- c("ARR_DELAY", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "CARRIER",
                       "ORIGIN_AIRPORT_ID", "DEST_AIRPORT_ID", "CRS_DEP_TIME", "CRS_ARR_TIME", "DISTANCE",
                       "ORIGIN_AIRPORT_LAT", "ORIGIN_AIRPORT_LONG", "DEST_AIRPORT_LAT", "DEST_AIRPORT_LONG")
included.features <- c("ARR_DELAY", "MONTH", "DAY_OF_MONTH", "DAY_OF_WEEK", "CARRIER",
                       "ORIGIN_AIRPORT_ID", "DEST_AIRPORT_ID", "DEP_TIME_BINS", "ARR_TIME_BINS", "DISTANCE")
dataset <- dataset[included.features]

dataset$DELAY_GROUPS <- as.numeric(dataset$DELAY_GROUPS)

# One-Hot-Encode all factor type features, put all encoded features into new dataset.ohe
dmy <- dummyVars(" ~ .", data = dataset)
dataset.ohe <- data.frame(predict(dmy, newdata = dataset))

# Change response variable back to factor type
dataset.ohe$DELAY_GROUPS <- factor(dataset.ohe$DELAY_GROUPS)
dataset$DELAY_GROUPS <- factor(dataset$DELAY_GROUPS)

# Make valid names for the response variable's classes since some models require valid names to work.
levels(dataset$DELAY_GROUPS) <- make.names(c("no_delay", "delay.1.to.15.mins", "delay.16.to.30.mins",
                                             "delay.31.to.45.mins", "delay.46.to.60.mins",
                                             "delay.61.to.120.mins", "delay.121.mins.or.more"))
levels(dataset.ohe$DELAY_GROUPS) <- make.names(c("no_delay", "delay.1.to.15.mins", "delay.16.to.30.mins",
                                                 "delay.31.to.45.mins", "delay.46.to.60.mins",
                                                 "delay.61.to.120.mins", "delay.121.mins.or.more"))

# 6.0 Train/Test split ============================================================================
# -> Data is split into 2 different sets: training and testing sets

# Shrink the dataset to train our models faster, 10%
cutoff = round(0.1*nrow(dataset))
dataset <- dataset[1:cutoff,]
cutoff.ohe = round(0.1*nrow(dataset.ohe))
dataset.ohe <- dataset.ohe[1:cutoff.ohe,]

# Use the same index we used to split earlier
set.seed(7)
#test.index <- createDataPartition(dataset$DELAY_GROUPS, p=0.7, list=FALSE)
test.index <- createDataPartition(dataset$ARR_DELAY, p=0.7, list=FALSE)
# Select 30% of the data for testing
testdf <- dataset[-test.index,]
# Use the remaining 70% of data for creating the training dataset
traindf <- dataset[test.index,]
# Create train and test sets for the one-hot-encoded dataset as well
testdf.ohe <- dataset.ohe[-test.index,]
traindf.ohe <- dataset.ohe[test.index,]

# 7.0 Correlations between categorical variables ==============================================================
# Check for relationships between all categorical variables
subset <- c(5:7, 1)
GKmatrix <- GKtauDataframe(traindf[,subset])
plot(GKmatrix)
# -> The Goodman-Kruskal tau measure: knowledge of marital.status is predictive of relationship, and similar otherwise.
# -> Reference: https://cran.r-project.org/web/packages/GoodmanKruskal/vignettes/GoodmanKruskal.html

# 7.5 Correlations between numerical variables ============================================================
# -> To show this, we combine a correlogram with the significance test.
# -> Reference: http://www.sthda.com/english/wiki/visualize-correlation-matrix-using-correlogram

# Build a function to compute a matrix of p-values
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n)
  diag(p.mat) <- 0
  for (i in 1:(n - 1)) {
    for (j in (i + 1):n) {
      tmp <- cor.test(mat[, i], mat[, j], ...)
      p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  p.mat
}
cor <- cor(traindf[,c(2:3,7:12)])
p.mat <- cor.mtest(traindf[,c(2:3,7:12)])

# Build a correlogram
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(cor, method="color", col=col(200),
         type="upper", order="hclust",
         addCoef.col = "black",      # Add coefficient of correlation
         tl.col="black", tl.srt=45,  # Text label color and rotation
         # Combine with significance
         p.mat = p.mat, sig.level = 0.01,
         # Hide correlation coefficient on the principal diagonal
         diag=FALSE)
# -> Correlations with p-value > 0.05 are considered insignificant, and crosses are added for those.
# -> We don't seem to have any correlation among numerical variables, which is a good thing.

# 8.0 Evaluate Algorithms =======================================================
control <- trainControl(
  method = "cv",
  number = 5,
  verboseIter = TRUE,
  returnData = FALSE,
  returnResamp = "all",                # save losses across all models
  #classProbs = TRUE,                  # set to TRUE for AUC to be computed
  #summaryFunction=multiClassSummary,
  #summaryFunction=mnLogLoss,
  allowParallel = TRUE
)
# -> The function trainControl can be used to specify the type of resampling,
#    in this case, 5-fold cross validation.

metric <- "Accuracy"
metric <- "ROC" # Should use this!!
metric <- "logLoss" metric <- "RMSE" # -> Algorithms: # -> CART # -> LogitBoost - Boosted Logistic Regression # -> xgbTree - eXtreme Gradient Boosting # -> NNET - Neural Network # -> MLP - Multi-Layer Perceptron # -> All timings recording are trained on 10% of data # Regression attempt xgbTreeGrid <- expand.grid(nrounds = c(100,200), max_depth = seq(7,10,by = 1), eta = 0.1, gamma = 0, colsample_bytree = 1.0, subsample = 1.0, min_child_weight = 4) set.seed(7) system.time(fit.xgb.reg <- caret::train(ARR_DELAY ~., data = traindf.ohe, trControl = control, tuneGrid = xgbTreeGrid, metric = metric, method = "xgbTree", nthread=4)) set.seed(7) system.time(fit.lm <- caret::train(ARR_DELAY ~., data = traindf, trControl = control, metric = metric, method = "lm")) # -> 198.81 set.seed(7) system.time(fit.lm1 <- caret::train(ARR_DELAY ~., data = traindf.ohe, trControl = control, metric = metric, method = "lm")) # -> 189.92 set.seed(7) system.time(fit.lm2 <- caret::train(ARR_DELAY ~.-ORIGIN_AIRPORT_LAT-ORIGIN_AIRPORT_LONG-DEST_AIRPORT_LAT-DEST_AIRPORT_LONG, data = traindf.ohe, trControl = control, metric = metric, method = "lm")) set.seed(7) system.time(fit.glm <- caret::train(ARR_DELAY~., data=traindf.ohe, method="glm", metric=metric, trControl=control)) # -> 437.04 set.seed(7) system.time(fit.glmnet <- caret::train(ARR_DELAY~., data=traindf.ohe, method="glmnet", metric=metric, trControl=control)) # -> 275.94 set.seed(7) system.time(fit.nn <- caret::train(ARR_DELAY~., data=traindf.ohe, method="nnet", metric=metric, trControl=control)) # -> 4876.75 set.seed(7) system.time(fit.bsttree <- caret::train(ARR_DELAY~., data=traindf, method="bstTree", metric=metric, trControl=control)) # eXtreme Gradient Boosting ******************************************************************************** xgbgrid <- expand.grid( nrounds = c(100,200), eta = 0.01, max_depth = c(9,10), gamma = 0, colsample_bytree = c(0.4,0.5), min_child_weight = 1, subsample = 1 ) set.seed(7) system.time(fit.xgb <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, trControl = control, tuneGrid = xgbgrid, metric = metric, method = "xgbTree", verbose = 1, num_class = 7)) # -> fit.xgb: 7680.36 sec elapsed # -> Fitting nrounds = 200, max_depth = 10, eta = 0.01, gamma = 0, colsample_bytree = 0.5, min_child_weight = 1, subsample = 1 on full training set fit.xgb$bestTune plot(fit.xgb) predictions.xgb<-predict(fit.xgb,testdf.ohe) caret::confusionMatrix(predictions.xgb, testdf.ohe$DELAY_GROUPS) # Boosted Logistic Regression ******************************************************************************** set.seed(7) system.time(fit.LogitBoost <- caret::train(DELAY_GROUPS~., data=traindf.ohe, method="LogitBoost", metric=metric, trControl=control)) # -> fit.logitboost: 927.15 set.seed(7) system.time(fit.LogitBoost1 <- caret::train(DELAY_GROUPS~., data=traindf.ohe, method="LogitBoost", metric=metric, trControl=control, preProcess = c('BoxCox'))) # -> fit.logitboost1: 948.52 # CART ******************************************************************************** set.seed(7) system.time(fit.cart <- caret::train(DELAY_GROUPS~., data=traindf, method="rpart", parms = list(split = "information"), #or 'information' metric=metric, trControl=control, tuneLength = 10)) # -> fit.cart: 540.52 set.seed(7) system.time(fit.cart1 <- caret::train(DELAY_GROUPS~., data=traindf, method="rpart", parms = list(split = "gini"), #or 'information' metric=metric, trControl=control, tuneLength = 10)) # -> fit.cart1: 557.76 set.seed(7) system.time(fit.cart2 <- 
# -> Note: CRS_ELAPSED_TIME is not part of the feature set built in section 5.0;
#    drop the "-CRS_ELAPSED_TIME" term below if these calls error.
system.time(fit.cart2 <- caret::train(DELAY_GROUPS ~ . - CRS_ELAPSED_TIME, data = traindf, method = "rpart",
                                      parms = list(split = "gini"), # or 'information'
                                      metric = metric, trControl = control, tuneLength = 10))
# -> fit.cart2: timing not recorded
set.seed(7)
system.time(fit.cart3 <- caret::train(DELAY_GROUPS ~ . - CRS_ELAPSED_TIME, data = traindf, method = "rpart",
                                      parms = list(split = "information"), # or 'gini'
                                      metric = metric, trControl = control, tuneLength = 10))
# -> fit.cart3: timing not recorded

predictions.caret2 <- predict(fit.cart2, testdf)
caret::confusionMatrix(predictions.caret2, testdf$DELAY_GROUPS)
predictions.caret1 <- predict(fit.cart1, testdf)
caret::confusionMatrix(predictions.caret1, testdf$DELAY_GROUPS)

# Neural Network ********************************************************************************
nnetGrid <- expand.grid(size = seq(from = 1, to = 10, by = 1),
                        decay = seq(from = 0.1, to = 0.5, by = 0.1))
set.seed(7)
system.time(fit.nnet <- caret::train(DELAY_GROUPS ~ ., data = traindf, method = "nnet", metric = metric, trControl = control))
# -> fit.nnet: 334.23
set.seed(7)
system.time(fit.nnet1 <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, method = "nnet", metric = metric, trControl = control, preProcess = 'BoxCox', tuneLength = 10))
# -> fit.nnet1: 574.64
set.seed(7)
system.time(fit.nnet2 <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, method = "nnet", metric = metric, trControl = control, tuneGrid = nnetGrid))
# -> fit.nnet2: 2527.
# The final values used for the model were size = 1 and decay = 0.4.
# -> Turns out performance is the same regardless of BoxCox transformation and encoding.

# Multi-Layer Perceptron ********************************************************************************
set.seed(7)
system.time(fit.mlp <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, method = "mlp", preProcess = 'BoxCox', metric = "Accuracy", trControl = control))
# -> fit.mlp: 794.86

# Yet to run ********************************************************************************************
set.seed(7)
system.time(fit.LogitBoost <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, method = "LogitBoost", metric = "Accuracy", trControl = control, tuneLength = 10))
set.seed(7)
system.time(fit.gbm <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, method = "gbm", metric = "Accuracy", trControl = control))

# -> Warning: this grid has 9 x 4 x 10 x 6 x 3 x 3 x 3 = 58,320 hyperparameter combinations,
#    so the run below would take a very long time.
xgbgrid2 <- expand.grid(max_depth = c(seq(from = 2, to = 10, by = 1)),
                        eta = c(seq(from = 0.1, to = 1, by = 0.3)),
                        nrounds = c(seq(from = 1, to = 500, by = 50)),
                        colsample_bytree = c(seq(from = 0.5, to = 1, by = 0.1)),
                        min_child_weight = c(seq(from = 0.1, to = 1, by = 0.4)),
                        subsample = c(seq(from = 0.1, to = 1, by = 0.4)),
                        gamma = c(seq(from = 0.1, to = 1, by = 0.4))
)
set.seed(7)
system.time(fit.xgb1 <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, trControl = control, tuneGrid = xgbgrid2,
                                     metric = "Accuracy", method = "xgbTree", verbose = 1, num_class = 7))
set.seed(7)
system.time(fit.ada <- caret::train(DELAY_GROUPS ~ ., data = traindf.ohe, method = "ada", metric = metric, trControl = control))

# -> fit.xgbLinear_1 is not defined anywhere in this script; keep these commented out until that model exists.
#print(fit.xgbLinear_1)
#plot(fit.xgbLinear_1)

library(mlbench)
control.rfe <- rfeControl(functions = nbFuncs, method = "cv", number = 10)
# run the RFE algorithm (stored as results.rfe so it is not overwritten by the resamples() object below)
results.rfe <- rfe(traindf[, 2:14], traindf[, 1], sizes = c(1:14), rfeControl = control.rfe)

# 9.0 Compare algorithms =============================================================
# collect resampling statistics of ALL trained models
results <- resamples(list(
    #LogitBoost = fit.LogitBoost,
    #LogitBoost1 = fit.LogitBoost1,
    #Cart = fit.cart,
    #Cart1 = fit.cart1,
    #Cart2 = fit.cart2
    #XGBTree = fit.xgb,
    #NNET = fit.nnet,
    #NNET1 = fit.nnet1,
    #NNET2 = fit.nnet2
    #MLP = fit.mlp
    lm = fit.lm,
    lm1 = fit.lm1,
    lm2 = fit.lm2
))

# Summarize the fitted models
summary(results)
# Plot and rank the fitted models
dotplot(results)
dotplot(results, metric = metric)

# Test skill of the BEST trained model on validation/testing dataset
predictions.LogitBoost1 <- predict(fit.LogitBoost1, newdata = testdf.ohe)
predictions.xgb <- predict(fit.xgb, newdata = testdf.ohe)
predictions.cart.prob <- predict(fit.cart, newdata = testdf, type = 'prob')

# Evaluate the BEST trained model and print results
result.LogitBoost1 <- caret::confusionMatrix(predictions.LogitBoost1, testdf.ohe$DELAY_GROUPS)
result.xgb <- caret::confusionMatrix(predictions.xgb, testdf.ohe$DELAY_GROUPS)

# summarize Best Model
print(result.LogitBoost1)
# -> Accuracy : 0.6935
# -> Basically it's predicting everything as no delay, which is really bad...
# -> Accuracy is very misleading as a metric here. Should we use area under the ROC curve instead?
#    (see the classProbs/multiClassSummary trainControl sketch in section 8.0)
print(result.xgb)
# -> Accuracy : 0.6433
# -> Predictions are spread nicely throughout the 7 classes.
# -> I picked this because fit.xgb gives the best CV AUC score.

# 11.0 Visualising Results =========================================================================================
# Create a ROC plot comparing performance of all models
# -> (the commented code below appears to be carried over from an earlier classification project --
#     it still references annual.income and models not trained in this script, so it needs updating for this dataset)
# colors <- randomColor(count = 10, hue = c("random"), luminosity = c("dark"))
# roc1 <- roc(testdf$annual.income, predictions.stack.glm5.prob, col=colors[1], percent=TRUE, asp = NA,
#             plot=TRUE, print.auc=TRUE, grid=TRUE, main="ROC comparison", print.auc.x=70, print.auc.y=80)
# roc2 <- roc(testdf$annual.income, predictions.stack.rpart6.probs, plot=TRUE, add=TRUE,
#             percent=roc1$percent, col=colors[2], print.auc=TRUE, print.auc.x=70, print.auc.y=70)
# roc3 <- roc(testdf$annual.income, predictions.C50.prob, plot=TRUE, add=TRUE,
#             percent=roc1$percent, col=colors[3], print.auc=TRUE, print.auc.x=70, print.auc.y=60)
# roc4 <- roc(testdf$annual.income, predictions.cart.prob, plot=TRUE, add=TRUE,
#             percent=roc1$percent, col=colors[4], print.auc=TRUE, print.auc.x=70, print.auc.y=50)
# roc5 <- roc(testdf$annual.income, predictions.lda1.prob, plot=TRUE, add=TRUE,
#             percent=roc1$percent, col=colors[5], print.auc=TRUE, print.auc.x=70, print.auc.y=40)
# roc6 <- roc(testdf$annual.income, predictions.logit5.prob, plot=TRUE, add=TRUE,
#             percent=roc1$percent, col=colors[6], print.auc=TRUE, print.auc.x=70, print.auc.y=30)
# legend("bottomright", legend=c("stack.glm", "stack.rpart", "C5.0", "CART", "LDA", "logistic"), col=c(colors[1:6]), lwd=2)

# 12.0 Function Implementation ================================================================================
# -> https://jessesw.com/Air-Delays/
# -> Converted the Python implementation from the link above into R. Very time consuming.
# -> The final function is still not ready, but it is close to done.
# -> We still need to wire in the longitude/latitude retrieval for the final function to work
#    (see the lookup functions in section 12.3).

# 12.1 max_num_flights function
# -> max_num_flights is a helper for the delay prediction function: it counts the number of
#    flights in the database for each candidate airport code of a given city.
# -> Inputs: list of codes retrieved in the delay_prediction function
# -> Output: the code with the largest number of flights.
max_num_flights <- function(codes) {
    # List to store the flight count for each airport code
    num_store <- list()
    if (length(codes) < 1) {
        print('Try entering your city/airport again. No matching airports found.')
        return(NULL)
    }
    for (i in 1:length(codes)) {
        # exact match on the airport id; grepl() would also count substring matches
        num_flights <- sum(dataset$ORIGIN_AIRPORT_ID == codes[i])
        num_store[i] <- num_flights
    }
    # Now find the maximum row
    max_num_store <- max(unlist(num_store))
    max_ind <- match(max_num_store, num_store)
    # Now we know which code had the most flights. Return it.
    return(codes[max_ind])
}
codes <- c(13930, 11298)
codes1 <- c(11703, 12478, 12541, 12545, 12546, 12548, 12953, 13784, 15346, 15859)
max_num_flights(codes1)

# 12.2 delay_prediction function
# -> The function will allow the user to enter all of the information about their flight
#    and return the predicted delay (as one of the DELAY_GROUPS classes).
delay_prediction <- function(origin = 'Fort Worth', destination = 'Chicago', carrier = 'American',
                             dept_time = 17, arr_time = 19, month = 5, day = 15, weekday = 'Wednesday') {
    # Create a dict for our Airlines. Based on the carrierDF.
    carrier_dict <- list('Endeavor' = 1, 'American' = 2, 'Alaska' = 3, 'JetBlue' = 4, 'Delta' = 5,
                         'ExpressJet' = 6, 'Frontier' = 7, 'AirTran' = 8, 'Hawaiian' = 9, 'Envoy' = 10,
                         'SkyWest' = 11, 'United' = 12, 'US Airways' = 13, 'Virgin' = 14, 'Southwest' = 15, 'Mesa' = 16)
    # Another for day of the week
    weekday_dict <- list('Monday' = 1, 'Tuesday' = 2, 'Wednesday' = 3, 'Thursday' = 4, 'Friday' = 5, 'Saturday' = 6, 'Sunday' = 7)

    # Now find the corresponding airport codes for our origin and destination.
    #origin_codes = list(airport_lookupDF[airport_lookupDF.Description.str.contains(origin)].Code)
    origin_codes <- grep(origin, airport_lookupDF$Description)
    #destination_codes = list(airport_lookupDF[airport_lookupDF.Description.str.contains(destination)].Code)
    destination_codes <- grep(destination, airport_lookupDF$Description)

    # From these codes found in the lookup table, see which one had the largest number of flights.
    origin_code <- max_num_flights(airport_lookupDF$Code[origin_codes])
    destination_code <- max_num_flights(airport_lookupDF$Code[destination_codes])

    # Now that we have these codes, we can look up the other parameters necessary.
    # Find the distance between the two airports.
    distance <- dataset$DISTANCE[dataset$ORIGIN_AIRPORT_ID == origin_code & dataset$DEST_AIRPORT_ID == destination_code][1]
    # Look up the numeric ids by the argument values (use [[ ]], not $, so the value of `carrier`/`weekday` is used)
    carrier_num <- carrier_dict[[carrier]]
    weekday_num <- weekday_dict[[weekday]]
    # Origin airport coordinates, via the section 12.3 helpers
    # (the trained model also expects destination coordinates -- still to be added -- and `month` is currently unused)
    long <- lookupLongitude(origin_code)
    lat <- lookupLatitude(origin_code)

    # Now that we have all of our values, we can start combining them together.
    # Create our one-row data frame of predictor values.
    categorical_values <- data.frame()
    categorical_values[1, 1] <- as.numeric(day)
    categorical_values[1, 2] <- as.numeric(weekday_num)
    categorical_values[1, 3] <- as.factor(carrier_num)
    categorical_values[1, 4] <- as.factor(origin_code)
    categorical_values[1, 5] <- as.factor(destination_code)
    categorical_values[1, 6] <- as.numeric(dept_time)
    categorical_values[1, 7] <- as.numeric(arr_time)
    categorical_values[1, 8] <- as.numeric(arr_time - dept_time)
    categorical_values[1, 9] <- as.numeric(distance)
    categorical_values[1, 10] <- as.numeric(long)
    categorical_values[1, 11] <- as.numeric(lat)

    # Apply the one-hot encoding to these.
    dmy1 <- dummyVars(" ~ .", data = categorical_values)
    categorical_values.ohe <- data.frame(predict(dmy1, newdata = categorical_values))
    # (caveat: for a single new row, the factor columns must carry the same levels as the
    #  training data for the encoding to line up with traindf.ohe -- still to be handled)

    # Now predict this with the model
    prediction <- predict(fit.LogitBoost1, newdata = categorical_values.ohe)
    print(paste('Your predicted delay group is:', as.character(prediction[1])))
    return(prediction[1])
    # End of function
}

# 12.3 lookupAirportData function =======================================================
# Input:
#   AIRPORT_ID: the five digit number such as from either ORIGIN_AIRPORT_ID or DEST_AIRPORT_ID
#   RETURN: a string with the value to return e.g. "LATITUDE" or "LONGITUDE" or "DISPLAY_AIRPORT_CITY_NAME_FULL"
# Output: value in row corresponding to AIRPORT_ID in Airport_Data.csv and column specified by RETURN
# Example:
#   airport.name <- lookupAirportData(11057, RETURN = "DISPLAY_AIRPORT_NAME")
# Special functions for lat and long for faster performance and extra convenience
# Examples:
#   lat <- lookupLatitude(11057)
#   long <- lookupLongitude(11057)

# Data for Functions =====================================================================
airportData <- read.csv("data/Airport_Data.csv", header = TRUE, stringsAsFactors = FALSE)
AIRPORT_ID_LIST <- airportData$AIRPORT_ID
LATITUDE_LIST <- airportData$LATITUDE
LONGITUDE_LIST <- airportData$LONGITUDE

# Function Definitions ===================================================================
lookupAirportData <- function(AIRPORT_ID, RETURN) {
    row.index <- match(AIRPORT_ID, AIRPORT_ID_LIST)
    column.of.interest <- airportData[, RETURN]
    return(column.of.interest[row.index])
}
lookupLatitude <- function(AIRPORT_ID) {
    row.index <- match(AIRPORT_ID, AIRPORT_ID_LIST)
    return(LATITUDE_LIST[row.index])
}
lookupLongitude <- function(AIRPORT_ID) {
    row.index <- match(AIRPORT_ID, AIRPORT_ID_LIST)
    return(LONGITUDE_LIST[row.index])
}
# -> A short usage sketch of the section 12 functions is given below, after the Conclusions header.

# 13.0 Conclusions ===================================================================================================
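# -> For reference, a hedged usage sketch of the section 12.2 function. The argument values are
# -> simply its defaults, and the call assumes fit.LogitBoost1, airport_lookupDF, dataset and the
# -> section 12.3 lookup helpers all exist in the workspace (the function is still a work in progress):
# delay_prediction(origin = 'Fort Worth', destination = 'Chicago', carrier = 'American',
#                  dept_time = 17, arr_time = 19, month = 5, day = 15, weekday = 'Wednesday')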
/full code/delay_prediction_030418.r
no_license
longwind48/airline-delay-prediction
R
false
false
41,375
r
# This is an autogenerated script
# (assumes simulation_mixedPoissonAR1() from this repo is available, e.g. via devtools::load_all() or library(countsFun))
n = 200
lam1 = 2
lam2 = 5
prob = 0.250000
phi = -0.750000
nsim = 200
df = simulation_mixedPoissonAR1(n, lam1, lam2, phi, prob, nsim)
/archived sims/MixedPoissonAR1/IYW/GeneratedFiles/MixedPois2-5AR1_IYW_N200_NS200_PhiNeg.R
no_license
jlivsey/countsFun
R
false
false
168
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mainMethods.R
\name{getMSTLR}
\alias{getMSTLR}
\title{getMSTLR: create spanning tree with specified root and leaves}
\usage{
getMSTLR(
  g1,
  rootDist,
  cenDist,
  cluid,
  root = 1,
  leaves = list(a = c(9, 12, 13), b = 17, c = c(2, 6), d = 3, e = c(4, 5), f = 14)
)
}
\arguments{
\item{g1}{input graph}

\item{rootDist}{similarity matrix of clusters}

\item{cenDist}{distance matrix of clusters}

\item{cluid}{vector, the cluster id}

\item{root}{integer, default is 1, the root cluster}

\item{leaves}{list, specify the leaf groups}
}
\value{
g1, graph
}
\description{
getMSTLR: create spanning tree with specified root and leaves
}
\examples{

}
/man/getMSTLR.Rd
permissive
ouyang-lab/LISA2
R
false
true
730
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/et_functions.R
\name{getu2f}
\alias{getu2f}
\title{convert wind speed to 2m value function}
\usage{
getu2f(uz, zw)
}
\arguments{
\item{uz}{Wind speed in m/s. No default.}

\item{zw}{Wind speed measurement height in m. No default.}
}
\description{
Function converts wind speed at measurement height to the equivalent wind speed at 2m above ground level.
}
\examples{
getu2f(1.5,3.0)
}
\keyword{windspeed}
/ASCEET/man/getu2f.Rd
no_license
anfrench/ASCEET_v2
R
false
false
510
rd
# import the dataset wbcd <- read.csv("wisc_bc_data.csv", stringsAsFactors = FALSE) #structure of the dataset str(wbcd) # drop the id feature wbcd<-wbcd[-1] # table of diagnosis table(wbcd$diagnosis) # recode diagnosis as a factor wbcd$diagnosis <- factor(wbcd$diagnosis, levels = c("B", "M"), labels = c("Benign", "Malignant")) # create normalization function normalize <- function(x) { return ((x - min(x)) / (max(x) - min(x))) } # normalize the wbcd data wbcd_r<-as.data.frame(lapply(wbcd[2:31], normalize)) # confirm that normalization worked summary(wbcd_r$area_mean) # create training and test data wbcd_train <- wbcd_r[1:469, ] wbcd_test <- wbcd_r[470:569, ] # create labels for training and test data wbcd_train_labels <- wbcd[1:469, 1] wbcd_test_labels <- wbcd[470:569, 1] # Creating the model.. # load the "class" library library(class) wbcd_test_pred <- knn(train = wbcd_train, test = wbcd_test, cl = wbcd_train_labels, k = 21) #Evaluating model performance ---- # load the "gmodels" library library(gmodels) # Create the cross tabulation of predicted vs. actual CrossTable(x = wbcd_test_labels, y = wbcd_test_pred, prop.chisq = FALSE) #Cell Contents #|-------------------------| # | N | # | N / Row Total | # | N / Col Total | # | N / Table Total | # |-------------------------| # Total Observations in Table: 100 # | wbcd_test_pred #wbcd_test_labels | Benign | Malignant | Row Total | # ------------|-----------|-----------|-----------| # Benign | 61 | 0 | 61 | # | 1.000 | 0.000 | 0.610 | # | 0.968 | 0.000 | | # | 0.610 | 0.000 | | # ------------|-----------|-----------|-----------| # Malignant | 2 | 37 | 39 | # | 0.051 | 0.949 | 0.390 | # | 0.032 | 1.000 | | # | 0.020 | 0.370 | | # -------------|-----------|-----------|-----------| # Column Total | 63 | 37 | 100 | # | 0.630 | 0.370 | | # ------------|-----------|-----------|-----------|
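# A quick worked check on the cross table above: (61 + 37) of the 100 test cases are
# classified correctly, i.e. 98% accuracy, with 2 malignant tumours misclassified as benign.
# The same figure can be computed directly from the predictions:
accuracy <- mean(wbcd_test_pred == wbcd_test_labels) # (61 + 37) / 100 = 0.98
accuracy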
/cancer_prediction.R
no_license
YaswanthMareedu/cancer_prediction
R
false
false
2,397
r
library(alr3) ### Name: hooker ### Title: Hooker's data ### Aliases: hooker ### Keywords: datasets ### ** Examples head(hooker)
/data/genthat_extracted_code/alr3/examples/hooker.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
135
r
# loading in the necessary libraries
library(tidyverse) # loads the tibble package, which provides rownames_to_column()
library(janitor) # to use the clean_names function
library(readr) # used to read in the raw data file type .rds
library(here) # to enable the file locations to be reproducible

# fetching the raw data file
decathlon <- read_rds(here("raw_data/decathlon.rds"))

# the first column contains row names and needs to be fixed
decathlon <- decathlon %>%
  rownames_to_column(var = "athlete")

# cleaning the column names
decathlon_clean_names <- clean_names(decathlon)

# check the column data types make sense
glimpse(decathlon_clean_names)

# The athlete names should feature a consistent format
decathlon_clean_names <- decathlon_clean_names %>%
  mutate(
    athlete = str_to_title(athlete)
  )

# we need to make the data "tidy", as rank 1, for example, appears twice
# use the pivot_wider() function (see the toy illustration below)
decathlon_wide <- decathlon_clean_names %>%
  pivot_wider(names_from = competition, values_from = rank)

# finally, output the clean data to a new file
write_csv(decathlon_wide, "clean_data/decathlon_clean.csv")
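# A toy illustration of the pivot_wider() step above (hypothetical values; only the column
# roles -- athlete, competition, rank -- mirror decathlon_clean_names):
toy <- tibble::tribble(
  ~athlete,    ~competition, ~rank,
  "athlete_a", "comp_1",     1,
  "athlete_a", "comp_2",     3
)
toy %>%
  pivot_wider(names_from = competition, values_from = rank)
# result: one row per athlete, with a separate rank column for each competition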
/task1/data_cleaning_scripts/decathlon.R
no_license
pjbateman/dirty_data_codeclan_project_paul
R
false
false
1,141
r
#rm(list = ls());gc() #digits_to_match <- 5 #For two accounts to be considered equal, they must have at least how many digits? #Set main working directory project_folder <- "C:/Users/Felipe/Desktop/Duke MIDS/Modelling and Representation of Data/0 - Final Project/" setwd(project_folder) #Load packages library(magrittr) library(dplyr) library(data.table) library(ggplot2) #Load functions setwd("./Copycat-Towns/Scripts/1 - Data preparation/Functions/") source("translate_dataset_column_names.R") source("create_account_id.R") source("remove_repeating_zeros_between_dots.R") source("as_percentage.R") source("is_unique_key.R") source("change_decimal_separator.R") source("n_digits.R") #Import financial accounts datasets setwd(project_folder) setwd("./Out-of-git/1 - Raw data/") #fin = fread("Brazil Municipalities Balance Sheets 2018.csv") fin_1 = fread("Brazil Municipalities Balance Sheets 2018.csv") fin_2 = fread("Brazil Municipalities Change in Wealth 2018.csv") fin = rbind(fin_1, fin_2) rm(fin_1, fin_2);gc() #Define english translation of column names names_fin_datasets <- c("municipality", "municipality_id", "state", "population", "account_type", "account", "amount") #Translate column names to english fin %<>% translate_dataset_column_names(names_fin_datasets) #Does each account appear only once per municipality? stopifnot(is_unique_key(data_table = fin, key = c("account","municipality_id"))) #Set key setkey(fin, account, municipality_id) # Transforms the amount in accounts from brazilian notation (i.e. comma instead of point for decimals) and # as a character) into a numeric value fin[ , amount := change_decimal_separator(amount)] fin$amount %<>% as.numeric stopifnot(is.numeric(fin$amount)) #Checkpoint fin[,account_type := NULL] #Discard unnecessary variable #Remove accounts with zero value fin <- fin[amount != 0] #Identify different municipalities with the same amount in the same account stopifnot(is_unique_key(data_table = fin, key = c("account","municipality_id"))) #Checkpoint #Save dataset with financial accounts. #It will be used both for finding pairs of accounts #and for answering the question of when is a match a mere coincidence. setwd(project_folder) setwd("./Copycat-Towns/Datasets/2 - Intermediary data/") save(fin, file = "accounts.RData") ################## #NEXT SCRIPT: 15 ##################
/Scripts/1 - Data preparation/10 - Reads financial accounts.R
permissive
felbuch/Copycat-Towns
R
false
false
2,501
r
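The script above sources several helper functions from separate files that are not included here. Purely as an illustration of the kind of logic they encapsulate — these are guesses, not the project's actual code — minimal versions might look like this:

library(data.table)

# "1.234,56" (Brazilian notation) -> "1234.56"
change_decimal_separator <- function(x) {
  gsub(",", ".", gsub(".", "", x, fixed = TRUE), fixed = TRUE)
}

# TRUE when the combination of key columns identifies each row uniquely
is_unique_key <- function(data_table, key) {
  uniqueN(data_table, by = key) == nrow(data_table)
}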
#' @name SDMX #' @rdname SDMX #' @aliases SDMX,SDMX-method #' #' @usage #' SDMX(xmlObj, namespaces) #' #' @param xmlObj object of class "XMLInternalDocument derived from XML package #' @param namespaces object of class "data.frame" given the list of namespace URIs #' @return an object of class "SDMX" #' #' @seealso \link{readSDMX} SDMX <- function(xmlObj, namespaces){ schema <- SDMXSchema(xmlObj, namespaces); header <- SDMXHeader(xmlObj, namespaces); footer <- SDMXFooter(xmlObj, namespaces); new("SDMX", xmlObj = xmlObj, schema = schema, header = header, footer = footer); } #functions namespaces.SDMX <- function(xmlObj){ nsFromXML <- xmlNamespaceDefinitions(xmlObj, addNames = FALSE, recursive = TRUE, simplify = FALSE) nsDefs.df <- do.call("rbind", lapply(nsFromXML, function(x){ out <- NULL if(length(names(x)) > 0) out <- x$uri return(out) })) row.names(nsDefs.df) <- 1:nrow(nsDefs.df) nsDefs.df <- as.data.frame(nsDefs.df, stringsAsFactors = FALSE) if(nrow(nsDefs.df) > 0){ colnames(nsDefs.df) <- "uri" nsDefs.df$uri <- as.character(nsDefs.df$uri) nsDefs.df <- unique(nsDefs.df) nsDefs.df <- nsDefs.df[!duplicated(nsDefs.df$uri),] nsDefs.df <- as.data.frame(nsDefs.df, stringsAsFactors = FALSE) colnames(nsDefs.df) <- "uri" nsDefs.df <- nsDefs.df[ regexpr("http://www.w3.org", nsDefs.df$uri, "match.length", ignore.case = TRUE) == -1,] nsDefs.df <- as.data.frame(nsDefs.df, stringsAsFactors = FALSE) colnames(nsDefs.df) <- "uri" } return(nsDefs.df) } encodeSDMXOutput <- function(df){ for(col in colnames(df)){ if(is(df[,col],"character")) Encoding(df[,col]) <- "UTF-8" } return(df) } #' @name getNamespaces #' @docType methods #' @aliases getNamespaces,SDMX-method #' @title getNamespaces #' @description Access the namespaces of the SDMX-ML object #' @usage getNamespaces(obj) #' #' @param obj An object deriving from class "SDMX" #' @return an object of class \code{data.frame} giving the id and uri for each #' of the namespaces handled in the SDMX-ML document. #' #' @seealso \link{SDMX-class} #' #' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com} if (!isGeneric("getNamespaces")) setGeneric("getNamespaces", function(obj) standardGeneric("getNamespaces")); #' @describeIn getNamespaces Access the namespaces of the SDMX-ML object setMethod(f = "getNamespaces", signature = "SDMX", function(obj){ return(namespaces.SDMX(obj@xmlObj)); }) #others non-S4 methods #==================== #' @name findNamespace #' @aliases findNamespace #' @title findNamespace #' @description function used to find a specific namespace within the available #' namespaces of an SDMX-ML object #' #' @usage #' findNamespace(namespaces, messageType) #' #' @param namespaces object of class \code{data.frame} giving the namespaces URIs #' available in a SDMX-ML object, typically obtained with \link{getNamespaces} #' @param messageType object of class \code{character} representing a message type #' @return an object of class "character" giving the namespace uri if found in the #' available namespaces #' #' @section Warning: #' \code{findNamespace} is a function used internally as utility function in #' SDMX-ML object parsers. 
#' #' @seealso \link{SDMX-class} \link{getNamespaces} #' #' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com} findNamespace <- function(namespaces, messageType){ regexp <- paste(messageType, "$", sep = "") ns <- c(ns = namespaces$uri[grep(regexp, namespaces$uri, ignore.case = TRUE)]) return(ns) } #' @name isSoapRequestEnvelope #' @aliases isSoapRequestEnvelope #' @title isSoapRequestEnvelope #' @description function used to detect if the XML document corresponds to a SOAP #' request response #' @usage #' isSoapRequestEnvelope(xmlObj, namespaces) #' #' @param xmlObj object of class "XMLInternalDocument derived from XML package #' @param namespaces object of class "data.frame" given the list of namespace URIs #' @return an object of class "logical" #' #' @section Warning: #' \code{isSoapRequestEnvelope} is a function used internally by \link{readSDMX} #' #' @seealso \link{SDMX-class} \link{readSDMX} #' #' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com} #' isSoapRequestEnvelope <- function(xmlObj, namespaces){ return(tolower(xmlName(xmlRoot(xmlObj))) == "envelope") } #' @name getSoapRequestResult #' @aliases getSoapRequestResult #' @title getSoapRequestResult #' @description function used to extract the SDMX-ML message from a SOAP request #' response #' @usage #' getSoapRequestResult(xmlObj) #' #' @param xmlObj object of class "XMLInternalDocument derived from XML package #' @return an object of class "XMLInternalDocument derived from XML package #' #' @section Warning: #' \code{getSoapRequestResult} is a function used internally by \link{readSDMX} #' #' @seealso \link{SDMX-class} \link{readSDMX} #' #' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com} getSoapRequestResult <- function(xmlObj){ body <- xmlChildren(xmlRoot(xmlObj)) response <- xmlChildren(body[[1]]); rm(body); result <- xmlChildren(response[[1]]); rm(response); sdmxDoc <- xmlDoc(xmlChildren(result[[1]])[[1]]); rm(result); return(sdmxDoc) } #' @name isRegistryInterfaceEnvelope #' @aliases isRegistryInterfaceEnvelope #' @title isRegistryInterfaceEnvelope #' @description function used to detect if the XML document corresponds to a #' registry interface query #' @usage #' isRegistryInterfaceEnvelope(xmlObj, nativeRoot) #' #' @param xmlObj object of class "XMLInternalDocument derived from XML package #' @param nativeRoot object of class "logical" indicating if it is the native document #' @return an object of class "logical" #' #' @section Warning: #' \code{isRegistryInterfaceEnvelope} is a function used internally by \link{readSDMX} #' #' @seealso \link{SDMX-class} \link{readSDMX} #' #' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com} #' isRegistryInterfaceEnvelope <- function(xmlObj, nativeRoot){ root <- xmlRoot(xmlObj) if(nativeRoot) root <- root[[1]] return(xmlName(root) == "RegistryInterface") } #' @name getRegistryInterfaceResult #' @aliases getRegistryInterfaceResult #' @title getRegistryInterfaceResult #' @description function used to extract the SDMX-ML message from a registry #' interface query #' @usage #' getRegistryInterfaceResult(xmlObj) #' #' @param xmlObj object of class "XMLInternalDocument derived from XML package #' @return an object of class "XMLInternalDocument derived from XML package #' #' @section Warning: #' \code{getRegistryInterfaceResult} is a function used internally by \link{readSDMX} #' #' @seealso \link{SDMX-class} \link{readSDMX} #' #' @author Emmanuel Blondel, \email{emmanuel.blondel1@@gmail.com} getRegistryInterfaceResult <- function(xmlObj){ 
sdmxDoc <- xmlDoc(xmlChildren(xmlRoot(xmlObj))[[1]]) return(sdmxDoc) }
/R/SDMX-methods.R
no_license
cran/rsdmx
R
false
false
7,517
r
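A minimal illustration of the findNamespace() utility defined above, using a hand-made namespace table instead of one returned by getNamespaces(); the URIs are invented for the example.

ns <- data.frame(
  uri = c("http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message",
          "http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic"),
  stringsAsFactors = FALSE
)

findNamespace(ns, "generic")
# returns c(ns = "http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic"),
# because the message type is matched against the end of each namespace URI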
############ scMap ################################# library(scmap) ###Tman is the online dataset I would want to compare my data (SCE) with the Tman dataset. This should already be QC'd and loaded into the environment as normal. ###SCE (your data) can already be in the enviro, but what I tend to do is save the SCE object in the normal R enviro then load it into a separate environment where the Tman data is being QCd ###This way, you're not overloading your main work environment with datasets. The online dataset you're comparing with could be huge for example ####selecting features rowData(Tman.sce.keep.filt)$feature_symbol <- rownames(Tman.sce.keep.filt) Tman.sce.keep.filt <- selectFeatures(Tman.sce.keep.filt, suppress_plot = FALSE) ###indexing based on cyc score Tman.sce.keep.filt <- indexCluster(Tman.sce.keep.filt, cluster_col = "Cyc_Score") ###decided cyclone score for now, but could do something else. ###doesnt look great... heatmap(as.matrix(metadata(Tman.sce.keep.filt)$scmap_cluster_index)) Tman.sce.keep.filt2 <- indexCluster(Tman.sce.keep.filt, cluster_col = "Condition") ####Clustering based on condition ####projection onto G1 and G2M cells SCE.keep.filt <- toSingleCellExperiment(SCE.keep.filt) rowData(SCE.keep.filt)$feature_symbol <- rownames(SCE.keep.filt) scmapCluster_results <- scmapCluster( projection = SCE.keep.filt, index_list = list( yan = metadata(Tman.sce.keep.filt)$scmap_cluster_index, Con = metadata(Tman.sce.keep.filt2)$scmap_cluster_index ) ) ###plotting to see how my data (organised by cell cycle phase) maps to the Cyclone score catagories already summed up before plot( getSankey( colData(SCE.keep.filt)$Cell_Phase, scmapCluster_results$scmap_cluster_labs[,'yan'], plot_height = 400 ) ) ###doing again only this time mappinng my data organised by cell phase to condition of Tman data. plot( getSankey( colData(SCE.keep.filt)$Cell_Phase, scmapCluster_results$scmap_cluster_labs[,'Con'], plot_height = 400 ) ) ###repeating but changing my data to Monocle_State to see how monocle state maps to Tman plot( getSankey( colData(SCE.keep.filt)$Monocle_State, scmapCluster_results$scmap_cluster_labs[,'yan'], plot_height = 400 ) ) plot( getSankey( colData(SCE.keep.filt)$Monocle_State, scmapCluster_results$scmap_cluster_labs[,'Con'], plot_height = 400 ) ) #####Cell to cell mapping ################################## ###This one I don't use at all, but you might find it useful. Instead of seeing what catagories your cells map to in the other dataset, you can map your cells directly to the cells in the other dataset ###The problem is it creates too many pathways, like it's too hard to discern anything useful. set.seed(1) Tman.sce.keep.filt <- indexCell(Tman.sce.keep.filt) ####index here consists of two items: names(metadata(Tman.sce.keep.filt)$scmap_cell_index) ## [1] "subcentroids" "subclusters" length(metadata(Tman.sce.keep.filt)$scmap_cell_index$subcentroids) dim(metadata(Tman.sce.keep.filt)$scmap_cell_index$subcentroids[[1]]) metadata(Tman.sce.keep.filt)$scmap_cell_index$subcentroids[[1]][,1:5] scmapCellTman_results <- scmapCell( SCE.keep.filt, list( yan = metadata(Tman.sce.keep.filt)$scmap_cell_index ) ) scmapCellsTman_clusters <- scmapCell2Cluster( scmapCellTman_results, list( as.character(colData(Tman.sce.keep.filt)$cellType) ) ) plot( getSankey( colData(SCE.keep.filt)$Tissue, scmapCellsTman_clusters$scmap_cluster_labs[,"yan"], plot_height = 400 ) )
/scMap.r
no_license
eoindosullivan/GitHub-scripts
R
false
false
3,577
r
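A small follow-on check for the cluster projection above (it assumes scmapCluster_results from the script is available): scmap labels cells it cannot place confidently as "unassigned", and it is worth knowing how many cells that affects before reading the Sankey diagrams.

assigned <- scmapCluster_results$scmap_cluster_labs[, "yan"]
table(assigned)                   # counts per projected label
mean(assigned == "unassigned")    # fraction of cells scmap could not place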
setwd("C:/Users/sbhowmi/Desktop/Self Learning/Exploratory Data Analyis/Course_Directory/Week3") png(file = "faceData_face.png") load("data/face.rda") image(t(faceData)[, nrow(faceData):1]) png(file = "faceData_Variance_explained.png") svd1 <- svd(scale(faceData)) plot(svd1$d^2/sum(svd1$d^2), pch = 19, xlab = "Singular vector", ylab = "Variance explained") ## create approximations svd1 <- svd(scale(faceData)) ## printing dimensions of U V D print("Dim of U :- ") print(dim(svd1$u)) print("Dim of V :- ") print(dim(svd1$v)) print("Dim of D :- ") print(dim(diag(svd1$d))) ## Note that %*% is matrix multiplication # Here svd1$d[1] is a constant approx1 <- svd1$u[, 1] %*% t(svd1$v[, 1]) * svd1$d[1] # In these examples we need to make the diagonal matrix out of d approx5 <- svd1$u[, 1:5] %*% diag(svd1$d[1:5]) %*% t(svd1$v[, 1:5]) approx10 <- svd1$u[, 1:10] %*% diag(svd1$d[1:10]) %*% t(svd1$v[, 1:10]) # plot approximations png(file = "faceData_approximations_plotted.png") par(mfrow = c(1, 4)) image(t(approx1)[, nrow(approx1):1], main = "(a)") image(t(approx5)[, nrow(approx5):1], main = "(b)") image(t(approx10)[, nrow(approx10):1], main = "(c)") image(t(faceData)[, nrow(faceData):1], main = "(d)") ## Original data graphics.off()
/Week 3/faceData.R
no_license
saurish/EDA-Course-Code
R
false
false
1,247
r
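A follow-on sketch for the SVD script above (it assumes faceData and the approx1/approx5/approx10 matrices created there): besides eyeballing the images, the quality of each low-rank approximation can be quantified as the relative Frobenius norm of its residual against the scaled image.

scaled_face <- scale(faceData)
rel_error <- function(approx) {
  # relative Frobenius norm of the reconstruction error
  sqrt(sum((scaled_face - approx)^2)) / sqrt(sum(scaled_face^2))
}
sapply(list(rank1 = approx1, rank5 = approx5, rank10 = approx10), rel_error)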
install.packages("Tmisc") library(Tmisc) library(tidyverse) data(quartet) View(quartet) quartet %>% group_by(set) %>% summarise(mean(x),sd(x),mean(y),sd(y),cor(x,y)) ggplot(quartet,aes(x,y))+geom_point()+geom_smooth(method = lm, se = FALSE)+facet_wrap(~set) install.packages("datasauRus") library("datasauRus") ggplot(datasaurus_dozen,aes(x=x, y=y, colour=dataset))+geom_point()+theme_void()+theme(legend.position = "none") +facet_wrap(~dataset,ncol = 3)
/R/week3/samedatadifferentoutcome.R
no_license
ashokjha/dataanalytics
R
false
false
481
r
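The datasaurus_dozen plot above makes the same point as the quartet example: very different shapes, nearly identical summary statistics. A sketch of the corresponding numeric check, mirroring the summarise() call used for the quartet (tidyverse is already loaded above):

datasaurus_dozen %>%
  group_by(dataset) %>%
  summarise(mean_x = mean(x), sd_x = sd(x),
            mean_y = mean(y), sd_y = sd(y),
            corr = cor(x, y))
# the means, SDs and correlations agree to about two decimal places across datasets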
#rm(list=ls()) # import package library(DT) library(leaflet) library(shiny) library(dplyr) library(readr) library(tidytext) library(ggplot2) library(tidyr) library(forcats) #set working directory #setwd("") # import data #business <- read_csv("business_city.csv") # set restaurant name business_tag <- c("McDonald's","Burger King","Five Guys","Wendy's","Shake Shack") # choose business name #business <- business[business$name%in%business_tag,] # choose variable #business <- business %>% # select(business_id, name, address, city, state, postal_code, # stars, review_count, is_open, categories,longitude,latitude) # The business result data #review_business <- business #######Find origin text####################### #mystopwords <- tibble(word = c("bk", "mcdonald", "thru", "morning", "breakfast", "burgerking", "soul", "xt", "snow", "passenger", "q", "concrete", "satisfrie", "cle", "nice", "burger", "love", "perfect", "lot", "super", "fine", "pleasant", "food", "worst", "thi", "king", "bad", "eat", "told", "reivew", "regular", "add", "talk", "heard", "suck", "lunch", "shame", "guy", "wendy")) #import data #fast_food_reviews <- read_csv("review_city.csv") #choose business id #fast_food_reviews <- fast_food_reviews[fast_food_reviews$business_id%in%business$business_id,] #join business name #fast_food_reviews <- inner_join(fast_food_reviews,business) #calculate review count #fast_food_reviews_count <- fast_food_reviews%>%group_by(business_id)%>%dplyr::summarise(review_count=sum(review_count)) #join the review count #review_business <- left_join(review_business,fast_food_reviews_count) ################################################## ##########Load Data############# ################################################## business_tag <- c("McDonald's","Burger King","Five Guys","Wendy's","Shake Shack") review_business <- read_csv("review_business.csv") fast_food_reviews <- read_csv("fast_food_reviews.csv") ####load coefficients and tf_idf data######## coefficients <- read_csv("coefficients.csv") review_tf_idf <- read_csv("review_tf_idf.csv") server = function(input, output, session) { #choose one of the five restaurant Pass <- reactive({ review_business[review_business$name==input$restaurant,] }) #choose the state output$a <- renderUI({ selectInput("state","Please Select State:",c("ALL",Pass()$state)) }) #choose the city output$b <- renderUI({ selectInput("city","Please Select City:",c("ALL",Pass()[Pass()$state==input$state,]$city)) }) #output the table output$DT <- renderDT({ if(input$state=="ALL"&input$city=="ALL"){ Pass()%>%select(name,address,stars) } else if (input$state!="ALL"&input$city=="ALL"){ Pass()%>%filter(state==input$state)%>%select(name,address,stars) } else { Pass()%>%filter(city==input$city&state==input$state)%>%select(name,address,stars) } }) #output the stars distribution output$plot1 <- renderPlot({ if(input$state=="ALL"){ pass <- Pass()%>%select(business_id,name,stars) } else if (input$state!="ALL"&input$city=="ALL"){ pass <- Pass()%>%filter(state==input$state)%>%select(business_id,name,stars) } else { pass <- Pass()%>%filter(city==input$city&state==input$state)%>%select(business_id,name,stars) } ggplot(data=pass,aes(x=stars,fill="red"))+geom_histogram() }) #output the single word plot #output Top 20 positive and negative words output$plot2 <- renderPlot({ # Obtain the coefficients corresponding to specific restaurant restaurant_coefficients <- inner_join(coefficients,review_tf_idf %>% filter(name==input$restaurant)) %>% mutate(score=coef*tf) # Top 30 positive and negative words 
plot(restaurant_coefficients %>% top_n(30, abs(score)) %>% mutate(word = reorder(word, score)) %>% head(input$n) %>% ggplot(aes(word, score, fill = score > 0)) + geom_col(show.legend = FALSE) + labs(y = "score", x = "word", title = input$restaurant) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + coord_flip()) }) #output the bigram plot output$plot3 <- renderPlot({ mystopwords <- tibble(word = c("bk", "mcdonald", "thru", "morning", "breakfast", "burgerking", "soul", "xt", "snow", "passenger", "q", "concrete", "satisfrie", "cle", "nice", "burger", "love", "perfect", "lot", "super", "fine", "pleasant", "food", "worst", "thi", "king", "bad", "eat", "told", "reivew", "regular", "add", "talk", "heard", "suck", "lunch", "shame", "guy", "wendy")) (reviews_bigram_tf_idf <- fast_food_reviews %>% unnest_tokens(bigram, text, token = "ngrams", n = 2) %>% separate(bigram, c("word1", "word2"), sep = " ") %>% filter(!word1 %in% stop_words$word, !word2 %in% stop_words$word, !word1 %in% mystopwords$word, !word2 %in% mystopwords$word) %>% unite(bigram, word1, word2, sep = " ") %>% dplyr::count(name, bigram) %>% bind_tf_idf(bigram, name, n) %>% arrange(desc(tf_idf)) ) d <- reviews_bigram_tf_idf %>% group_by(name) %>% slice_max(tf_idf, n=input$n) %>% ungroup() #draw the plot ggplot(data=d%>% filter(name==input$restaurant),aes(tf_idf, fct_reorder(bigram, tf_idf), fill = name)) + geom_col(show.legend = FALSE) + facet_wrap(~name, ncol = 2, scales = "free") + labs(x = "tf-idf", y = NULL) }) #suggestion output$d <- renderUI({ switch(input$restaurant, "McDonald's"= h5("Suggestion of McDonald's: Please improve service speed and enhence work efficiency. Top Tags: Quarter Pounder,Happy Meal,Egg MCMuffin"), "Burger King"= h5("Suggestion of Burger King: Please keep food warm without overcook and avoid sending wrong meal and wrong tempertaure. Top Tags: Double Whopper, Pretzel Bun, Onion Rings "), "Five Guys"= h5("Suggestion of Five Guys: Please improve quality of bacon and notice the refund service. Top Tags: Guys Burgers, Cajun Fries"), "Wendy's"= h5("Suggestion of Wendy's: Please improve service speed and attitude. PLease provide fresh chicken. Top Tags:Dave Thomas, Spicy Chicken"), "Shake Shack"= h5("Suggestion of Shake Shack: Please to increase the quantity demand for fries and speed up; Top Tags: Shake Shack, Strawberry Shake, Chessy Fries ")) }) #draw the map output$fast_food_map <- renderLeaflet({ color <- c("red","blue","green","purple","cyan") if(input$state=="ALL"){ pass <- Pass() } else { pass <- Pass()%>%filter(state==input$state) } Pass<- pass%>%dplyr::mutate(stars=round(stars))%>%na.omit()%>%data.frame() pal <- colorFactor(as.character(color), domain = Pass$stars) leaflet(Pass) %>% addTiles() %>%addLegend(position = "bottomleft",pal=pal,values = Pass$stars)%>% addCircles(as.numeric(Pass$longitude), as.numeric(Pass$latitude),color = pal(Pass$stars), radius=Pass$review_count*50,popup = paste("Address:",Pass$address,". ","Review Count:",Pass$review_count),fillOpacity = 1,stroke = FALSE) }) } shinyApp(ui= ui, server=server)
/Code/server.R
no_license
SixuLi/STAT-628-Module-3
R
false
false
7,938
r
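A standalone sketch (not part of the app above): the tidytext tf-idf pipeline used in the plot3 block, reduced to a tiny hand-made two-restaurant "corpus" so its behaviour is easy to see. The restaurant names and review strings are invented for the example.

library(dplyr)
library(tidytext)

toy_reviews <- tibble::tibble(
  name = c("A", "A", "B", "B"),
  text = c("cold fries", "cold burger", "great shake", "great fries")
)

toy_reviews %>%
  unnest_tokens(word, text) %>%    # one row per word
  count(name, word) %>%            # word counts per restaurant
  bind_tf_idf(word, name, n) %>%   # tf-idf with restaurants as "documents"
  arrange(desc(tf_idf))            # distinctive words float to the top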
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynnet.R
\name{get_graph}
\alias{get_graph}
\title{Select Graph from a \code{dynnet} Object}
\usage{
get_graph(object, period = NULL)
}
\arguments{
\item{object}{\code{\link{dynnet}} object}

\item{period}{Integer, numeric, or \code{NULL} indicating which graph or set
of graphs to return. See Details.}
}
\value{
An \code{\link{igraph}} graph or the list of graphs.
}
\description{
Returns an \code{\link{igraph}} graph or list of graphs composing the
dynamic network.
}
\details{
If \code{period} is \code{NULL}, a list of all graphs in the specified
object is returned. If an integer is provided, then the graph associated
with that period is returned. Supplying a vector of integers will return a
list of the specified graphs.
}
\author{
Jason W. Morgan \email{jason.w.morgan@gmail.com}
}
/man/get_graph.Rd
no_license
petershan1119/dynnet
R
false
true
875
rd
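A hypothetical usage sketch for the help page above, assuming `net` is an existing dynnet object with at least three observation periods (how `net` is constructed is outside the scope of this page):

g2     <- get_graph(net, period = 2)        # the igraph graph for period 2
g_some <- get_graph(net, period = c(1, 3))  # a list with the graphs for periods 1 and 3
g_all  <- get_graph(net)                    # period = NULL: a list of all graphs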
#Statistical Inference Course - Simulation Exercise
#by Preetika Srivastava
#Multiple simulations will be run to compare the theoretical mean and standard deviation of the exponential
#distribution with those obtained by simulation. This exercise leverages the idea of the 'Central Limit Theorem'

set.seed(3007)
lambda = 0.2
n = 40
simNum = 1000

plot(rexp(10000,lambda ), pch = 20, cex=0.6, main = "Exponential Dist with rate as 0.2 and 10,000 obs")
plot(rexp(10000,lambda ), pch = 20, cex=0.6, main = "Exponential Dist with rate as 0.2 and 10,000 obs", col= "red")
#See fig 1 for the output of above code.

#generating the collection of means for 1000 simulations of exp. distribution
myMean = NULL
for(k in 1:simNum) myMean = c(myMean, mean(rexp(n, lambda)))
hist(myMean, col="green", main= "rexp Mean Dist", breaks = 50)
rug(myMean)
#See fig 2 for the output of above code.

#Calculating the mean by simulated values and visualizing it
round(mean(myMean), 3)
hist(myMean, col="green", main="Comparing theoretical vs actual Mean", breaks = 50)
abline(v = mean(myMean), lwd = 5, col="red")
#See fig 3 for the output of the above code.

#Check for Standard Deviation
#theoretical
round( (1/lambda)/sqrt(n) ,4)
#Actual
round(sd(myMean) ,4)

#Is the distribution normal?
hist(myMean, prob=TRUE, col="darkblue", main="mean distribution for rexp()", breaks=50)
lines(density(myMean), lwd=5, col="red")
#See fig 5 for the output of the above code.
/Simulation_Exercise.R
no_license
ankurkhaitan/Statistical-Inference-Coursera
R
false
false
1,453
r
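A follow-on sketch for the simulation above (it assumes myMean, lambda and n from that script): the CLT predicts the simulated means should be approximately N(1/lambda, (1/lambda)^2/n), i.e. centred at 5 with standard deviation 5/sqrt(40). Overlaying that density makes the theoretical-versus-empirical comparison explicit.

hist(myMean, prob = TRUE, col = "lightgrey", breaks = 50,
     main = "Simulated means vs CLT normal approximation")
curve(dnorm(x, mean = 1/lambda, sd = (1/lambda)/sqrt(n)),
      add = TRUE, lwd = 3, col = "blue")
abline(v = 1/lambda, lty = 2)  # theoretical mean = 5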
gsagenie.reset.run <- function(session, input, output, step1_status, step2_status, step3_status, result_status, event='') { if (event == 'go_back') { updateCheckboxInput(session, 'detail', value=FALSE); updateCheckboxInput(session, 'run_button', value=TRUE); updateCheckboxInput(session, 'step3_show', value=TRUE); updateCheckboxInput(session, 'result_show', value=FALSE); updateCheckboxInput(session, 'result_plot', value=FALSE); updateCheckboxInput(session, 'result_split', value=FALSE); updateCheckboxInput(session, 'result.split', value=FALSE); updateCheckboxInput(session, 'result_button', value=FALSE); step3_status$result <- NULL; step3_status$metadata <- NULL; result_status$geneset <- NULL; } else {} result_status }
/source/gsagenie.reset.run.r
no_license
zhezhangsh/gsagenie
R
false
false
778
r
# Copyright 2022 Observational Health Data Sciences and Informatics # # This file is part of PheValuator # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #' @keywords internal "_PACKAGE" #' @importFrom stats quantile runif binom.test #' @importFrom utils capture.output write.csv install.packages menu unzip #' @importFrom methods is #' @importFrom stats aggregate #' @import DatabaseConnector #' @import FeatureExtraction #' @import PatientLevelPrediction NULL
/R/PheValuator.R
permissive
gowthamrao/PheValuator
R
false
false
961
r
\name{loadAgilentDataFlux}
\alias{loadAgilentDataFlux}
\title{Load Agilent data text files}
\description{Load Agilent data text files}
\usage{
loadAgilentDataFlux(ifile, ofile = NULL, params = list())
}
\arguments{
  \item{ifile}{Tab-separated input file}
  \item{ofile}{Optional .Rdata file to store the object}
  \item{params}{Parameters for loading/parsing and organising the data}
}
\value{Experiment data object}
\author{
David Enot \email{david.enot@gustaveroussy.fr}
}
\seealso{
\code{\link{paramsParsing}}
}
\examples{
testfiles=list.files(system.file( package = "GRMeta"),pattern = "txt$",full.names = TRUE)
mydata=loadAgilentDataFlux(ifile =testfiles[4])
print(mydata)
}
\keyword{misc}
/man/loadAgilentDataFlux.Rd
no_license
tonedivad/GRMeta
R
false
false
697
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/aiplatform_objects.R \name{GoogleCloudAiplatformV1TrialParameter} \alias{GoogleCloudAiplatformV1TrialParameter} \title{GoogleCloudAiplatformV1TrialParameter Object} \usage{ GoogleCloudAiplatformV1TrialParameter() } \value{ GoogleCloudAiplatformV1TrialParameter object } \description{ GoogleCloudAiplatformV1TrialParameter Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} A message representing a parameter to be tuned. } \concept{GoogleCloudAiplatformV1TrialParameter functions}
/googleaiplatformv1.auto/man/GoogleCloudAiplatformV1TrialParameter.Rd
no_license
justinjm/autoGoogleAPI
R
false
true
595
rd
rankall <- function(outcome, num = "best") {
  ## Read outcome data
  outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Check that outcome is valid and map it to its mortality-rate column
  if (outcome == "heart attack") {
    oc <- 11
  } else if (outcome == "heart failure") {
    oc <- 17
  } else if (outcome == "pneumonia") {
    oc <- 23
  } else {
    stop("invalid outcome")
  }

  ## The file is read with colClasses = "character", so convert the mortality
  ## rates to numeric ("Not Available" becomes NA) and drop the missing rows;
  ## otherwise the ranking below would be alphabetical rather than numeric
  outcome_data[, oc] <- suppressWarnings(as.numeric(outcome_data[, oc]))
  outcome_data <- outcome_data[!is.na(outcome_data[, oc]), ]

  splitted_state <- split(outcome_data, outcome_data$State)

  results <- list()

  ## For each state, find the hospital of the given rank
  ## Return a data frame with the hospital names and the
  ## (abbreviated) state name
  for (state in unique(sort(outcome_data$State))) {
    state_data <- splitted_state[[state]]

    ## order by mortality rate, breaking ties by hospital name
    sorted_hospitals <- state_data[order(state_data[, oc], state_data$Hospital.Name), ]$Hospital.Name

    ## resolve the requested rank separately for every state, so that
    ## "worst" refers to each state's own number of hospitals
    if (num == "best") {
      rank <- 1
    } else if (num == "worst") {
      rank <- length(sorted_hospitals)
    } else {
      rank <- num
    }

    ## indexing past the end yields NA, which is the required result when a
    ## state has fewer hospitals than the requested rank
    results <- rbind(results, list(sorted_hospitals[rank], state))
  }

  results <- as.data.frame(results)
  colnames(results) <- c('hospital', 'state')
  print(results)
}
/rankall.R
no_license
basmaNasser/Programming-Assignment-3--Hospital-Quality
R
false
false
1,808
r
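Hypothetical usage of rankall() above (it assumes outcome-of-care-measures.csv from the course data sits in the working directory):

head(rankall("heart attack", 20), 10)    # 20th-ranked hospital in each state
tail(rankall("pneumonia", "worst"), 3)   # worst-ranked pneumonia hospital per state
rankall("heart failure")                 # num defaults to "best"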
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.craters}
\alias{dasl.craters}
\title{Craters}
\format{168 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/craters/?sf_paged=9}{Craters}
}
\description{
Meteor Crater in Arizona was the first recognized impact crater and was identified as
such only in the 1920s. With the help of satellite images, more and more craters have
been identified; now more than 180 are known. These, of course, are only a small sample
of all the impacts the earth has experienced: Only 29\% of earth’s surface is land, and
many craters have been covered or eroded away. Astronomers have recognized a roughly
35 million-year cycle in the frequency of cratering, although the cause of this cycle
is not fully understood. The data hold information about craters. Craters from the most
recent 35 Ma (million years) may be the more reliable data, and are suitable for
analyses relating age and diameter.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
Earth Impact Database \url{http://www.unb.ca/passc/ImpactDatabase/}
}
\concept{Regression}
/man/dasl.craters.Rd
no_license
sigbertklinke/mmstat.data
R
false
true
1,238
rd
\name{as.data.frame.H2OParsedData}
\alias{as.data.frame.H2OParsedData}
\title{Converts a parsed H2O object to a data frame.}
\description{Convert an \code{\linkS4class{H2OParsedData}} object to a data frame, which allows subsequent data frame operations within the R environment.}
\usage{\method{as.data.frame}{H2OParsedData}(x, ...)}
\arguments{
  \item{x}{ An \code{\linkS4class{H2OParsedData}} object.}
  \item{...}{Additional arguments to be passed to or from methods.}
  }
\value{
Returns a data frame in the R environment. Note that this call establishes the data set in the R environment, and subsequent operations on the data frame take place within R, not H2O. When data are large, users may experience significant slowdowns.
}
\examples{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, silentUpgrade = TRUE, promptUpgrade = FALSE)
prosPath = system.file("extdata", "prostate.csv", package="h2oRClient")
prostate.hex = h2o.importFile(localH2O, path = prosPath)
prostate.data.frame <- as.data.frame(prostate.hex)
summary(prostate.data.frame)
head(prostate.data.frame)
h2o.shutdown(localH2O)
}
/R/h2oRClient-package/man/as.data.frame.Rd
permissive
svaithianatha/h2o
R
false
false
1,114
rd
#' @title Document Distances #' @description Calculate distances between pairs of documents. #' #' @param document_term_matrix A sparse matrix object of class #' "simple_triplet_matrix", or a dense matrix object, with documents as rows and #' vocabulary entries as columns. #' @param document_indicies A numeric vector of length two (document_a_row_index, #' document_b_row_index), or a list object with each entry containing a vector #' of length two as described above. #' @param distance_method Can be one of "cosine" or "euclidean". Defaults to #' "cosine". #' @return A vector of document pair distances #' @export calculate_document_pair_distances <- function(document_term_matrix, document_indicies, distance_method = "cosine"){ ptm <- proc.time() # get the number of comparisons num_comparisons <- 1 if (class(document_indicies) == "list") { num_comparisons <- length(document_indicies) } else if ((class(document_indicies) == "numeric") & (length(document_indicies) == 2)){ document_indicies <- list(first_pair = document_indicies) } else { stop("You have provided document_indicies in the wrong form...") } pair_distances <- rep(0, num_comparisons) for (i in 1:num_comparisons) { cat("Currently working on document pair",i,"of",num_comparisons,"\n") indicies <- document_indicies[[i]] cat("Extracting rows...\n") # extract rows, this will take a while if (class(document_term_matrix) == "simple_triplet_matrix") { document_1 <- as.numeric(as.matrix(document_term_matrix[indicies[1],])) } else { document_1 <- document_term_matrix[indicies[1],] } if (class(document_term_matrix) == "simple_triplet_matrix") { document_2 <- as.numeric(as.matrix(document_term_matrix[indicies[2],])) } else { document_2 <- document_term_matrix[indicies[2],] } data <- rbind(document_1,document_2) cat("Calculating pairwise distance...\n") # calculate the document similarity matrix simil <- proxy::simil(data, method = distance_method) simil <- as.matrix(simil) distances2 <- proxy::pr_simil2dist(simil) pair_distances[i] <- distances2[2,1] } t2 <- proc.time() - ptm cat("Complete in:",t2[[3]],"seconds...\n") return(pair_distances) }
/R/calculate_document_pair_distances.R
no_license
bethanyleap/SpeedReader
R
false
false
2,368
r
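A short usage sketch for calculate_document_pair_distances() with a small dense document-term matrix; the matrix counts and vocabulary below are invented for illustration, and loading the package is assumed to make the function available.

# Hypothetical usage; the counts are made up for illustration only.
library(SpeedReader)   # assumed to export the function documented above

dtm <- matrix(c(2, 0, 1, 3,
                1, 1, 0, 2,
                0, 4, 2, 0),
              nrow = 3, byrow = TRUE,
              dimnames = list(paste0("doc", 1:3), c("tax", "vote", "bill", "law")))

# Cosine distance between documents 1 and 2, and between 1 and 3
pairs <- list(c(1, 2), c(1, 3))
calculate_document_pair_distances(dtm, pairs, distance_method = "cosine")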
library(xgboost) library(boot) ExpYParam_xg = function(allpossible,DATA){ inVar = data.matrix(DATA[,c(1:(ncol(DATA)-1))]) predInVar = data.matrix(allpossible[,c(1:(ncol(allpossible)))]) outVar = DATA$Y model_xgboost = xgboost(verbose = 0, data = data.matrix(inVar), label = outVar, nrounds = 20,max.depth=10,lambda=0,alpha=0, objective = "binary:logistic") predval = predict(model_xgboost,newdata=predInVar,type='response') Ytable = allpossible Ytable$Y = predval return(Ytable) } highdim_reg_xgboost = function(OBS, outVarVector){ ### P(W), P(W|X) for (d in 1:D){ outVar = data.matrix(outVarVector[,d]) if (d > 1){ # P(r) inVar = data.matrix(outVarVector[,c(1:(d-1))]) MyModel = xgboost(verbose = 0, data = data.matrix(inVar), label = outVar, nrounds = 20,max.depth=10,lambda=0,alpha=0, objective = "binary:logistic") list.Pr = c(list.Pr, list(MyModel)) }else{ inVar = data.matrix(rep(1,nrow(OBS))) MyModel = xgboost(verbose = 0, data = data.matrix(inVar), label = outVar, nrounds = 20,max.depth=10,lambda=0,alpha=0, objective = "binary:logistic") list.Pr = list(MyModel) } } return(list.Pr) } PlugInEstimator = function(OBS,D){ W = OBS[,1:D] X = OBS[,(D+1)] Z = OBS[,(D+2)] Y = OBS[,(D+3)] DATA = data.frame(W,X,Z,Y) ################################################################################ # Enumerate all possible values of column ################################################################################ C = 1 tmp = c() for (d in 1:D){ tmp = append(tmp,list(c(0:C))) # W } Wname = paste("W",1:D,sep="") tmp = append(tmp,list(c(0,1))) # X tmp = append(tmp, list(c(0:1))) # Z allpossible = expand.grid(tmp) colnames(allpossible) = c(Wname,'X','Z') ################################################################################ ################################################################################ # Conditional Probability Table ################################################################################ ################################################################################ # E[Y|X,Z,W] Ytable = ExpYParam_xg(allpossible,DATA) ################################################################################ # Learn P(w) ################################################################################ model.W = highdim_reg_xgboost(OBS,W) tmp = rep(1,nrow(allpossible)) for (d in 1:D){ if (d == 1){ predInVar = data.matrix(rep(1,nrow(allpossible))) }else{ predInVar = data.matrix(allpossible[,c(1:(d-1))]) } predval = predict(model.W[[d]],newdata=predInVar,type='response') # P(Zd =1 | Z(d-1),Z(d-2),...,Z(1),X) # predval[predval < 0] = 1e-8 # predval[predval > 1] = 1 - 1e-8 resultval = predval * allpossible[d] + (1-predval) * (1-allpossible[d]) tmp = tmp * resultval } PwTable = allpossible PwTable[,(ncol(allpossible)+1)] = tmp colnames(PwTable)[ncol(PwTable)] = 'prob' ################################################################################ ################################################################################ # Learn P(x|w) ################################################################################ PxTable = allpossible inVar = data.matrix(W) outVar = X predInVar = data.matrix(allpossible[,c(1:D)]) model.X = xgboost(verbose=0, data=inVar,label=outVar, nrounds = 20,max.depth=10,lambda=0,alpha=0, objective = "binary:logistic") predX = predict(model.X,newdata = predInVar,type="response") tmp = predX*allpossible$X + (1-predX)*(1-allpossible$X) PxTable[,(ncol(allpossible)+1)] = tmp colnames(PxTable)[ncol(PxTable)] = 'prob' 
################################################################################ ################################################################################ # Learn P(z|x,w) ################################################################################ PzTable = allpossible inVar = data.matrix(data.frame(W,X)) outVar = Z predInVar = data.matrix(allpossible[,c(1:(D+1))]) model.Z = xgboost(verbose=0, data=inVar,label=outVar, nrounds = 20,max.depth=10,lambda=0,alpha=0, objective = "binary:logistic") predZ = predict(model.Z,newdata = predInVar,type="response") tmp = predZ*allpossible$Z + (1-predZ)*(1-allpossible$Z) PzTable[,(ncol(allpossible)+1)] = tmp colnames(PzTable)[ncol(PzTable)] = 'prob' ################################################################################ ################################################################################ # Compute! ################################################################################ # Store the original table allpossibleOrig = allpossible ComputeVal = allpossible # Compute E[Y|W,X,Z] * P(X|W) ComputeVal$YX = Ytable$Y * PxTable$prob # Compute P(Z|X,W) * P(W) ComputeVal$ZW = PzTable$prob * PwTable$prob # Marginalize for X Margin.over.x.YX = ComputeVal[ComputeVal$X==0,'YX'] + ComputeVal[ComputeVal$X==1,'YX'] # Fix X=x for P(Z|x,W) * P(W) ZW.X0 = ComputeVal[ComputeVal$X==0,'ZW'] ZW.X1 = ComputeVal[ComputeVal$X==1,'ZW'] # Compute Causal effect Yx0 = sum(Margin.over.x.YX * ZW.X0) Yx1 = sum(Margin.over.x.YX * ZW.X1) myans = c(Yx0,Yx1) return(myans) }
/demonstration/Example_2-mediator/mediator-plugin.R
no_license
CMLennon/WERM
R
false
false
5,384
r
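An illustrative call to PlugInEstimator() above. The simulated data-generating process is invented; it only matches the column layout the estimator expects (D binary W columns, then binary X, Z, Y). D is assigned globally because highdim_reg_xgboost() reads it from the enclosing environment.

# Hypothetical example; coefficients and sample size are arbitrary.
set.seed(1)
D <- 2
n <- 2000
W <- matrix(rbinom(n * D, 1, 0.5), ncol = D)
X <- rbinom(n, 1, plogis(0.5 * W[, 1] - 0.5 * W[, 2]))
Z <- rbinom(n, 1, plogis(1.0 * X + 0.3 * W[, 1]))
Y <- rbinom(n, 1, plogis(1.5 * Z - 0.5 * W[, 2]))
OBS <- data.frame(W1 = W[, 1], W2 = W[, 2], X = X, Z = Z, Y = Y)

# Returns c(E[Y | do(X = 0)], E[Y | do(X = 1)]) from the plug-in formula
PlugInEstimator(OBS, D)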
\name{microplot-internal} \title{Internal microplot functions} \alias{optionsCmds} \alias{sys} \alias{plot_grid} \alias{get_legend} \alias{latex} \alias{dvi} \description{Internal microplot functions.} \details{\code{optionsCmds} and \code{sys} and \code{latex} and \code{dvi} are imported from \pkg{Hmisc}. See \code{\link[Hmisc]{sys}}. \code{plot_grid} and \code{get_legend} are imported from \pkg{cowplot}. See \code{\link[cowplot]{plot_grid}} and \code{\link[cowplot]{get_legend}}. } \author{ Richard M. Heiberger <rmh@temple.edu> } \keyword{internal}
/man/microplot-internal.Rd
no_license
cran/microplot
R
false
false
566
rd
#' Load and check a frequency file
#'
#' Loads a frequency database file and compares it against mixture data to check for common errors.
#'
#' @param filename Path of the frequency database file
#' @param mix Data frame with mixture data. See relMix vignette for description of the format
#' @return A list containing
#' \itemize{
#' \item {\code{df}} {Data frame with frequencies}
#' \item {\code{warning}} {List of strings describing the errors that occurred but could be fixed or that do not prevent
#' the execution of the program.}
#' \item {\code{error}} {List of strings describing the errors that occurred that made it impossible to return a valid data frame.
#' If this list is not empty, then the dataframe item will be NULL}}
#' @details
#' The mixture data is used to perform more advanced checks, such as to make sure all alleles present
#' in the mixture file have an entry in the frequency database.
#' If warnings are found, the function attempts to fix them and explains what it has done in the warning messages.
#' If an error is found, checking stops and a NULL dataframe is returned. The error is described in the error messages.
#' @seealso \code{\link{checkMixtureFile}} for information on how to load a mixture file.
#' @examples
#' \dontrun{
#' mixfile <- system.file("extdata","mixture.txt",package="relMix")
#' mix <- checkMixtureFile(mixfile)
#' # note: the mixture dataframe is passed as an argument
#' # if the previous check failed, the program should not continue
#' # with the frequencies file check
#' freqfile <- system.file('extdata','frequencies22Markers.txt',package='relMix')
#' freqs <- checkFrequenciesFile(freqfile, mix$df)
#' }
#' @author Elias Hernandis
#' @importFrom utils combn
#' @export
checkFrequenciesFile <- function(filename, mix) {
    r <- commonChecks(filename, "frequencies file");
    df <- r$df;
    warning <- r$warning;
    error <- r$error;

    if (is.null(df)) {
        return(list(df=NULL, error=error, warning=NULL));
    }

    # Make sure it says "Allele" on cell (1,1)
    if (length(error) == 0 && names(df)[1] != "Allele") {
        if (titleize(names(df)[1]) == "Allele") {
            warning <- append(warning, "The first column must be named \"Allele\" (fixed)");
            names(df)[1] <- "Allele";
        }
    }

    # Check that all alleles are numeric
    if (length(error) == 0 && !all(sapply(df$Allele, is.numeric))) {
        error <- append(error, "There are values that are not numeric in the Allele column of the frequency file");
    }

    # Check that all frequencies are either numeric or NA
    if (length(error) == 0) {
        for (i in 2:ncol(df)) {
            if (!all(sapply(df[,i], is.numeric))) {
                error <- append(error, paste("There are non-numeric frequencies in column", i, "of the frequency file."));
            }
        }
    }

    #If more than one marker
    if(ncol(df)>2){
        # Check for duplicate markers (note: R may already have renamed
        # duplicate columns when loading with headers)
        if (length(error) == 0 && anyDuplicated(names(df)[-1]) > 0) {
            error <- append(error, paste("There are duplicate markers in your frequency table."));
        }

        comb <- combn(union(names(df)[-1], mix$Marker), 2);
        for (i in 1:ncol(comb)) {
            m1 <- comb[1,i];
            m2 <- comb[2, i];
            if (levenshteinDistance(m1, m2) == 1) {
                warning <- append(warning, paste("Found two markers with very close names: did you mean", m1, "or", m2, "?"));
            }
        }
    }

    # Check that all marker names present in the mixture file are present in the frequency file
    if (length(error) == 0 && !all(mix$Marker %in% names(df))) {
        missingFreqs <- paste(setdiff(mix$Marker, names(df)), collapse=", ");
        error <- append(error, paste("The frequency database does not
                             contain all markers present in the mixture file. The following are missing: ", missingFreqs));
    }

    if (length(error) > 0) {
        return(list(df=NULL, warning=NULL, error=error));
    }

    return(list(df=df, warning=warning, error=NULL));
}
/R/checkFrequenciesFile.R
no_license
gdorum/relMix
R
false
false
4,121
r
#-----------------# # Function for S1 # #-----------------# #Parameters for group-sequential design NF1 <- 60 #NSN of fix design alpha <- 0.025 beta <- 0.1 z1<- -qnorm(alpha,0,1) z2<- -qnorm(beta,0,1) theta.nc<-log( (0.7*(1-0.5)) / ((0.5*(1-0.7))) ) theta<-log( (0.7*(1-0.5)) / ((0.5*(1-0.7))) ) * ((2*z1)/(z1+z2)) a<-(2/theta)*log(1/(2*alpha)) c<-0.25*theta S1.func<-function() { #Table of simulations S1.simu<-data.frame(array(NA,c(K,7))) colnames(S1.simu)<-c("NJ.S1","pIJ.S1","ZJ.S1","BJinf.S1","BJsup.S1","ccl.S1","sig_level.S1") for (k in 1:K){ #Intervention group I <- rbinom(N.S1,1,pI) N<-0 S<-0 V<-0 zsup<-0 zinf<-0 Z<-0 indic<-1 #Indicator, if = 0 : trial end nc <- NULL #Inconclusive trial (yes/no) #Analysis j while (indic==1) { #Sampling without replacement in I for delta patients included in j if ((length(I)-delta)<0) { index <- sample(1:length(I), length(I), replace=F) } else { index <- sample(1:length(I), delta, replace=F) } Ij <- I[index] I <- I[-index] deltaS <- sum(Ij) N<-N+length(index) S<-S+deltaS #Stopping rules deltaV<-delta*pC*(1-pC) V<-V+deltaV zsup<- a + c*V - 0.583*sqrt(deltaV) zinf<- -a + 3*c*V+0.583*sqrt(deltaV) #Test stat Z<-S-N*pC #Conclusion (ccl) ccl <- ifelse(Z>=zsup,1,0) #Indicator for While Loop indic <- ifelse((length(I)>0) & Z>=zinf & Z<zsup,1,0) #Inconclusive yes/no nc <- ifelse(ccl==0 & Z>zinf,1,0) } S1.simu$NJ.S1[k] <- N S1.simu$pIJ.S1[k] <- S/N S1.simu$ZJ.S1[k] <- Z S1.simu$BJinf.S1[k] <- zinf S1.simu$BJsup.S1[k] <- zsup S1.simu$ccl.S1[k] <- ccl S1.simu$nc.S1[k] <- nc S1.simu$sig_level.S1[k] <- ifelse(nc==1,(1-pnorm(abs(Z)/sqrt(V))),NA) } write.table(S1.simu,paste(chemin.details,spec,"_",scenario,".csv",sep=""),sep=";",row.names=F) res$NJMed.S1[which(res$yx==scenario)] <- median(S1.simu$NJ.S1) res$NJ5.S1[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1,0.05) res$NJ95.S1[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1,0.95) res$pct.supNF1[which(res$yx==scenario)] <- round((sum(S1.simu$NJ.S1>NF1)/K),4) res$sig_levelMed.S1[which(res$yx==scenario)] <- median(S1.simu$sig_level.S1,na.rm=TRUE) res$sig_level5.S1[which(res$yx==scenario)] <- quantile(S1.simu$sig_level.S1,0.05,na.rm=TRUE) res$sig_level95.S1[which(res$yx==scenario)] <- quantile(S1.simu$sig_level.S1,0.95,na.rm=TRUE) res$sig_levelNA.S1[which(res$yx==scenario)] <- round(sum(is.na(S1.simu$sig_level.S1))/K,5) res$sig_level_alpha.S1[which(res$yx==scenario)] <- round((sum(S1.simu$sig_level.S1<0.025,na.rm=TRUE)/K),4) res$pIJ.S1[which(res$yx==scenario)] <- round(mean(S1.simu$pIJ.S1),3) res$p.S1[which(res$yx==scenario)] <- round((sum(S1.simu$ccl.S1)/K),4) res$nc.S1[which(res$yx==scenario)] <- round((sum(S1.simu$nc.S1)/K),4) res$futil.S1[which(res$yx==scenario)] <- round(nrow(subset(S1.simu,S1.simu$nc.S1==0 & S1.simu$ccl.S1==0))/K,4) res$NJMed.S1_Eff[which(res$yx==scenario)] <- median(S1.simu$NJ.S1[S1.simu$ccl.S1==1]) res$NJ5.S1_Eff[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1[S1.simu$ccl.S1==1],0.05) res$NJ95.S1_Eff[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1[S1.simu$ccl.S1==1],0.95) res$NJMed.S1_NC[which(res$yx==scenario)] <- median(S1.simu$NJ.S1[S1.simu$nc.S1==1]) res$NJ5.S1_NC[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1[S1.simu$nc.S1==1],0.05) res$NJ95.S1_NC[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1[S1.simu$nc.S1==1],0.95) res$NJMed.S1_Futil[which(res$yx==scenario)] <- median(S1.simu$NJ.S1[S1.simu$nc.S1==0 & S1.simu$ccl.S1==0]) res$NJ5.S1_Futil[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1[S1.simu$nc.S1==0 & S1.simu$ccl.S1==0],0.05) 
res$NJ95.S1_Futil[which(res$yx==scenario)] <- quantile(S1.simu$NJ.S1[S1.simu$nc.S1==0 & S1.simu$ccl.S1==0],0.95) return(res) }
/Code/S1_design/script_S1_fct.R
no_license
PMN-BCH/simu-VHF
R
false
false
4,263
r
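A worked example of the triangular-test boundary formulas used in S1.func() above. The values of pC and delta are defined outside this file, so the numbers assumed here (pC = 0.5, group size delta = 10) are illustrative only.

# Reproduce the design constants and evaluate the first-look boundaries
# under assumed pC and delta (both defined elsewhere in the project).
alpha <- 0.025; beta <- 0.1
z1 <- -qnorm(alpha); z2 <- -qnorm(beta)
theta <- log((0.7 * (1 - 0.5)) / (0.5 * (1 - 0.7))) * ((2 * z1) / (z1 + z2))
a <- (2 / theta) * log(1 / (2 * alpha))
c <- 0.25 * theta

pC <- 0.5; delta <- 10                          # assumed values
V1 <- delta * pC * (1 - pC)                     # information after the first group
zsup1 <- a + c * V1 - 0.583 * sqrt(V1)          # upper (efficacy) boundary at look 1
zinf1 <- -a + 3 * c * V1 + 0.583 * sqrt(V1)     # lower (futility) boundary at look 1
round(c(a = a, c = c, V1 = V1, zsup1 = zsup1, zinf1 = zinf1), 3)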
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update_deps.R \name{update_deps} \alias{update_deps} \title{Update Dependencies for a Package Directory} \usage{ update_deps(path) } \arguments{ \item{path}{the package's root directory.} } \description{ \pkg{cyclocomp} updates the dependencies using \pkg{remotes}. Well, for some strange reason it does so on every run (temporarily). So I permanently do this, and \code{\link[remotes:update_packages]{remotes::update_packages}} does so only for CRAN packages, and \code{remotes:::update.package_deps} is internal to \pkg{remotes} only. So I need copies of internal functions from \pkg{remotes}. } \keyword{internal}
/man/update_deps.Rd
permissive
cran/packager
R
false
true
695
rd
# Tests queryExhaustive(). # library(BiocNeighbors); library(testthat); source("setup.R"); source("test-query-exhaustive.R") set.seed(1001) test_that("queryExhaustive() behaves correctly with queries", { ndata <- 1000 nquery <- 100 for (ndim in c(1, 5, 10, 20)) { for (k in c(1, 5, 20)) { X <- matrix(runif(ndata * ndim), nrow=ndata) Y <- matrix(runif(nquery * ndim), nrow=nquery) out <- queryExhaustive(X, k=k, query=Y) ref <- refQueryKNN(X, Y, k=k) expect_identical(out$index, ref$index) expect_equal(out$distance, ref$distance) } } }) set.seed(1002) test_that("queryExhaustive() works correctly with subsetting", { nobs <- 1000 nquery <- 93 ndim <- 21 k <- 7 X <- matrix(runif(nobs * ndim), nrow=nobs) Y <- matrix(runif(nquery * ndim), nrow=nquery) ref <- queryExhaustive(X, Y, k=k) i <- sample(nquery, 20) sub <- queryExhaustive(X, Y, k=k, subset=i) expect_identical(sub$index, ref$index[i,,drop=FALSE]) expect_identical(sub$distance, ref$distance[i,,drop=FALSE]) i <- rbinom(nquery, 1, 0.5) == 0L sub <- queryExhaustive(X, Y, k=k, subset=i) expect_identical(sub$index, ref$index[i,,drop=FALSE]) expect_identical(sub$distance, ref$distance[i,,drop=FALSE]) rownames(Y) <- paste0("CELL", seq_len(nquery)) i <- sample(rownames(Y), 50) sub <- queryExhaustive(X, Y, k=k, subset=i) m <- match(i, rownames(Y)) expect_identical(sub$index, ref$index[m,,drop=FALSE]) expect_identical(sub$distance, ref$distance[m,,drop=FALSE]) }) set.seed(1003) test_that("queryExhaustive() behaves correctly with alternative options", { nobs <- 1000 nquery <- 100 ndim <- 10 k <- 5 X <- matrix(runif(nobs * ndim), nrow=nobs) Y <- matrix(runif(nquery * ndim), nrow=nquery) out <- queryExhaustive(X, Y, k=k) # Checking what we extract. out2 <- queryExhaustive(X, Y, k=k, get.distance=FALSE) expect_identical(out2$distance, NULL) expect_identical(out2$index, out$index) out3 <- queryExhaustive(X, Y, k=k, get.index=FALSE) expect_identical(out3$index, NULL) expect_identical(out3$distance, out$distance) # Checking precomputation. pre <- buildExhaustive(X) out4 <- queryExhaustive(query=Y, k=k, precomputed=pre) # no need for X! expect_identical(out4, out) # Checking transposition. out5 <- queryExhaustive(X, k=k, query=t(Y), transposed=TRUE) expect_identical(out5, out) }) set.seed(1003001) test_that("queryExhaustive() works with Manhattan distances", { ndata <- 500 # fewer points as refQueryKNN is a slow brute-force method. nquery <- 100 for (ndim in c(1, 5, 10)) { for (k in c(1, 5, 20)) { X <- matrix(runif(ndata * ndim), nrow=ndata) Y <- matrix(runif(nquery * ndim), nrow=nquery) out <- queryExhaustive(X, k=k, query=Y, distance="Manhattan") ref <- refQueryKNN(X, Y, k=k, type="manhattan") expect_identical(out$index, ref$index) expect_equal(out$distance, ref$distance) } } }) set.seed(1003001) test_that("queryExhaustive() works to only obtain the last distance", { ndata <- 500 # fewer points as refQueryKNN is a slow brute-force method. 
nquery <- 100 for (ndim in c(1, 5, 10)) { for (k in c(1, 5, 20)) { X <- matrix(runif(ndata * ndim), nrow=ndata) Y <- matrix(runif(nquery * ndim), nrow=nquery) ref <- queryExhaustive(X, k=k, query=Y) out <- queryExhaustive(X, k=k, query=Y, last=1) expect_identical(out$distance, ref$distance[,k,drop=FALSE]) expect_identical(out$index, ref$index[,k,drop=FALSE]) ref <- queryExhaustive(X, k=k, query=Y, distance="Manhattan") out <- queryExhaustive(X, k=k, query=Y, last=1, distance="Manhattan") expect_identical(out$distance, ref$distance[,k,drop=FALSE]) expect_identical(out$index, ref$index[,k,drop=FALSE]) } } }) set.seed(100301) test_that("queryExhaustive() behaves correctly with parallelization", { library(BiocParallel) nobs <- 1000 nquery <- 124 ndim <- 10 k <- 5 X <- matrix(runif(nobs * ndim), nrow=nobs) Y <- matrix(runif(nquery * ndim), nrow=nquery) out <- queryExhaustive(X, Y, k=k) # Trying out different types of parallelization. out1 <- queryExhaustive(X, Y, k=k, BPPARAM=safeBPParam(2)) expect_identical(out$index, out1$index) expect_identical(out$distance, out1$distance) out2 <- queryExhaustive(X, Y, k=k, BPPARAM=SnowParam(3)) expect_identical(out$index, out2$index) expect_identical(out$distance, out2$distance) }) set.seed(10031) test_that("queryExhaustive() raw output behaves correctly", { nobs <- 1001 nquery <- 101 ndim <- 11 k <- 7 X <- matrix(runif(nobs * ndim), nrow=nobs) Y <- matrix(runif(nquery * ndim), nrow=nquery) pre <- buildExhaustive(X) out <- queryExhaustive(query=Y, k=k, precomputed=pre, raw.index=TRUE) ref <- queryExhaustive(query=Y, X=t(bndata(pre)), k=k) expect_identical(out, ref) # Behaves with subsetting. i <- sample(nquery, 20) out <- queryExhaustive(query=Y, k=k, precomputed=pre, raw.index=TRUE, subset=i) ref <- queryExhaustive(query=Y, X=t(bndata(pre)), k=k, subset=i) expect_identical(out, ref) i <- rbinom(nquery, 1, 0.5) == 0L out <- queryExhaustive(query=Y, k=k, precomputed=pre, raw.index=TRUE, subset=i) ref <- queryExhaustive(query=Y, X=t(bndata(pre)), k=k, subset=i) expect_identical(out, ref) # Adding row names. rownames(Y) <- paste0("CELL", seq_len(nquery)) i <- sample(rownames(Y), 30) out <- queryExhaustive(query=Y, k=k, precomputed=pre, raw.index=TRUE, subset=i) ref <- queryExhaustive(query=Y, X=t(bndata(pre)), k=k, subset=i) expect_identical(out, ref) }) set.seed(1004) test_that("queryExhaustive() behaves correctly with silly inputs", { nobs <- 1000 nquery <- 100 ndim <- 10 X <- matrix(runif(nobs * ndim), nrow=nobs) Y <- matrix(runif(nquery * ndim), nrow=nquery) # What happens when k is not positive. expect_error(queryExhaustive(X, Y, k=0), "positive") expect_error(queryExhaustive(X, Y, k=-1), "positive") # What happens when there are more NNs than k. restrict <- 10 expect_warning(out <- queryExhaustive(X[seq_len(restrict),], Y, k=20), "capped") expect_warning(ref <- queryExhaustive(X[seq_len(restrict),], Y, k=restrict), NA) expect_equal(out, ref) # What happens when there are no dimensions. out <- queryExhaustive(X[,0], Y[,0], k=20) expect_identical(nrow(out$index), as.integer(nquery)) expect_identical(ncol(out$index), 20L) expect_identical(dim(out$index), dim(out$distance)) expect_true(all(out$distance==0)) # What happens when the query is of a different dimension. Z <- matrix(runif(nobs * ndim * 2), nrow=nobs) expect_error(queryExhaustive(X, k=20, query=Z), "dimensionality") # What happens when we request raw.index without precomputed. 
expect_error(queryExhaustive(X, Y, k=20, raw.index=TRUE), "not valid") # What happens when the query is not, strictly a matrix. AA <- data.frame(Y) colnames(AA) <- NULL expect_equal(queryExhaustive(X, Y, k=20), queryExhaustive(X, AA, k=20)) # What happens with nothing. expect_identical(queryExhaustive(X, Y, k=10, get.distance=FALSE, get.index=FALSE), list()) })
/tests/testthat/test-query-exhaustive.R
no_license
yzizhen/BiocNeighbors
R
false
false
7,576
r
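The tests above compare queryExhaustive() against a brute-force helper refQueryKNN() defined in setup.R, which is not shown here. The following is only a sketch of what such a reference might look like; the real helper may differ in details.

# Minimal brute-force query-KNN reference (assumption, not the real setup.R code).
refQueryKNNSketch <- function(X, Y, k, type = c("euclidean", "manhattan")) {
    type <- match.arg(type)
    idx <- matrix(0L, nrow(Y), k)
    dst <- matrix(0, nrow(Y), k)
    for (i in seq_len(nrow(Y))) {
        diffs <- sweep(X, 2, Y[i, ], "-")            # X rows minus the query point
        d <- if (type == "euclidean") sqrt(rowSums(diffs^2)) else rowSums(abs(diffs))
        o <- order(d)[seq_len(k)]                    # indices of the k nearest points
        idx[i, ] <- o
        dst[i, ] <- d[o]
    }
    list(index = idx, distance = dst)
}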
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{pad_frames}
\alias{pad_frames}
\title{Pad the end of list of data frames}
\usage{
pad_frames(x, id, n.period = 360, rotation = "add", force = TRUE)
}
\arguments{
\item{x}{list of data frames.}

\item{id}{character, column name referring to column of \code{x} representing frame sequence integer IDs.}

\item{n.period}{An integer, the known period of rotation that will be part of an animation in which the map data frames in \code{x} will be sequentially plotted over. Default is 360 (1-degree increment rotations).}

\item{rotation}{character, one of \code{"add"} or \code{"pad"}.}

\item{force}{When the length of \code{x} is greater than or equal to \code{n.period} still force padding to occur. Defaults to \code{TRUE}. Otherwise return \code{x}.}
}
\value{
returns \code{x} but padded with its final element appended repeatedly based on a specified period and type of padding method.
}
\description{
Use recycling to pad the end of a list of data frames where data frame elements in the list repeat in a cyclical pattern.
}
\details{
\code{pad_frames} is used on lists of data frames where the sequence of data frames contains content that repeats over the list. It is used in cases where the data frames contain map data (long, lat, and z), for example describing the visible hemisphere surface of a rotating globe.
The number of iterations in a full rotation may not be factorable by the length of the time series of map data (length of list).
For example, a rotating globe animation may complete 10 rotations with 60 frames per rotation, using 600 frames total,
but the list \code{x} may contain 550 data frames. With \code{rotation="pad"}, the last data frame in \code{x} is recycled to pad the series out to 600 frames
so that the animation can display and "hang" on the terminal map data set until a final complete rotation is completed.
With \code{rotation="add"}, a full period is added to the end of the \code{x} rather than padding only far enough to make the length of the data series factorable by the rotation period length.
}
\examples{
library(dplyr)
library(purrr)
data(annualtemps)
x <- map(1:4, ~mutate(filter(annualtemps, Year-2009==.x), idx=.x))

n <- 6
pad_frames(x, id="idx", n.period=n, rotation="add")
pad_frames(x, id="idx", n.period=n, rotation="pad")
}
/man/pad_frames.Rd
no_license
cassljx/mapmate
R
false
true
2,378
rd
# plot4.R: 2x2 panel of household power consumption plots
# (assumes jmfinaldata has already been built, with a date-time column DT1)
png(file = 'plot4.png')
par(mfrow = c(2, 2))

# Top left: global active power over time
plot(jmfinaldata$DT1, jmfinaldata$Global_active_power, type = 'b', ann = FALSE, cex = 0.5)
title(ylab = 'Global Active Power (kilowatts)')

# Top right: voltage over time
plot(jmfinaldata$DT1, jmfinaldata$Voltage, type = 'b', ylab = 'Voltage', xlab = 'date/time')

# Bottom left: the three energy sub-metering series
plot(jmfinaldata$DT1, jmfinaldata$Sub_metering_1, type = 'b', col = 'black',
     ylim = c(0, 30), xlab = '', ylab = 'Energy sub-metering')
lines(jmfinaldata$DT1, jmfinaldata$Sub_metering_2, type = 'b', col = 'red')
lines(jmfinaldata$DT1, jmfinaldata$Sub_metering_3, type = 'b', col = 'blue')
legend('topright', c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       col = c('black', 'red', 'blue'), lty = 1)

# Bottom right: global reactive power over time
plot(jmfinaldata$DT1, jmfinaldata$Global_reactive_power, type = 'b')

dev.off()
/plot4.r
no_license
boxcarrovers/Exploratory-Data-Analysis
R
false
false
784
r
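plot4.R assumes a prepared data frame jmfinaldata with a combined date-time column DT1. A hedged sketch of how such a frame is typically built from the UCI household power consumption file follows; the file name, separator, and two-day window are assumptions, not taken from the script.

# Hypothetical preparation of jmfinaldata for plot4.R.
raw <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", stringsAsFactors = FALSE)
jmfinaldata <- subset(raw, Date %in% c("1/2/2007", "2/2/2007"))
jmfinaldata$DT1 <- strptime(paste(jmfinaldata$Date, jmfinaldata$Time),
                            format = "%d/%m/%Y %H:%M:%S")
# Coerce the measurement columns used in the plots to numeric
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
jmfinaldata[num_cols] <- lapply(jmfinaldata[num_cols], as.numeric)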
rankhospital <- function(state, outcome, num = "best") {
        ## Read outcome data
        data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

        ## Check that state and outcome are valid
        state_data <- subset(data, data$State == state)
        if (nrow(state_data) == 0) {
                stop("invalid state")
        }
        if (outcome == 'heart attack') {
                state_data <- subset(state_data, select = c(7, 2, 11))
        } else if (outcome == 'heart failure') {
                state_data <- subset(state_data, select = c(7, 2, 17))
        } else if (outcome == 'pneumonia') {
                state_data <- subset(state_data, select = c(7, 2, 23))
        } else {
                stop("invalid outcome")
        }
        state_data[, 3] <- suppressWarnings(as.numeric(state_data[, 3]))
        state_data <- na.omit(state_data)

        ## Return hospital name in that state with the given rank
        ## 30-day death rate; ties are broken alphabetically by hospital name
        state_data_ordered <- state_data[order(state_data[, 3], state_data[, 2]), ]
        if (num == "best") {
                hospital_by_rank <- state_data_ordered$Hospital.Name[1]
        } else if (num == "worst") {
                hospital_by_rank <- state_data_ordered$Hospital.Name[nrow(state_data_ordered)]
        } else {
                hospital_by_rank <- state_data_ordered$Hospital.Name[num]
        }
        hospital_by_rank
}
/r-programming/assignment3/rankHospital.R
permissive
gdtm86/datasciencecoursera
R
false
false
1,516
r
rankhospital <- function(state, outcome, num = "best") {
        ## Read outcome data
        data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
        state <- state
        outcome <- outcome
        rank <- num
        
        ## Check that state and outcome are valid
        
        state_data <- subset(data, data$State == state)
        
        ## stop if no rows matched the requested state
        if (nrow(state_data) == 0) {
                stop("invalid state")
        }
        
        if(outcome == 'heart attack'){
                state_data <- subset(state_data, select = c(7,2,11))
        } else if (outcome == 'heart failure'){
                state_data <- subset(state_data, select = c(7,2,17))
        } else if (outcome == 'pneumonia'){
                state_data <- subset(state_data, select = c(7,2,23))
        } else {
                stop("invalid outcome")
        }
        
        state_data[,3] <- as.numeric(state_data[,3])
        state_data <- na.omit(state_data)
        
        ## Return hospital name in that state with the given rank
        ## 30-day death rate
        
        state_data_ordered <- state_data[ order(state_data[,3],state_data[,2]),]
        
        if(num == "best" ){
                hospital_by_rank <- state_data_ordered$Hospital.Name[1]
        } else if (num == "worst"){
                hospital_by_rank <- state_data_ordered$Hospital.Name[nrow(state_data)]
        }else hospital_by_rank <- state_data_ordered$Hospital.Name[num]
        
        hospital_by_rank      
}
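A quick usage sketch, assuming the course's outcome-of-care-measures.csv sits in the working directory; the state and rank values are only illustrative and the results depend on the local CSV contents.

# Illustrative calls
rankhospital("TX", "heart failure", 4)
rankhospital("MD", "heart attack", "worst")
rankhospital("MN", "heart attack", 5000)   # returns NA when the rank exceeds the number of hospitals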
#' Adjust Dose-Response Table when Futility Rule Invoked
#'
#' @description Adjust dose availabilities given that the futility rule was invoked, if dose sample size >= 6 (if sample size < 6, choose to stay and explore dose further, so no availabilities are changed).
#' The dose that invoked the futility rule and any lower doses will be made unavailable for future consideration.
#' @param dose A numeric dose level that violated the futility rule
#' @param dose_info Dataframe containing
#'
#' \[dose level | availability | N_d | X_d | Y_d | Z_d | pf | pe | pt\]
#' @return Updated dose-response table dataframe with adjusted availabilities
#' @usage dose_info = adjust_for_futility_rule(60, dose_info) #if dose level 60 invoked futility rule
#' @export
adjust_for_futility_rule <- function(dose, dose_info) {
	dose_ind = which(dose_info$dose_lvl == dose)
	if (length(dose_ind) == 0) {stop('Invalid dose value. Please enter the dose value and not the index.')}
	dose_samp_size = dose_info$N_d[dose_ind]

	if (dose_samp_size >= 6) {
		for (ind in 1:dose_ind) {
			dose_info$available[ind] = FALSE #make dose and all lower doses unavailable
		}
	}
	return(dose_info)
}
/R/adjust_for_futility_rule.R
no_license
jiyingz/clinicalMPI
R
false
false
1,184
r
#' Adjust Dose-Response Table when Futility Rule Invoked
#'
#' @description Adjust dose availabilities given that the futility rule was invoked, if dose sample size >= 6 (if sample size < 6, choose to stay and explore dose further, so no availabilities are changed).
#' The dose that invoked the futility rule and any lower doses will be made unavailable for future consideration.
#' @param dose A numeric dose level that violated the futility rule
#' @param dose_info Dataframe containing
#'
#' \[dose level | availability | N_d | X_d | Y_d | Z_d | pf | pe | pt\]
#' @return Updated dose-response table dataframe with adjusted availabilities
#' @usage dose_info = adjust_for_futility_rule(60, dose_info) #if dose level 60 invoked futility rule
#' @export
adjust_for_futility_rule <- function(dose, dose_info) {
	dose_ind = which(dose_info$dose_lvl == dose)
	if (length(dose_ind) == 0) {stop('Invalid dose value. Please enter the dose value and not the index.')}
	dose_samp_size = dose_info$N_d[dose_ind]

	if (dose_samp_size >= 6) {
		for (ind in 1:dose_ind) {
			dose_info$available[ind] = FALSE #make dose and all lower doses unavailable
		}
	}
	return(dose_info)
}
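A small usage sketch; the numbers below are invented purely to show the expected shape of dose_info (dose_lvl, available, N_d, X_d, Y_d, Z_d, pf, pe, pt) and the effect of the rule.

# Toy dose-response table (values are illustrative only).
dose_info <- data.frame(
  dose_lvl  = c(20, 40, 60, 80),
  available = TRUE,
  N_d = c(9, 8, 7, 3), X_d = c(1, 1, 2, 0), Y_d = c(2, 3, 2, 1), Z_d = c(0, 1, 1, 0),
  pf = 0.1, pe = 0.3, pt = 0.2
)

# Dose level 60 has N_d >= 6, so it and all lower doses become unavailable.
dose_info <- adjust_for_futility_rule(60, dose_info)
dose_info$available   # FALSE FALSE FALSE TRUE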
# Perceptron Algorithm Implementation s <- array(c(1, 1, 1,-1,1,1,-1,1,1,-1,1,1),dim=c(4,3)) t <- array(c(1, -1, -1,-1),dim=c(4,1)) print(s) print(t) w = t(c(0,0,0)) b = 0 pattern_match <- 0 k<-1 # To Count Iteration n<-0 # step <- 0 alpha = 1 theta = 0.2 Y <- 0 print(-theta) while (n < 4){ for (i in 1:4) { if(n >= 4) { break } step <- step + 1 print(paste0("Inner loop iteration ", i)) print(s[i,]) print(paste0("above is ",i," Pattern")) y <- sum(s[i,]*w) + b print(paste0("Value of y in is ", y)) if(y > theta ){ print("i am in the loop") Y <- 1 if(Y == t[i]){ print("You have classified pattern correctly.....No need to update weight & Bias..!!!") n = n + 1; Y <- 0 } else{ print("Wrong Classification....Need to Upadte weights and Bias....") w = w + (alpha*t[i]*s[i,]) b = b + (alpha*t[i]) print("New Weights and Bias are below...") print(w) print(b) n<-0 } } if(y < -theta){ print("i am in the loop") Y <- -1 if(Y == t[i]){ print("You have classified pattern correctly.....No need to update weight & Bias..!!!") n = n +1 Y <- 0 } else{ print("Wrong Classification....Need to Upadte weights and Bias....") w = w + (alpha*t[i]*s[i,]) b = b + (alpha*t[i]) print("New Weights and Bias are below...") n <- 0 print(w) print(b) } } if(y >= -theta & y <= theta){ Y <- 0 if(Y != t[i]){ print("You have wrong classification.....weights and Bias will change..!!!") w = w + (alpha*t[i]*s[i,]) print("below is your new weight...") print(w) print("Below is your new Bias...") b = b + (alpha*t[i]) print(b) } } } print(paste0("==============================Iteration ",k ," is done==============================")) print("Below is weight & Bias after above iteration") k = k + 1 print(w) print(b) print(paste0("Total",step," steps required to converge")) #print(n) print("***********************************You done with all after.......single step************************************") }
/NN_prog2_Perceptron.R
no_license
Bhaumik10/CS6673-Neural-Network
R
false
false
2,206
r
# Perceptron Algorithm Implementation s <- array(c(1, 1, 1,-1,1,1,-1,1,1,-1,1,1),dim=c(4,3)) t <- array(c(1, -1, -1,-1),dim=c(4,1)) print(s) print(t) w = t(c(0,0,0)) b = 0 pattern_match <- 0 k<-1 # To Count Iteration n<-0 # step <- 0 alpha = 1 theta = 0.2 Y <- 0 print(-theta) while (n < 4){ for (i in 1:4) { if(n >= 4) { break } step <- step + 1 print(paste0("Inner loop iteration ", i)) print(s[i,]) print(paste0("above is ",i," Pattern")) y <- sum(s[i,]*w) + b print(paste0("Value of y in is ", y)) if(y > theta ){ print("i am in the loop") Y <- 1 if(Y == t[i]){ print("You have classified pattern correctly.....No need to update weight & Bias..!!!") n = n + 1; Y <- 0 } else{ print("Wrong Classification....Need to Upadte weights and Bias....") w = w + (alpha*t[i]*s[i,]) b = b + (alpha*t[i]) print("New Weights and Bias are below...") print(w) print(b) n<-0 } } if(y < -theta){ print("i am in the loop") Y <- -1 if(Y == t[i]){ print("You have classified pattern correctly.....No need to update weight & Bias..!!!") n = n +1 Y <- 0 } else{ print("Wrong Classification....Need to Upadte weights and Bias....") w = w + (alpha*t[i]*s[i,]) b = b + (alpha*t[i]) print("New Weights and Bias are below...") n <- 0 print(w) print(b) } } if(y >= -theta & y <= theta){ Y <- 0 if(Y != t[i]){ print("You have wrong classification.....weights and Bias will change..!!!") w = w + (alpha*t[i]*s[i,]) print("below is your new weight...") print(w) print("Below is your new Bias...") b = b + (alpha*t[i]) print(b) } } } print(paste0("==============================Iteration ",k ," is done==============================")) print("Below is weight & Bias after above iteration") k = k + 1 print(w) print(b) print(paste0("Total",step," steps required to converge")) #print(n) print("***********************************You done with all after.......single step************************************") }
#This is the verification test with Informationstrategy increasevalue is 5 options(stringsAsFactors = FALSE) #Note that plot() is not the same as ggplot() #these are from two separate packages library(ggplot2) # needed for reshaping data frames library(reshape2) #used for querying data, performing aggregations, filtering, etc. library(sqldf) myDataVerificationtest2 = read.table("C:/Users/Kirsten/Documents/Studie/Master/3dYear/Agent_Based/v9_afterverification experimenttest2-table.csv", skip = 6, sep = ",", head=TRUE) summary(myDataVerificationtest2) #this will also show you the names of the column names colnames = colnames(myDataVerificationtest2) ##### Don't worry about what this means, it cleans up the column names ##### ##### You can just copy/paste it and re-use it, just make sure that ##### ##### if your data frame isn't called myDataFrame, then update that part ##### # Some colnames start with "X.", get rid of this colnames(myDataVerificationtest2) = gsub("X\\.", "", colnames(myDataVerificationtest2)) # Get rid of periods at the start and end of the names colnames(myDataVerificationtest2) = gsub("^\\.|\\.$", "", colnames(myDataVerificationtest2)) # Convert all periods into underscores colnames(myDataVerificationtest2) = gsub("\\.", "_", colnames(myDataVerificationtest2)) #remove the value at time is 0. Then all the marketshares are also still 0 and this influence the data myDataVerificationtest2<-myDataVerificationtest2[!(myDataVerificationtest2$step == 0),] #testing kurtosis library(e1071) kurtosis(myDataVerificationtest2$total_consumers_RTP) kurtosis(myDataVerificationtest2$total_consumers_CPP) kurtosis(myDataVerificationtest2$total_consumers_ToU) kurtosis(myDataVerificationtest2$total_consumers_RTPH) #testing skewness skewness(myDataVerificationtest2$total_consumers_RTP) skewness(myDataVerificationtest2$total_consumers_CPP) skewness(myDataVerificationtest2$total_consumers_ToU) skewness(myDataVerificationtest2$total_consumers_RTPH) #see if this influence the variance var(myDataVerificationtest2$total_consumers_RTP) var(myDataVerificationtest2$total_consumers_CPP) var(myDataVerificationtest2$total_consumers_ToU) var(myDataVerificationtest2$total_consumers_RTPH) #mean mean(myDataVerificationtest2$total_consumers_RTP) mean(myDataVerificationtest2$total_consumers_CPP) mean(myDataVerificationtest2$total_consumers_ToU) mean(myDataVerificationtest2$total_consumers_RTPH) #relative variance sd(myDataVerificationtest2$total_consumers_RTP)/mean(myDataVerificationtest2$total_consumers_RTP) sd(myDataVerificationtest2$total_consumers_CPP)/mean(myDataVerificationtest2$total_consumers_CPP) sd(myDataVerificationtest2$total_consumers_ToU)/mean(myDataVerificationtest2$total_consumers_ToU) sd(myDataVerificationtest2$total_consumers_RTPH)/mean(myDataVerificationtest2$total_consumers_RTPH)
/r-script_verificationtest2.R
no_license
KMeeuw/AgentBasedJRC
R
false
false
2,850
r
#This is the verification test with Informationstrategy increasevalue is 5 options(stringsAsFactors = FALSE) #Note that plot() is not the same as ggplot() #these are from two separate packages library(ggplot2) # needed for reshaping data frames library(reshape2) #used for querying data, performing aggregations, filtering, etc. library(sqldf) myDataVerificationtest2 = read.table("C:/Users/Kirsten/Documents/Studie/Master/3dYear/Agent_Based/v9_afterverification experimenttest2-table.csv", skip = 6, sep = ",", head=TRUE) summary(myDataVerificationtest2) #this will also show you the names of the column names colnames = colnames(myDataVerificationtest2) ##### Don't worry about what this means, it cleans up the column names ##### ##### You can just copy/paste it and re-use it, just make sure that ##### ##### if your data frame isn't called myDataFrame, then update that part ##### # Some colnames start with "X.", get rid of this colnames(myDataVerificationtest2) = gsub("X\\.", "", colnames(myDataVerificationtest2)) # Get rid of periods at the start and end of the names colnames(myDataVerificationtest2) = gsub("^\\.|\\.$", "", colnames(myDataVerificationtest2)) # Convert all periods into underscores colnames(myDataVerificationtest2) = gsub("\\.", "_", colnames(myDataVerificationtest2)) #remove the value at time is 0. Then all the marketshares are also still 0 and this influence the data myDataVerificationtest2<-myDataVerificationtest2[!(myDataVerificationtest2$step == 0),] #testing kurtosis library(e1071) kurtosis(myDataVerificationtest2$total_consumers_RTP) kurtosis(myDataVerificationtest2$total_consumers_CPP) kurtosis(myDataVerificationtest2$total_consumers_ToU) kurtosis(myDataVerificationtest2$total_consumers_RTPH) #testing skewness skewness(myDataVerificationtest2$total_consumers_RTP) skewness(myDataVerificationtest2$total_consumers_CPP) skewness(myDataVerificationtest2$total_consumers_ToU) skewness(myDataVerificationtest2$total_consumers_RTPH) #see if this influence the variance var(myDataVerificationtest2$total_consumers_RTP) var(myDataVerificationtest2$total_consumers_CPP) var(myDataVerificationtest2$total_consumers_ToU) var(myDataVerificationtest2$total_consumers_RTPH) #mean mean(myDataVerificationtest2$total_consumers_RTP) mean(myDataVerificationtest2$total_consumers_CPP) mean(myDataVerificationtest2$total_consumers_ToU) mean(myDataVerificationtest2$total_consumers_RTPH) #relative variance sd(myDataVerificationtest2$total_consumers_RTP)/mean(myDataVerificationtest2$total_consumers_RTP) sd(myDataVerificationtest2$total_consumers_CPP)/mean(myDataVerificationtest2$total_consumers_CPP) sd(myDataVerificationtest2$total_consumers_ToU)/mean(myDataVerificationtest2$total_consumers_ToU) sd(myDataVerificationtest2$total_consumers_RTPH)/mean(myDataVerificationtest2$total_consumers_RTPH)
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 8895 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 8400 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 8400 c c Input Parameter (command line, file): c input filename QBFLIB/Kronegger-Pfandler-Pichler/bomb/p5-5.pddl_planlen=15.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 1090 c no.of clauses 8895 c no.of taut cls 445 c c Output Parameters: c remaining no.of clauses 8400 c c QBFLIB/Kronegger-Pfandler-Pichler/bomb/p5-5.pddl_planlen=15.qdimacs 1090 8895 E1 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 19 20 22 23 24 25 26 27 28 29 31 32 33 35 36 38 39 40 41 42 43 44 45 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090] 445 5 595 8400 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kronegger-Pfandler-Pichler/bomb/p5-5.pddl_planlen=15/p5-5.pddl_planlen=15.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
2,705
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 8895 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 8400 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 8400 c c Input Parameter (command line, file): c input filename QBFLIB/Kronegger-Pfandler-Pichler/bomb/p5-5.pddl_planlen=15.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 1090 c no.of clauses 8895 c no.of taut cls 445 c c Output Parameters: c remaining no.of clauses 8400 c c QBFLIB/Kronegger-Pfandler-Pichler/bomb/p5-5.pddl_planlen=15.qdimacs 1090 8895 E1 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 19 20 22 23 24 25 26 27 28 29 31 32 33 35 36 38 39 40 41 42 43 44 45 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090] 445 5 595 8400 RED
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-in-reference.R \name{GROUPS.GOVT} \alias{GROUPS.GOVT} \title{Dataset that should probably be removed or outsourced to data store} \usage{ GROUPS.GOVT() } \description{ Dataset that should probably be removed or outsourced to data store } \examples{ GROUPS.GOVT() } \seealso{ Other datasets in Reference to remove or outsource to data store: \code{\link{BicsSectorBridge}}, \code{\link{RevenueSplit}}, \code{\link{SectorBridge}}, \code{\link{TYPE.EQUITY}}, \code{\link{TYPE.OTHERS}}, \code{\link{sector.bridge}} Other datasets that stand alone: \code{\link{TYPE.EQUITY}}, \code{\link{TYPE.OTHERS}} Other possible_snapshots: \code{\link{ALD.BV}}, \code{\link{ALD.CB}}, \code{\link{ALD.CC}}, \code{\link{ALD.EQ}}, \code{\link{ALD.SPV}}, \code{\link{BALANCE.SHEET.DATA}}, \code{\link{BENCH.REGIONS}}, \code{\link{BicsSectorBridge}}, \code{\link{CB_OG}}, \code{\link{DebtMarketClimate}}, \code{\link{DebtMarket}}, \code{\link{EQMarket.Size}}, \code{\link{EQ_OG}}, \code{\link{FIN.DATA}}, \code{\link{Fund.Data}}, \code{\link{FundsTrusts}}, \code{\link{INDEX.REGIONS}}, \code{\link{Indices}}, \code{\link{LoanMarketClimate}}, \code{\link{LoanMarket}}, \code{\link{PHYSICAL.RISK.CB}}, \code{\link{PHYSICAL.RISK.EQ}}, \code{\link{Receipts}}, \code{\link{RevenueSplit}}, \code{\link{SCENLong}}, \code{\link{SCEN}}, \code{\link{SEC.TYPE.BONDS}}, \code{\link{SectorBridge}}, \code{\link{TYPE.BONDS}}, \code{\link{TYPE.EQUITY}}, \code{\link{TYPE.OTHERS}}, \code{\link{TYPE.RECEIPTS}}, \code{\link{sector.bridge}} } \concept{datasets in Reference to remove or outsource to data store} \concept{datasets that stand alone} \concept{possible_snapshots}
/man/GROUPS.GOVT.Rd
no_license
andybue/r2dii.dataraw
R
false
true
1,766
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-in-reference.R \name{GROUPS.GOVT} \alias{GROUPS.GOVT} \title{Dataset that should probably be removed or outsourced to data store} \usage{ GROUPS.GOVT() } \description{ Dataset that should probably be removed or outsourced to data store } \examples{ GROUPS.GOVT() } \seealso{ Other datasets in Reference to remove or outsource to data store: \code{\link{BicsSectorBridge}}, \code{\link{RevenueSplit}}, \code{\link{SectorBridge}}, \code{\link{TYPE.EQUITY}}, \code{\link{TYPE.OTHERS}}, \code{\link{sector.bridge}} Other datasets that stand alone: \code{\link{TYPE.EQUITY}}, \code{\link{TYPE.OTHERS}} Other possible_snapshots: \code{\link{ALD.BV}}, \code{\link{ALD.CB}}, \code{\link{ALD.CC}}, \code{\link{ALD.EQ}}, \code{\link{ALD.SPV}}, \code{\link{BALANCE.SHEET.DATA}}, \code{\link{BENCH.REGIONS}}, \code{\link{BicsSectorBridge}}, \code{\link{CB_OG}}, \code{\link{DebtMarketClimate}}, \code{\link{DebtMarket}}, \code{\link{EQMarket.Size}}, \code{\link{EQ_OG}}, \code{\link{FIN.DATA}}, \code{\link{Fund.Data}}, \code{\link{FundsTrusts}}, \code{\link{INDEX.REGIONS}}, \code{\link{Indices}}, \code{\link{LoanMarketClimate}}, \code{\link{LoanMarket}}, \code{\link{PHYSICAL.RISK.CB}}, \code{\link{PHYSICAL.RISK.EQ}}, \code{\link{Receipts}}, \code{\link{RevenueSplit}}, \code{\link{SCENLong}}, \code{\link{SCEN}}, \code{\link{SEC.TYPE.BONDS}}, \code{\link{SectorBridge}}, \code{\link{TYPE.BONDS}}, \code{\link{TYPE.EQUITY}}, \code{\link{TYPE.OTHERS}}, \code{\link{TYPE.RECEIPTS}}, \code{\link{sector.bridge}} } \concept{datasets in Reference to remove or outsource to data store} \concept{datasets that stand alone} \concept{possible_snapshots}
\name{xmp15.04} \alias{xmp15.04} \docType{data} \title{data from Example 15.4} \description{ The \code{xmp15.04} data frame has 12 rows and 2 columns. } \format{ This data frame contains the following columns: \describe{ \item{conc}{ a numeric vector } \item{Area}{ a factor with levels \code{Polluted} \code{Unpolluted} } } } \source{ Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury } \examples{ str(xmp15.04) } \keyword{datasets}
/man/xmp15.04.Rd
no_license
dmbates/Devore6
R
false
false
542
rd
\name{xmp15.04} \alias{xmp15.04} \docType{data} \title{data from Example 15.4} \description{ The \code{xmp15.04} data frame has 12 rows and 2 columns. } \format{ This data frame contains the following columns: \describe{ \item{conc}{ a numeric vector } \item{Area}{ a factor with levels \code{Polluted} \code{Unpolluted} } } } \source{ Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury } \examples{ str(xmp15.04) } \keyword{datasets}
# Load the data from the file 
# Label missing data using "?" symbol
# Set the column names
# 

data <- read.table(file = "household_power_consumption.txt",sep = ";",colClasses = "character",
                   col.names = colnames(read.table("household_power_consumption.txt", nrow = 1, header = TRUE, sep=";")),
                   na.strings = "?",
                   skip = 66637,   # skip to the first minute of 1 Feb 2007
                   nrows = 2880    # read the 2,880 one-minute rows covering 1-2 Feb 2007
                   )

# Parse the date/time stamps (not needed for this histogram, kept only for reference)
datetime <- strptime(paste(data$Date,data$Time, sep=" "), format="%d/%m/%Y %H:%M:%S") 

jason <- as.numeric(data$Global_active_power)

hist(jason,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
/plot1.R
no_license
Sidney-Bishop/ExData_Plotting1
R
false
false
662
r
# Load the data from the file 
# Label missing data using "?" symbol
# Set the column names
# 

data <- read.table(file = "household_power_consumption.txt",sep = ";",colClasses = "character",
                   col.names = colnames(read.table("household_power_consumption.txt", nrow = 1, header = TRUE, sep=";")),
                   na.strings = "?",
                   skip = 66637,   # skip to the first minute of 1 Feb 2007
                   nrows = 2880    # read the 2,880 one-minute rows covering 1-2 Feb 2007
                   )

# Parse the date/time stamps (not needed for this histogram, kept only for reference)
datetime <- strptime(paste(data$Date,data$Time, sep=" "), format="%d/%m/%Y %H:%M:%S") 

jason <- as.numeric(data$Global_active_power)

hist(jason,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
context("dplyr") sc <- testthat_spark_connection() iris_tbl <- testthat_tbl("iris") test_requires("dplyr") df1 <- data_frame(a = 1:3, b = letters[1:3]) df2 <- data_frame(b = letters[1:3], c = letters[24:26]) df1_tbl <- testthat_tbl("df1") df2_tbl <- testthat_tbl("df2") test_that("the implementation of 'mutate' functions as expected", { test_requires("dplyr") expect_equal( iris %>% mutate(x = Species) %>% tbl_vars() %>% length(), iris_tbl %>% mutate(x = Species) %>% collect() %>% tbl_vars() %>% length() ) }) test_that("the implementation of 'filter' functions as expected", { test_requires("dplyr") expect_equal( iris_tbl %>% filter(Sepal_Length == 5.1) %>% filter(Sepal_Width == 3.5) %>% filter(Petal_Length == 1.4) %>% filter(Petal_Width == 0.2) %>% select(Species) %>% collect(), iris %>% transmute(Sepal_Length = `Sepal.Length`, Sepal_Width = `Sepal.Width`, Petal_Length = `Petal.Length`, Petal_Width = `Petal.Width`, Species = Species) %>% filter(Sepal_Length == 5.1) %>% filter(Sepal_Width == 3.5) %>% filter(Petal_Length == 1.4) %>% filter(Petal_Width == 0.2) %>% transmute(Species = as.character(Species)) ) }) test_that("'head' uses 'limit' clause", { test_requires("dplyr") test_requires("dbplyr") expect_true( grepl( "LIMIT", sql_render(head(iris_tbl)) ) ) }) test_that("'left_join' does not use 'using' clause", { test_requires("dplyr") test_requires("dbplyr") expect_equal( spark_version(sc) >= "2.0.0" && packageVersion("dplyr") < "0.5.0.90", grepl( "USING", sql_render(left_join(df1_tbl, df2_tbl)) ) ) }) test_that("the implementation of 'left_join' functions as expected", { test_requires("dplyr") expect_true( all.equal( left_join(df1, df2), left_join(df1_tbl, df2_tbl) %>% collect() ) ) }) test_that("the implementation of 'sample_n' functions as expected", { test_requires_version("2.0.0", "sample_n() not supported") test_requires("dplyr") # As of Spark 2.1.0, sampling functions are not exact. expect_lt( iris_tbl %>% sample_n(10) %>% collect() %>% nrow(), nrow(iris) ) }) test_that("the implementation of 'sample_frac' functions returns a sample", { test_requires_version("2.0.0", "sample_n() support") test_requires("dplyr") # As of Spark 2.1.0, sampling functions are not exact. expect_lt( iris_tbl %>% select(Petal_Length) %>% sample_frac(0.2) %>% collect() %>% nrow(), nrow(iris) ) expect_lt( iris_tbl %>% select(Petal_Length) %>% sample_n(10) %>% collect() %>% nrow(), nrow(iris) ) }) test_that("'sample_n' and 'sample_frac' work in nontrivial queries (#1299)", { test_requires_version("2.0.0", "sample_n() support") test_requires("dplyr") expect_lt( iris_tbl %>% sample_n(10) %>% collect() %>% nrow(), nrow(iris) ) }) test_that("'sdf_broadcast' forces broadcast hash join", { skip_databricks_connect() query_plan <- df1_tbl %>% sdf_broadcast() %>% left_join(df2_tbl, by = "b") %>% spark_dataframe() %>% invoke("queryExecution") %>% invoke("optimizedPlan") %>% invoke("toString") expect_match(query_plan, "B|broadcast") }) test_that("can compute() over tables", { test_requires("dplyr") iris_tbl %>% compute() succeed() })
/tests/testthat/test-dplyr.R
permissive
Loquats/sparklyr
R
false
false
3,443
r
context("dplyr") sc <- testthat_spark_connection() iris_tbl <- testthat_tbl("iris") test_requires("dplyr") df1 <- data_frame(a = 1:3, b = letters[1:3]) df2 <- data_frame(b = letters[1:3], c = letters[24:26]) df1_tbl <- testthat_tbl("df1") df2_tbl <- testthat_tbl("df2") test_that("the implementation of 'mutate' functions as expected", { test_requires("dplyr") expect_equal( iris %>% mutate(x = Species) %>% tbl_vars() %>% length(), iris_tbl %>% mutate(x = Species) %>% collect() %>% tbl_vars() %>% length() ) }) test_that("the implementation of 'filter' functions as expected", { test_requires("dplyr") expect_equal( iris_tbl %>% filter(Sepal_Length == 5.1) %>% filter(Sepal_Width == 3.5) %>% filter(Petal_Length == 1.4) %>% filter(Petal_Width == 0.2) %>% select(Species) %>% collect(), iris %>% transmute(Sepal_Length = `Sepal.Length`, Sepal_Width = `Sepal.Width`, Petal_Length = `Petal.Length`, Petal_Width = `Petal.Width`, Species = Species) %>% filter(Sepal_Length == 5.1) %>% filter(Sepal_Width == 3.5) %>% filter(Petal_Length == 1.4) %>% filter(Petal_Width == 0.2) %>% transmute(Species = as.character(Species)) ) }) test_that("'head' uses 'limit' clause", { test_requires("dplyr") test_requires("dbplyr") expect_true( grepl( "LIMIT", sql_render(head(iris_tbl)) ) ) }) test_that("'left_join' does not use 'using' clause", { test_requires("dplyr") test_requires("dbplyr") expect_equal( spark_version(sc) >= "2.0.0" && packageVersion("dplyr") < "0.5.0.90", grepl( "USING", sql_render(left_join(df1_tbl, df2_tbl)) ) ) }) test_that("the implementation of 'left_join' functions as expected", { test_requires("dplyr") expect_true( all.equal( left_join(df1, df2), left_join(df1_tbl, df2_tbl) %>% collect() ) ) }) test_that("the implementation of 'sample_n' functions as expected", { test_requires_version("2.0.0", "sample_n() not supported") test_requires("dplyr") # As of Spark 2.1.0, sampling functions are not exact. expect_lt( iris_tbl %>% sample_n(10) %>% collect() %>% nrow(), nrow(iris) ) }) test_that("the implementation of 'sample_frac' functions returns a sample", { test_requires_version("2.0.0", "sample_n() support") test_requires("dplyr") # As of Spark 2.1.0, sampling functions are not exact. expect_lt( iris_tbl %>% select(Petal_Length) %>% sample_frac(0.2) %>% collect() %>% nrow(), nrow(iris) ) expect_lt( iris_tbl %>% select(Petal_Length) %>% sample_n(10) %>% collect() %>% nrow(), nrow(iris) ) }) test_that("'sample_n' and 'sample_frac' work in nontrivial queries (#1299)", { test_requires_version("2.0.0", "sample_n() support") test_requires("dplyr") expect_lt( iris_tbl %>% sample_n(10) %>% collect() %>% nrow(), nrow(iris) ) }) test_that("'sdf_broadcast' forces broadcast hash join", { skip_databricks_connect() query_plan <- df1_tbl %>% sdf_broadcast() %>% left_join(df2_tbl, by = "b") %>% spark_dataframe() %>% invoke("queryExecution") %>% invoke("optimizedPlan") %>% invoke("toString") expect_match(query_plan, "B|broadcast") }) test_that("can compute() over tables", { test_requires("dplyr") iris_tbl %>% compute() succeed() })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cloudwatchlogs_operations.R \name{cloudwatchlogs_put_data_protection_policy} \alias{cloudwatchlogs_put_data_protection_policy} \title{Creates a data protection policy for the specified log group} \usage{ cloudwatchlogs_put_data_protection_policy(logGroupIdentifier, policyDocument) } \arguments{ \item{logGroupIdentifier}{[required] Specify either the log group name or log group ARN.} \item{policyDocument}{[required] Specify the data protection policy, in JSON. This policy must include two JSON blocks: \itemize{ \item The first block must include both a \code{DataIdentifer} array and an \code{Operation} property with an \code{Audit} action. The \code{DataIdentifer} array lists the types of sensitive data that you want to mask. For more information about the available options, see \href{https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/}{Types of data that you can mask}. The \code{Operation} property with an \code{Audit} action is required to find the sensitive data terms. This \code{Audit} action must contain a \code{FindingsDestination} object. You can optionally use that \code{FindingsDestination} object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. \item The second block must include both a \code{DataIdentifer} array and an \code{Operation} property with an \code{Deidentify} action. The \code{DataIdentifer} array must exactly match the \code{DataIdentifer} array in the first block of the policy. The \code{Operation} property with the \code{Deidentify} action is what actually masks the data, and it must contain the \code{ "MaskConfig": {}} object. The \code{ "MaskConfig": {}} object must be empty. } For an example data protection policy, see the \strong{Examples} section on this page. The contents of the two \code{DataIdentifer} arrays must match exactly. In addition to the two JSON blocks, the \code{policyDocument} can also include \code{Name}, \code{Description}, and \code{Version} fields. The \code{Name} is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in \code{policyDocument} can be up to 30,720 characters.} } \description{ Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. See \url{https://www.paws-r-sdk.com/docs/cloudwatchlogs_put_data_protection_policy/} for full documentation. } \keyword{internal}
/cran/paws.management/man/cloudwatchlogs_put_data_protection_policy.Rd
permissive
paws-r/paws
R
false
true
2,684
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cloudwatchlogs_operations.R \name{cloudwatchlogs_put_data_protection_policy} \alias{cloudwatchlogs_put_data_protection_policy} \title{Creates a data protection policy for the specified log group} \usage{ cloudwatchlogs_put_data_protection_policy(logGroupIdentifier, policyDocument) } \arguments{ \item{logGroupIdentifier}{[required] Specify either the log group name or log group ARN.} \item{policyDocument}{[required] Specify the data protection policy, in JSON. This policy must include two JSON blocks: \itemize{ \item The first block must include both a \code{DataIdentifer} array and an \code{Operation} property with an \code{Audit} action. The \code{DataIdentifer} array lists the types of sensitive data that you want to mask. For more information about the available options, see \href{https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/}{Types of data that you can mask}. The \code{Operation} property with an \code{Audit} action is required to find the sensitive data terms. This \code{Audit} action must contain a \code{FindingsDestination} object. You can optionally use that \code{FindingsDestination} object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. \item The second block must include both a \code{DataIdentifer} array and an \code{Operation} property with an \code{Deidentify} action. The \code{DataIdentifer} array must exactly match the \code{DataIdentifer} array in the first block of the policy. The \code{Operation} property with the \code{Deidentify} action is what actually masks the data, and it must contain the \code{ "MaskConfig": {}} object. The \code{ "MaskConfig": {}} object must be empty. } For an example data protection policy, see the \strong{Examples} section on this page. The contents of the two \code{DataIdentifer} arrays must match exactly. In addition to the two JSON blocks, the \code{policyDocument} can also include \code{Name}, \code{Description}, and \code{Version} fields. The \code{Name} is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in \code{policyDocument} can be up to 30,720 characters.} } \description{ Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. See \url{https://www.paws-r-sdk.com/docs/cloudwatchlogs_put_data_protection_policy/} for full documentation. } \keyword{internal}
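The two-block policy structure described above is easiest to see in a concrete call. The sketch below is illustrative only: the client construction follows the usual paws pattern, and the data identifier ARN, policy name, and log group name are placeholders rather than values taken from this documentation.

library(paws)
logs <- cloudwatchlogs()

# Placeholder policy: one Audit block and one Deidentify block over the same identifiers.
policy <- '{
  "Name": "data-protection-policy",
  "Statement": [
    {"Sid": "audit-policy",
     "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
     "Operation": {"Audit": {"FindingsDestination": {}}}},
    {"Sid": "redact-policy",
     "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
     "Operation": {"Deidentify": {"MaskConfig": {}}}}
  ]
}'

logs$put_data_protection_policy(
  logGroupIdentifier = "my-log-group",   # placeholder log group name
  policyDocument = policy
)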
#' Dissonance Method function
#'
#' Function performs linear ordering by the MD parameter. A higher MD value indicates a better-ranked object.
#'
#' @param data data to order. Must be numeric.
#' @param impact character vector of "+" and "-" values, one per column. Use "+" if the column is a booster (stimulant) and "-" if it is an inhibitor.
#' @return Numeric vector of MD values, one per row of \code{data}.
#'
#' @export
Dissonance <- function(data, impact){

  Z <- matrix(0, nrow = nrow(data), ncol = ncol(data))

  col_min <- apply(data, 2, FUN = min)
  col_max <- apply(data, 2, FUN = max)

  for (i in 1:ncol(data)) {
    if(impact[i] == "-"){
      for (j in 1:nrow(data)) {
        Z[j, i] <- col_min[i]/data[j, i]
      }
    }else{
      if (impact[i] == "+") {
        for (j in 1:nrow(data)) {
          Z[j, i] <- data[j, i]/ col_max[i]
        }
      }else{
        stop("Wrong value in impact: use only '+' or '-'")
      }
    }
  }
  MD <- rowMeans(Z)
  return(MD)
}
/dissonance.R
no_license
blenartowicz/taxonomy
R
false
false
892
r
#' Dissonance Method function
#'
#' Function performs linear ordering by the MD parameter. A higher MD value indicates a better-ranked object.
#'
#' @param data data to order. Must be numeric.
#' @param impact character vector of "+" and "-" values, one per column. Use "+" if the column is a booster (stimulant) and "-" if it is an inhibitor.
#' @return Numeric vector of MD values, one per row of \code{data}.
#'
#' @export
Dissonance <- function(data, impact){

  Z <- matrix(0, nrow = nrow(data), ncol = ncol(data))

  col_min <- apply(data, 2, FUN = min)
  col_max <- apply(data, 2, FUN = max)

  for (i in 1:ncol(data)) {
    if(impact[i] == "-"){
      for (j in 1:nrow(data)) {
        Z[j, i] <- col_min[i]/data[j, i]
      }
    }else{
      if (impact[i] == "+") {
        for (j in 1:nrow(data)) {
          Z[j, i] <- data[j, i]/ col_max[i]
        }
      }else{
        stop("Wrong value in impact: use only '+' or '-'")
      }
    }
  }
  MD <- rowMeans(Z)
  return(MD)
}
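A quick usage sketch with made-up data: three objects scored on two criteria, where the first criterion is a booster and the second an inhibitor.

# Toy example: rows are objects, columns are criteria.
scores <- data.frame(profit = c(10, 20, 15),   # booster: higher is better
                     cost   = c(5, 8, 4))      # inhibitor: lower is better
Dissonance(scores, impact = c("+", "-"))
# Higher MD = better; with these numbers the third object comes out on top.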
\name{brownie.lite}
\alias{brownie.lite}
\title{Likelihood test for rate variation in a continuous trait}
\usage{
brownie.lite(tree, x, maxit=2000, test="chisq", nsim=100, se=NULL, ...)
}
\arguments{
\item{tree}{a phylogenetic tree either as an object of class \code{"phylo"} or \code{"simmap"}. (See \code{\link{read.simmap}}, \code{\link{make.simmap}}, or \code{\link{paintSubTree}} for more details about the latter object class.)}
\item{x}{a vector of tip values for species. \code{names(x)} should be the species names.}
\item{maxit}{an optional integer value indicating the maximum number of iterations for optimization - may need to be increased for large trees.}
\item{test}{an optional string indicating the method for hypothesis testing - options are \code{"chisq"} or \code{"simulation"}.}
\item{nsim}{number of simulations (only used if \code{test="simulation"}).}
\item{se}{a vector containing the standard errors for each estimated mean in \code{x}.}
\item{...}{optional arguments.}
}
\description{
This function takes an object of class \code{"phylo"} or class \code{"simmap"} with a mapped binary or multistate trait (see \code{\link{read.simmap}}) and data for a single continuously valued character. It then fits the Brownian rate variation ("noncensored") model of O'Meara et al. (2006; \emph{Evolution}). This is also the basic model implemented in Brian O'Meara's \emph{Brownie} software.
}
\details{
Sampling error in the estimation of species means can also be accounted for by assigning the vector \code{se} with the species-specific sampling errors for \code{x}.
}
\value{
An object of class \code{"brownie.lite"} containing the following components:
\item{sig2.single}{is the rate, \eqn{\sigma^2}, for a single-rate model. This is usually the "null" model.}
\item{a.single}{is the estimated state at the root node for the single rate model.}
\item{var.single}{variance on the single rate estimator - obtained from the Hessian.}
\item{logL1}{log-likelihood of the single-rate model.}
\item{k1}{number of parameters in the single rate model (always 2).}
\item{sig2.multiple}{is a length \emph{p} (for \emph{p} rates) vector of BM rates (\eqn{\sigma_1^2}, \eqn{\sigma_2^2}, and so on) from the multi-rate model.}
\item{a.multiple}{is the estimated state at the root node for the multi-rate model.}
\item{var.multiple}{\emph{p} x \emph{p} variance-covariance matrix for the \emph{p} rates - the square-roots of the diagonals should give the standard error for each rate.}
\item{logL.multiple}{log-likelihood of the multi-rate model.}
\item{k2}{number of parameters in the multi-rate model (\emph{p}+1).}
\item{P.chisq}{P-value for a likelihood ratio test against the \eqn{\chi^2} distribution; or}
\item{P.sim}{P-value for a likelihood ratio test against a simulated null distribution.}
\item{convergence}{logical value indicating if the likelihood optimization converged.}
}
\references{
O'Meara, B. C., C. Ane, M. J. Sanderson, and P. C. Wainwright. (2006) Testing for different rates of continuous trait evolution using likelihood. \emph{Evolution}, \bold{60}, 922-933.

Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{brownieREML}}, \code{\link{evol.vcv}}, \code{\link{ratebytree}}
}
\keyword{phylogenetics}
\keyword{comparative method}
\keyword{maximum likelihood}
/man/brownie.lite.Rd
no_license
schnappi-wkl/phytools
R
false
false
3,492
rd
\name{brownie.lite}
\alias{brownie.lite}
\title{Likelihood test for rate variation in a continuous trait}
\usage{
brownie.lite(tree, x, maxit=2000, test="chisq", nsim=100, se=NULL, ...)
}
\arguments{
\item{tree}{a phylogenetic tree either as an object of class \code{"phylo"} or \code{"simmap"}. (See \code{\link{read.simmap}}, \code{\link{make.simmap}}, or \code{\link{paintSubTree}} for more details about the latter object class.)}
\item{x}{a vector of tip values for species. \code{names(x)} should be the species names.}
\item{maxit}{an optional integer value indicating the maximum number of iterations for optimization - may need to be increased for large trees.}
\item{test}{an optional string indicating the method for hypothesis testing - options are \code{"chisq"} or \code{"simulation"}.}
\item{nsim}{number of simulations (only used if \code{test="simulation"}).}
\item{se}{a vector containing the standard errors for each estimated mean in \code{x}.}
\item{...}{optional arguments.}
}
\description{
This function takes an object of class \code{"phylo"} or class \code{"simmap"} with a mapped binary or multistate trait (see \code{\link{read.simmap}}) and data for a single continuously valued character. It then fits the Brownian rate variation ("noncensored") model of O'Meara et al. (2006; \emph{Evolution}). This is also the basic model implemented in Brian O'Meara's \emph{Brownie} software.
}
\details{
Sampling error in the estimation of species means can also be accounted for by assigning the vector \code{se} with the species-specific sampling errors for \code{x}.
}
\value{
An object of class \code{"brownie.lite"} containing the following components:
\item{sig2.single}{is the rate, \eqn{\sigma^2}, for a single-rate model. This is usually the "null" model.}
\item{a.single}{is the estimated state at the root node for the single rate model.}
\item{var.single}{variance on the single rate estimator - obtained from the Hessian.}
\item{logL1}{log-likelihood of the single-rate model.}
\item{k1}{number of parameters in the single rate model (always 2).}
\item{sig2.multiple}{is a length \emph{p} (for \emph{p} rates) vector of BM rates (\eqn{\sigma_1^2}, \eqn{\sigma_2^2}, and so on) from the multi-rate model.}
\item{a.multiple}{is the estimated state at the root node for the multi-rate model.}
\item{var.multiple}{\emph{p} x \emph{p} variance-covariance matrix for the \emph{p} rates - the square-roots of the diagonals should give the standard error for each rate.}
\item{logL.multiple}{log-likelihood of the multi-rate model.}
\item{k2}{number of parameters in the multi-rate model (\emph{p}+1).}
\item{P.chisq}{P-value for a likelihood ratio test against the \eqn{\chi^2} distribution; or}
\item{P.sim}{P-value for a likelihood ratio test against a simulated null distribution.}
\item{convergence}{logical value indicating if the likelihood optimization converged.}
}
\references{
O'Meara, B. C., C. Ane, M. J. Sanderson, and P. C. Wainwright. (2006) Testing for different rates of continuous trait evolution using likelihood. \emph{Evolution}, \bold{60}, 922-933.

Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{brownieREML}}, \code{\link{evol.vcv}}, \code{\link{ratebytree}}
}
\keyword{phylogenetics}
\keyword{comparative method}
\keyword{maximum likelihood}
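A minimal usage sketch, assuming phytools is loaded; the node number passed to paintSubTree is chosen arbitrarily and the data are simulated, so the fitted rates mean nothing beyond illustrating the call described above.

library(phytools)
set.seed(1)
tree <- pbtree(n = 40, scale = 1)                          # simulate a tree
# paint one clade as a second regime (node number picked only for illustration)
ptree <- paintSubTree(tree, node = 45, state = "2", anc.state = "1")
x <- fastBM(tree)                                          # simulate a continuous trait
fit <- brownie.lite(ptree, x)
fit   # compares the single-rate and two-rate Brownian models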
query_mvdata<-function(a_lot,a_var){ data_con <- adminKraken::con_mysql() str<-'Select xvar,sn from mvdata where Lot = "xlot";' gsub1<-gsub("xvar",a_var,str) data_query_string<-gsub("xlot",a_lot,gsub1) data_query<-RMySQL::dbSendQuery(data_con,data_query_string) fetched_data<-RMySQL::dbFetch(data_query,n=-1) clean<-RMySQL::dbClearResult(data_query) drop_con<-RMySQL::dbDisconnect(data_con) fetched_data }
/R/query_mvdata.R
no_license
JARS3N/LLP
R
false
false
408
r
query_mvdata<-function(a_lot,a_var){ data_con <- adminKraken::con_mysql() str<-'Select xvar,sn from mvdata where Lot = "xlot";' gsub1<-gsub("xvar",a_var,str) data_query_string<-gsub("xlot",a_lot,gsub1) data_query<-RMySQL::dbSendQuery(data_con,data_query_string) fetched_data<-RMySQL::dbFetch(data_query,n=-1) clean<-RMySQL::dbClearResult(data_query) drop_con<-RMySQL::dbDisconnect(data_con) fetched_data }
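The gsub-based templating above builds the SQL by plain string substitution. Below is a sketch of one alternative using DBI parameter interpolation, which binds the lot value instead of pasting it in; it assumes the same adminKraken connection helper and mvdata table, and is not part of the original package. The column name still has to come from trusted input either way.

query_mvdata_safe <- function(a_lot, a_var) {
  con <- adminKraken::con_mysql()
  on.exit(DBI::dbDisconnect(con), add = TRUE)
  # Interpolate the lot as a bound value rather than via gsub().
  sql <- DBI::sqlInterpolate(con,
                             paste0("SELECT ", a_var, ", sn FROM mvdata WHERE Lot = ?lot;"),
                             lot = a_lot)
  DBI::dbGetQuery(con, sql)
}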
library(tidyverse) library(janitor) library(purrr) library(cowplot) growth_raw <- read_csv("Tetraselmis_experiment/data-processed/growth_data_20140606.csv") all_thermal_data <- read_csv("Tetraselmis_experiment/data-processed/all_thermal_data.csv") %>% clean_names curve_data <- read_csv("Tetraselmis_experiment/data-processed/curve_data_20140606.csv") %>% clean_names() growth <- growth_raw %>% clean_names() # all_growth <- left_join(growth, all_thermal_data, by = c("curve_code" = "isolate_code")) all1 <- left_join(curve_data, all_thermal_data) %>% filter(!is.na(topt)) all2 <- left_join(growth, all1) growth %>% ggplot(aes(x = temperature, y = growth_rate_mu, group = curve_code, color = curve_code)) + geom_line() pos_skew <- all2 %>% filter(rel_curveskew > 0) pos_curves <- unique(pos_skew$curve_code) all2 %>% filter(!is.na(topt)) %>% # filter(curve_code %in% pos_curves) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_n > 4) %>% ggplot(aes(x = temperature, y = growth_rate_mu, group = curve_code, color = curve_code)) + geom_point() + geom_line() + facet_wrap( ~ isolate_code, scales = "free") +theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(color="black")) + theme(text = element_text(size=10, family = "Helvetica")) + theme(strip.background = element_rect(colour="white", fill="white")) ggsave("Tetraselmis_experiment/figures/all_curves.png", width = 12, height = 12) curve <- all2 %>% # filter(grepl("Detonula", speciesname)) %>% filter(isolate_code == 49) tpc1<-function(x){ res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2) res } library(plotrix) curve %>% group_by(temperature) %>% summarise_each(funs(mean, std.error), growth_rate_mu) %>% ggplot(aes(x = temperature, y = growth_rate_mu_mean)) + geom_point(size = 2) + geom_errorbar(aes(ymin = growth_rate_mu_mean - growth_rate_mu_std.error, ymax = growth_rate_mu_mean + growth_rate_mu_std.error))+ stat_function(fun = tpc1) + theme_bw() + xlim(0, 50) #### figure out how many actual temperatures each data set has #### temp_numbers <- all2 %>% group_by(isolate_code) %>% summarise(number_of_temps = length(unique(temperature))) library(modelr) ??modelr all2 %>% filter(isolate_code == 52) %>% View tpc1<-function(x){ res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2) res } predict_function <- function(data){ data_sub <- data x <- seq(min(data_sub$temperature), max(data_sub$temperature), by = 0.5) y <- data$a[1]*exp(data$b[1]*x)*(1-((x-data$z[1])/(data$w[1]/2))^2) df <- data.frame(x, y) df } sub <- all2 %>% filter(isolate_code == 52) all_split <- all2 %>% split(.$isolate_code) predictions_df <- all_split %>% map_df(predict_function, .id = "isolate_code") predictions_df <- predictions_df %>% mutate(isolate_code = as.integer(isolate_code)) write_csv(predictions_df, "Tetraselmis_experiment/data-processed/TPC_predictions.csv") all3 <- left_join(predictions_df, all2, by = "isolate_code") write_csv(all3, "Tetraselmis_experiment/data-processed/global_TPCs.csv") all3 <- read_csv("Tetraselmis_experiment/data-processed/global_TPCs.csv") all3 %>% filter(!is.na(topt)) %>% filter(mu_n > 4) %>% filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.85) %>% filter(mu_n > 4) %>% distinct(isolate_code, .keep_all = TRUE) %>% View ggplot(aes(x = 
temperature, y = growth_rate_mu, color = skew_dir)) + geom_point() + geom_line(aes(x = x, y = y)) + facet_wrap( ~ isolate_code, scales = "free_y") +theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(color="black")) + theme(text = element_text(size=10, family = "Helvetica")) + theme(strip.background = element_rect(colour="white", fill="white")) ggsave("Tetraselmis_experiment/figures/all_curves_fits.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq95.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq85.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq90.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq90_samex_axis.png", width = 15, height = 12) pos_skews <- all3 %>% filter(!is.na(topt)) %>% filter(mu_n > 4) %>% filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.85) %>% filter(mu_n > 4) %>% distinct(isolate_code, .keep_all = TRUE) %>% select(isolate_code, source, latitude, longitude, habitat, speciesname, class, group) write_csv(pos_skews, "Tetraselmis_experiment/data-processed/pos_skew.csv") all3 %>% filter(!is.na(topt)) %>% filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% # filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.90) %>% filter(mu_n > 4) %>% distinct(isolate_code, .keep_all = TRUE) %>% View ggplot(aes(x = temperature, y = growth_rate_mu)) + # geom_point() + geom_line(aes(x = x, y = y)) + facet_wrap( ~ isolate_code, scales = "free_y") +theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(color="black")) + theme(text = element_text(size=10, family = "Helvetica")) + theme(strip.background = element_rect(colour="white", fill="white")) ggsave("Tetraselmis_experiment/figures/all_curves_fits_nodata_quality.png", width = 13, height = 13) ggsave("Tetraselmis_experiment/figures/all_curves_fits_nodata_quality_nopoints.png", width = 13, height = 13) all4 <- all3 %>% filter(!is.na(topt)) %>% # filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.90) %>% filter(mu_n > 4) greater_90 <- unique(all4$isolate_code) all5 <- all3 %>% filter(!is.na(topt)) %>% # filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.95) %>% filter(mu_n > 4) greater_95 <- unique(all5$isolate_code)
/Tetraselmis_experiment/R/12_curve_exploration.R
permissive
JoeyBernhardt/thermal-variability
R
false
false
6,953
r
library(tidyverse) library(janitor) library(purrr) library(cowplot) growth_raw <- read_csv("Tetraselmis_experiment/data-processed/growth_data_20140606.csv") all_thermal_data <- read_csv("Tetraselmis_experiment/data-processed/all_thermal_data.csv") %>% clean_names curve_data <- read_csv("Tetraselmis_experiment/data-processed/curve_data_20140606.csv") %>% clean_names() growth <- growth_raw %>% clean_names() # all_growth <- left_join(growth, all_thermal_data, by = c("curve_code" = "isolate_code")) all1 <- left_join(curve_data, all_thermal_data) %>% filter(!is.na(topt)) all2 <- left_join(growth, all1) growth %>% ggplot(aes(x = temperature, y = growth_rate_mu, group = curve_code, color = curve_code)) + geom_line() pos_skew <- all2 %>% filter(rel_curveskew > 0) pos_curves <- unique(pos_skew$curve_code) all2 %>% filter(!is.na(topt)) %>% # filter(curve_code %in% pos_curves) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_n > 4) %>% ggplot(aes(x = temperature, y = growth_rate_mu, group = curve_code, color = curve_code)) + geom_point() + geom_line() + facet_wrap( ~ isolate_code, scales = "free") +theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(color="black")) + theme(text = element_text(size=10, family = "Helvetica")) + theme(strip.background = element_rect(colour="white", fill="white")) ggsave("Tetraselmis_experiment/figures/all_curves.png", width = 12, height = 12) curve <- all2 %>% # filter(grepl("Detonula", speciesname)) %>% filter(isolate_code == 49) tpc1<-function(x){ res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2) res } library(plotrix) curve %>% group_by(temperature) %>% summarise_each(funs(mean, std.error), growth_rate_mu) %>% ggplot(aes(x = temperature, y = growth_rate_mu_mean)) + geom_point(size = 2) + geom_errorbar(aes(ymin = growth_rate_mu_mean - growth_rate_mu_std.error, ymax = growth_rate_mu_mean + growth_rate_mu_std.error))+ stat_function(fun = tpc1) + theme_bw() + xlim(0, 50) #### figure out how many actual temperatures each data set has #### temp_numbers <- all2 %>% group_by(isolate_code) %>% summarise(number_of_temps = length(unique(temperature))) library(modelr) ??modelr all2 %>% filter(isolate_code == 52) %>% View tpc1<-function(x){ res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2) res } predict_function <- function(data){ data_sub <- data x <- seq(min(data_sub$temperature), max(data_sub$temperature), by = 0.5) y <- data$a[1]*exp(data$b[1]*x)*(1-((x-data$z[1])/(data$w[1]/2))^2) df <- data.frame(x, y) df } sub <- all2 %>% filter(isolate_code == 52) all_split <- all2 %>% split(.$isolate_code) predictions_df <- all_split %>% map_df(predict_function, .id = "isolate_code") predictions_df <- predictions_df %>% mutate(isolate_code = as.integer(isolate_code)) write_csv(predictions_df, "Tetraselmis_experiment/data-processed/TPC_predictions.csv") all3 <- left_join(predictions_df, all2, by = "isolate_code") write_csv(all3, "Tetraselmis_experiment/data-processed/global_TPCs.csv") all3 <- read_csv("Tetraselmis_experiment/data-processed/global_TPCs.csv") all3 %>% filter(!is.na(topt)) %>% filter(mu_n > 4) %>% filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.85) %>% filter(mu_n > 4) %>% distinct(isolate_code, .keep_all = TRUE) %>% View ggplot(aes(x = 
temperature, y = growth_rate_mu, color = skew_dir)) + geom_point() + geom_line(aes(x = x, y = y)) + facet_wrap( ~ isolate_code, scales = "free_y") +theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(color="black")) + theme(text = element_text(size=10, family = "Helvetica")) + theme(strip.background = element_rect(colour="white", fill="white")) ggsave("Tetraselmis_experiment/figures/all_curves_fits.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq95.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq85.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq90.png", width = 12, height = 12) ggsave("Tetraselmis_experiment/figures/all_curves_fits_skew_rsq90_samex_axis.png", width = 15, height = 12) pos_skews <- all3 %>% filter(!is.na(topt)) %>% filter(mu_n > 4) %>% filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.85) %>% filter(mu_n > 4) %>% distinct(isolate_code, .keep_all = TRUE) %>% select(isolate_code, source, latitude, longitude, habitat, speciesname, class, group) write_csv(pos_skews, "Tetraselmis_experiment/data-processed/pos_skew.csv") all3 %>% filter(!is.na(topt)) %>% filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% # filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.90) %>% filter(mu_n > 4) %>% distinct(isolate_code, .keep_all = TRUE) %>% View ggplot(aes(x = temperature, y = growth_rate_mu)) + # geom_point() + geom_line(aes(x = x, y = y)) + facet_wrap( ~ isolate_code, scales = "free_y") +theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(color="black")) + theme(text = element_text(size=10, family = "Helvetica")) + theme(strip.background = element_rect(colour="white", fill="white")) ggsave("Tetraselmis_experiment/figures/all_curves_fits_nodata_quality.png", width = 13, height = 13) ggsave("Tetraselmis_experiment/figures/all_curves_fits_nodata_quality_nopoints.png", width = 13, height = 13) all4 <- all3 %>% filter(!is.na(topt)) %>% # filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.90) %>% filter(mu_n > 4) greater_90 <- unique(all4$isolate_code) all5 <- all3 %>% filter(!is.na(topt)) %>% # filter(curve_code %in% pos_curves) %>% mutate(skew_dir = ifelse(rel_curveskew<0, "negative skew", "positive skew")) %>% filter(curvequal == "good", maxqual == "good", minqual == "good") %>% filter(mu_rsqrlist > 0.95) %>% filter(mu_n > 4) greater_95 <- unique(all5$isolate_code)
if(RUN_TESTS) {
  set.seed(20200213)
  test <- data.frame(
    x = rpois(1e6, 400),
    s = sample(paste0("species", 1:100), 1e6, replace = TRUE)
  )
  test <- test[order(test$s, -test$x), ]

  test_that("Rcpp implementation is right", {
    expect_equal(crown_helper(test$x, test$s), crown_helper_cpp(test$x, test$s))
    expect_equal(crown_helper_faster(test$x, test$s), crown_helper_cpp(test$x, test$s))
  })

  # microbenchmark::microbenchmark(
  #   crown_helper(test$x, test$s),
  #   crown_helper_faster(test$x, test$s),
  #   crown_helper_cpp(test$x, test$s)
  # )
}
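## --- Added note (not part of the original test file) --------------------------------------------
## The whole test above is guarded by a RUN_TESTS flag that must be defined elsewhere in the test
## setup (it is not defined in this file). A minimal, hedged sketch of one common way to define such
## a flag, e.g. in tests/testthat/setup.R -- the actual mechanism used by this package is not shown
## in the source, so treat it as an assumption:
# RUN_TESTS <- identical(Sys.getenv("NOT_CRAN"), "true")  # run the slow 1e6-row check only off-CRAN
## -------------------------------------------------------------------------------------------------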
/tests/testthat/test-R-vs-cpp.R
no_license
eheinzen/championtrees
R
false
false
577
r
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.20685850817761e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613099315-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
343
r
plot_idle_time <- function(x, ...) {

  mapping <- attr(x, "mapping")
  level <- attr(x, "level")
  units <- attr(x, "units")

  y_lab <- glue("Idle time (in {units})")

  if(level == "log") {
    attr(x, "raw") %>%
      ggplot(aes("", idle_time)) +
      geom_boxplot() +
      scale_y_continuous() +
      theme_light() +
      coord_flip() +
      labs(x = "", y = y_lab) -> p
  } else if(level == "case") {
    x %>%
      ggplot(aes_string(glue("reorder({mapping$case_id}, idle_time)"), "idle_time")) +
      geom_col(aes(fill = as.numeric(idle_time))) +
      scale_fill_continuous_tableau(name = y_lab, palette = "Blue") +
      theme_light() +
      scale_y_continuous() +
      theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
      labs(x = "Cases", y = y_lab) -> p
  } else if(level == "trace") {
    stop("No plot available at this level")
  } else if(level == "resource") {
    x %>%
      ggplot(aes_string(glue("reorder({mapping$resource_id}, idle_time)"), "idle_time")) +
      geom_col(aes(fill = as.numeric(idle_time))) +
      scale_fill_continuous_tableau(name = y_lab, palette = "Blue") +
      scale_y_continuous() +
      coord_flip() +
      theme_light() +
      labs(x = "Resources", y = y_lab) -> p
  }

  if(!is.null(mapping$groups)) {
    p <- p + facet_grid(as.formula(paste(c(paste(mapping$groups, collapse = "+"), "~."), collapse = "")), scales = "free_y")
  }

  return(p)
}
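## --- Added usage sketch (not part of the original file) -----------------------------------------
## plot_idle_time() is an internal plotting helper: it expects an idle_time metric object carrying
## "mapping", "level", "units" and (for level == "log") "raw" attributes, and is normally reached
## through the generic plot() method rather than called directly. A minimal, hedged sketch of the
## typical call chain in edeaR; the example event log comes from eventdataR, and the exact exported
## interface should be treated as an assumption rather than something shown in this file.
# library(edeaR)
# library(eventdataR)                              # provides the `patients` example event log
# patients %>%
#   idle_time(level = "case", units = "days") %>%  # computes the metric and sets the attributes read above
#   plot()                                         # dispatches to the helper defined above
## -------------------------------------------------------------------------------------------------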
/R/plot.idle_time.R
no_license
cran/edeaR
R
false
false
1,404
r
### Clipping GCM Historical information ### C. Navarro - H. Achicanoy ### CIAT, 2016 ### =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= ### ### Processing GCMs information ### =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= ### ### 1.1 GCM extract daily information process for historical data (Done) gcmDailyProcess <- function(){ require(raster) require(ncdf) require(ncdf4) require(rgdal) # Change according to period of analysis gcmDir <- "/mnt/data_cluster_2/gcm/cmip5/raw/daily/historical" # Create folder to save raw information dirout <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_raw_res" # Create folder to save resampled information diroutcut <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_0_05deg_lat" # List of GCMs to extract climatic information gcmList <- c("bcc_csm1_1","bcc_csm1_1_m","csiro_mk3_6_0","gfdl_cm3", "gfdl_esm2g","gfdl_esm2m","ipsl_cm5a_mr","miroc_esm", "miroc_esm_chem","miroc_miroc5","mohc_hadgem2_es","ncc_noresm1_m") # List of climatic variables to be extracted varlist <- c("tasmax", "tasmin", "pr", "rsds") # List of months mthList <- c(paste(0, c(1:9), sep=""), paste(c(10:12))) # List of metrics to be calculated (average and standar deviation) metList <- c("avg", "std") # Get a list of month with and withour 0 in one digit numbers mthListMod <- c(1:12) # Numbers of days per month ndays <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) mthMat <- as.data.frame(cbind(mthList, mthListMod, ndays)) names(mthMat) <- c("Mth", "MthMod", "Ndays") # Extent of Kenya raster bbox <- extent(33.9, 41.9, -4.700001, 5.449999) ## Loop through list of GCMs for(gcm in gcmList) { # Create folder for period to analyse - raw information diroutgcmhis <- paste(dirout, "/", basename(gcm), "/1971_2000", sep="") # Create folder for period to analyse - resampled information diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/1971_2000", sep="") cat(" Cutting:", "Historical ", basename(gcm), "\n") if (!dir.exists(diroutgcmhis)) {dir.create(diroutgcmhis, recursive=T)} if (!dir.exists(paste(diroutgcmhis, "/by-month", sep=""))) {dir.create(paste(diroutgcmhis, "/by-month", sep=""), recursive=T)} if (!dir.exists(diroutgcmhiscut)) {dir.create(diroutgcmhiscut, recursive=T)} if (!dir.exists(paste(diroutgcmhiscut, "/by-month", sep=""))) {dir.create(paste(diroutgcmhiscut, "/by-month", sep=""), recursive=T)} ## Loop through list of variables for(var in varlist) { if(!file.exists(paste(diroutgcmhis, "/", var, "_1971_2000_day_lat.nc", sep=""))) { ncList <- list.files(path=paste(gcmDir, "/", basename(gcm), "/r1i1p1", sep=""), pattern=paste(var, "_day*", sep=""), full.names=TRUE) if(!file.exists(paste(diroutgcmhis, "/", var, "_1971_2000_day.nc", sep=""))) { system(paste("cdo seldate,1971-01-01,2000-12-31 ", ncList[1], " ", diroutgcmhis, "/", var, "_1971_2000_day.nc", sep="")) } system(paste("cdo sellonlatbox,",bbox@xmin+360-10,",",bbox@xmax+360+10,",",bbox@ymin-10,",",bbox@ymax+10," ", diroutgcmhis, "/", var, "_1971_2000_day.nc ", diroutgcmhis, "/", var, "_1971_2000_day_lat.nc",sep="")) } if(!file.exists(paste(diroutgcmhis, "/by-month/", var, "_2000_12.nc", sep=""))) { system(paste("cdo splityear ", diroutgcmhis, "/", var, "_1971_2000_day_lat.nc ", diroutgcmhis, "/by-month/", var, "_", sep="")) for(yr in 1971:2000) { system(paste("cdo splitmon ", diroutgcmhis, "/by-month/", var, "_", yr, ".nc ", diroutgcmhis, "/by-month/", var, "_", yr, "_", sep="")) file.remove(paste(diroutgcmhis, "/by-month/", var, "_", 
yr, ".nc", sep="")) } } } } } gcmDailyProcess() ### 2.1 GCM extract daily information process for future data (Done) gcmDailyFutureProcess <- function(rcp='rcp26', period='2021_2045'){ require(raster) require(ncdf) require(rgdal) # Change according to period of analysis gcmDir <- paste("/mnt/data_cluster_2/gcm/cmip5/raw/daily/", rcp, sep="") # Create folder to save raw information dirout <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_raw_res" # Create folder to save resampled information diroutcut <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_0_05deg_lat" if(period=='2021_2045') { inYr <- 2021; endYr <- 2045 } else { if(period=='2041_2065') { inYr <- 2041; endYr <- 2065 } } # List of GCMs to extract climatic information gcmList <- c("bcc_csm1_1","bcc_csm1_1_m","csiro_mk3_6_0","gfdl_cm3", "gfdl_esm2g","gfdl_esm2m","ipsl_cm5a_mr","miroc_esm", "miroc_esm_chem","miroc_miroc5","mohc_hadgem2_es","ncc_noresm1_m") # List of climatic variables to be extracted varlist <- c("tasmax", "tasmin", "pr", "rsds") # List of months mthList <- c(paste(0,c(1:9),sep=""),paste(c(10:12))) # List of metrics to be calculated (average and standar deviation) metList <- c("avg", "std") # Get a list of month with and withour 0 in one digit numbers mthListMod <- c(1:12) # Numbers of days per month ndays <- c(31,28,31,30,31,30,31,31,30,31,30,31) mthMat <- as.data.frame(cbind(mthList, mthListMod, ndays)) names(mthMat) <- c("Mth", "MthMod", "Ndays") # Extent of Kenya raster bbox <- extent(33.9, 41.9, -4.700001, 5.449999) ## Loop through list of GCMs for(gcm in gcmList) { # Create folders according to period if(period=='2021_2045'){ # Create folder for period to analyse - raw information diroutgcmhis <- paste(dirout, "/", basename(gcm), "/2021_2045/", basename(rcp), sep="") # Create folder for period to analyse - resampled information diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/2021_2045/", basename(rcp), sep="") } else { if(period=='2041_2065'){ # Create folder for period to analyse - raw information diroutgcmhis <- paste(dirout, "/", basename(gcm), "/2041_2065/", basename(rcp), sep="") # Create folder for period to analyse - resampled information diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/2041_2065/", basename(rcp), sep="") } } cat(" Cutting:", "Future ", basename(gcm), "\n") if (!file.exists(diroutgcmhis)) {dir.create(diroutgcmhis, recursive=T)} if (!file.exists(paste(diroutgcmhis, "/by-month", sep=""))) {dir.create(paste(diroutgcmhis, "/by-month", sep=""), recursive=T)} if (!file.exists(diroutgcmhiscut)) {dir.create(diroutgcmhiscut, recursive=T)} if (!file.exists(paste(diroutgcmhiscut, "/by-month", sep=""))) {dir.create(paste(diroutgcmhiscut, "/by-month", sep=""), recursive=T)} ## Loop through list of variables for(var in varlist) { if(period=='2021_2045') { if(!file.exists(paste(diroutgcmhis, "/", var, "_2021_2045_day_lat.nc", sep=""))) { ncList <- list.files(path=paste(gcmDir, "/", basename(gcm), "/r1i1p1", sep=""), pattern=paste(var, "_day*", sep=""), full.names=TRUE) if(!file.exists(paste(diroutgcmhis, "/", var, "_2021_2045_day.nc", sep=""))) { system(paste("cdo seldate,2021-01-01,2045-12-31 ", ncList[1], " ", diroutgcmhis, "/", var, "_2021_2045_day.nc", sep="")) } system(paste("cdo sellonlatbox,",bbox@xmin+360-10,",",bbox@xmax+360+10,",",bbox@ymin-10,",",bbox@ymax+10," ", diroutgcmhis, "/", var, "_2021_2045_day.nc ", diroutgcmhis, "/", var, "_2021_2045_day_lat.nc",sep="")) } if(!file.exists(paste(diroutgcmhis, "/by-month/", var, "_2045_12.nc", sep=""))) { system(paste("cdo 
splityear ", diroutgcmhis, "/", var, "_2021_2045_day_lat.nc ", diroutgcmhis, "/by-month/", var, "_", sep="")) for(yr in 2021:2045) { system(paste("cdo splitmon ", diroutgcmhis, "/by-month/", var, "_", yr, ".nc ", diroutgcmhis, "/by-month/", var, "_", yr, "_", sep="")) file.remove(paste(diroutgcmhis, "/by-month/", var, "_", yr, ".nc", sep="")) } } } else { if(period=='2041_2065') { if(!file.exists(paste(diroutgcmhis, "/", var, "_2041_2065_day_lat.nc", sep=""))) { ncList <- list.files(path=paste(gcmDir, "/", basename(gcm), "/r1i1p1", sep=""), pattern=paste(var, "_day*", sep=""), full.names=TRUE) if(!file.exists(paste(diroutgcmhis, "/", var, "_2041_2065_day.nc", sep=""))) { system(paste("cdo seldate,2041-01-01,2065-12-31 ", ncList[1], " ", diroutgcmhis, "/", var, "_2041_2065_day.nc", sep="")) } system(paste("cdo sellonlatbox,",bbox@xmin+360-10,",",bbox@xmax+360+10,",",bbox@ymin-10,",",bbox@ymax+10," ", diroutgcmhis, "/", var, "_2041_2065_day.nc ", diroutgcmhis, "/", var, "_2041_2065_day_lat.nc",sep="")) } if(!file.exists(paste(diroutgcmhis, "/by-month/", var, "_2065_12.nc", sep=""))) { system(paste("cdo splityear ", diroutgcmhis, "/", var, "_2041_2065_day_lat.nc ", diroutgcmhis, "/by-month/", var, "_", sep="")) for(yr in 2041:2065) { system(paste("cdo splitmon ", diroutgcmhis, "/by-month/", var, "_", yr, ".nc ", diroutgcmhis, "/by-month/", var, "_", yr, "_", sep="")) file.remove(paste(diroutgcmhis, "/by-month/", var, "_", yr, ".nc", sep="")) } } } } } } } periodList <- c("2021_2045", "2041_2065") rcpList <- paste("rcp", c(26, 45, 60, 85), sep="") lapply(1:length(periodList), function(i){ cat('Processing period:', periodList[[i]],'\n') library(parallel) mclapply(1:length(rcpList), function(j){ cat('Processing RCP:', rcpList[[j]],'\n') gcmDailyFutureProcess(rcp=rcpList[[j]], period=periodList[[i]]) return(cat('Process done for RCP:', rcpList[[j]],'\n')) }, mc.cores=4) return(cat('Process done for period:', periodList[[i]],'\n')) }) ### 1.2 GCM resample process for historical data (Running) gcmDailyResample <- function(){ require(raster) require(ncdf4) require(rgdal) dirout <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_raw_res" diroutcut <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_0_05deg_lat" inYr <- 1971 endYr <- 2000 # gcmList <- c("bcc_csm1_1","bcc_csm1_1_m","csiro_mk3_6_0","gfdl_cm3", "gfdl_esm2g","gfdl_esm2m","ipsl_cm5a_mr","miroc_esm", "miroc_esm_chem","miroc_miroc5","mohc_hadgem2_es","ncc_noresm1_m") gcmList <- c("csiro_mk3_6_0","gfdl_cm3", "gfdl_esm2g","gfdl_esm2m","ipsl_cm5a_mr","miroc_esm", "miroc_esm_chem","miroc_miroc5","mohc_hadgem2_es","ncc_noresm1_m") varlist <- c("tasmax", "tasmin", "pr", "rsds") mthList <- c(paste(0,c(1:9),sep=""),paste(c(10:12))) metList <- c("avg", "std") # Get a list of month with and withour 0 in one digit numbers mthListMod <- c(1:12) ndays <- c(31,28,31,30,31,30,31,31,30,31,30,31) mthMat <- as.data.frame(cbind(mthList, mthListMod, ndays)) names(mthMat) <- c("Mth", "MthMod", "Ndays") bbox <- extent(33.9, 41.9, -4.700001, 5.449999) rows=203 cols=160 ## Reggrid GCM Historical for(gcm in gcmList){ for(var in varlist){ if(var == "tasmax"){varmod <- "tmax"} if(var == "tasmin"){varmod <- "tmin"} if(var == "pr"){varmod <- "prec"} if(var == "rsds"){varmod <- "rsds"} diroutgcmhis <- paste(dirout, "/", basename(gcm), "/", inYr, "_", endYr, sep="") diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/", inYr, "_", endYr, sep="") for(mth in mthList){ mthMod <- as.numeric(paste((mthMat$MthMod[which(mthMat$Mth == mth)]))) ndayMth <- 
as.numeric(paste((mthMat$Ndays[which(mthMat$Mth == mth)]))) if(!file.exists(paste(diroutgcmhiscut, "/", varmod, "_", inYr, "_", endYr, "_", mth, "_std.nc", sep=""))){ for(yr in inYr:endYr){ for(i in 1:31){ assign(paste("d", i, sep=""), raster()) } cat(" Resample daily: historical", varmod, "_", yr, " ", mth, "\n") if(!file.exists(paste(diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", sep=""))){ f <- paste(diroutgcmhis, "/by-month/", var, "_", yr, "_", mth, ".nc", sep="") rx <- raster(f) for(i in 1:rx@file@nbands){ assign(paste("d", i, sep=""), raster(f, band=i)) } dList <- c(d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31) dayNcStack <- stack(dList[1:ndayMth]) dayNcStackRes <- resample(dayNcStack, raster(nrows=rows, ncols=cols, xmn=bbox@xmin, xmx=bbox@xmax, ymn=bbox@ymin, ymx=bbox@ymax, resolution=0.05), method='bilinear') # dayNcStackRes <- resample(dayNcStack, raster(nrows=rows, ncols=cols, xmn=bbox@xmin+360, xmx=bbox@xmax+360, ymn=bbox@ymin, ymx=bbox@ymax, resolution=0.05), method='bilinear') # xmin(dayNcStackRes) <- xmin(dayNcStackRes)-360 # xmax(dayNcStackRes) <- xmax(dayNcStackRes)-360 if(varmod == "tmax"){dayNcStackRes <- dayNcStackRes - 273.15} if(varmod == "tmin"){dayNcStackRes <- dayNcStackRes - 273.15} if(varmod == "prec"){dayNcStackRes <- dayNcStackRes * 86400} dayNcStackRes <- writeRaster(dayNcStackRes, paste(diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", sep=""), format="CDF", overwrite=T) } cat(" Calculating avg and std daily: historical", basename(gcm), " ", varmod, "_", yr, " ", mth, "\n") system(paste("cdo dayavg ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_avg.nc", sep="")) system(paste("cdo daystd ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_std.nc", sep="")) # system(paste("D:/jetarapues/cdo/cdo.exe -s dayavg ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_avg.nc", sep="")) # system(paste("D:/jetarapues/cdo/cdo.exe -s daystd ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_std.nc", sep="")) } avgNcList <- paste(diroutgcmhiscut, "/by-month/", varmod, "_", inYr:endYr, "_", mth, "_avg.nc", sep="") avgNcStack <- mean(stack(avgNcList)) avgNcStack <- writeRaster(avgNcStack, paste(diroutgcmhiscut, "/", varmod, "_1971_2000_", mth, "_avg.nc", sep=""), format="CDF", overwrite=T) stdNcList <- paste(diroutgcmhiscut, "/by-month/", varmod, "_", inYr:endYr, "_", mth, "_std.nc", sep="") stdNcStack <- mean(stack(stdNcList)) stdNcStack <- writeRaster(stdNcStack, paste(diroutgcmhiscut, "/", varmod, "_1971_2000_", mth, "_std.nc", sep=""), format="CDF", overwrite=T) for (nc in avgNcList){ file.remove(paste(nc)) } for (nc in stdNcList){ file.remove(paste(nc)) } } } } } } gcmDailyResample() ### 2.2 GCM resample process for future data (Ready to run) gcmDailyFutureResample <- function(rcp='rcp26', period='2021_2045'){ require(raster) require(ncdf) require(rgdal) dirout <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_raw_res" diroutcut <- "/mnt/workspace_cluster_8/Kenya_KACCAL/data/gcm_0_05deg_lat" if(period=='2021_2045') { inYr <- 2021; endYr <- 2045 } else { if(period=='2041_2065') { inYr <- 2041; endYr <- 2065 } } 
gcmList <- c("bcc_csm1_1","bcc_csm1_1_m","csiro_mk3_6_0","gfdl_cm3", "gfdl_esm2g","gfdl_esm2m","ipsl_cm5a_mr","miroc_esm", "miroc_esm_chem","miroc_miroc5","mohc_hadgem2_es","ncc_noresm1_m") varlist <- c("tasmax", "tasmin", "pr", "rsds") mthList <- c(paste(0,c(1:9),sep=""),paste(c(10:12))) metList <- c("avg", "std") # Get a list of month with and withour 0 in one digit numbers mthListMod <- c(1:12) ndays <- c(31,28,31,30,31,30,31,31,30,31,30,31) mthMat <- as.data.frame(cbind(mthList, mthListMod, ndays)) names(mthMat) <- c("Mth", "MthMod", "Ndays") bbox <- extent(33.9, 41.9, -4.700001, 5.449999) rows=203 cols=160 ## Reggrid GCM future data for(gcm in gcmList){ for(var in varlist){ if (var == "tasmax"){varmod <- "tmax"} if (var == "tasmin"){varmod <- "tmin"} if (var == "pr"){varmod <- "prec"} if (var == "rsds"){varmod <- "rsds"} # diroutgcmhis <- paste(dirout, "/", basename(gcm), "/", inYr, "_", endYr, sep="") # diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/", inYr, "_", endYr, sep="") if(period=='2021_2045'){ diroutgcmhis <- paste(dirout, "/", basename(gcm), "/2021_2045/", basename(rcp), sep="") diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/2021_2045/", basename(rcp), sep="") } else { if(period=='2041_2065'){ diroutgcmhis <- paste(dirout, "/", basename(gcm), "/2041_2065/", basename(rcp), sep="") diroutgcmhiscut <- paste(diroutcut, "/", basename(gcm), "/2041_2065/", basename(rcp), sep="") } } for(mth in mthList){ mthMod <- as.numeric(paste((mthMat$MthMod[which(mthMat$Mth == mth)]))) ndayMth <- as.numeric(paste((mthMat$Ndays[which(mthMat$Mth == mth)]))) if(!file.exists(paste(diroutgcmhiscut, "/", varmod, "_", inYr, "_", endYr, "_", mth, "_std.nc", sep=""))){ for(yr in inYr:endYr){ for(i in 1:31){ assign(paste("d", i, sep=""), raster()) } cat(" Resample daily: future", varmod, "_", yr, " ", mth, "\n") if(!file.exists(paste(diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", sep=""))){ f <- paste(diroutgcmhis, "/by-month/", var, "_", yr, "_", mth, ".nc", sep="") rx <- raster(f) for(i in 1:rx@file@nbands){ assign(paste("d", i, sep=""), raster(f, band=i)) } dList <- c(d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31) dayNcStack <- stack(dList[1:ndayMth]) dayNcStackRes <- resample(dayNcStack, raster(nrows=rows, ncols=cols, xmn=bbox@xmin, xmx=bbox@xmax, ymn=bbox@ymin, ymx=bbox@ymax, resolution=0.05), method='bilinear') # dayNcStackRes <- resample(dayNcStack, raster(nrows=rows, ncols=cols, xmn=bbox@xmin+360, xmx=bbox@xmax+360, ymn=bbox@ymin, ymx=bbox@ymax, resolution=0.05), method='bilinear') # xmin(dayNcStackRes) <- xmin(dayNcStackRes)-360 # xmax(dayNcStackRes) <- xmax(dayNcStackRes)-360 if (varmod == "tmax"){dayNcStackRes <- dayNcStackRes - 273.15} if (varmod == "tmin"){dayNcStackRes <- dayNcStackRes - 273.15} if (varmod == "prec"){dayNcStackRes <- dayNcStackRes * 86400} dayNcStackRes <- writeRaster(dayNcStackRes, paste(diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", sep=""), format="CDF", overwrite=T) } cat(" Calculating avg and std daily: historical", basename(gcm), " ", varmod, "_", yr, " ", mth, "\n") system(paste("cdo dayavg ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_avg.nc", sep="")) system(paste("cdo daystd ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_std.nc", sep="")) 
# system(paste("D:/jetarapues/cdo/cdo.exe -s dayavg ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_avg.nc", sep="")) # system(paste("D:/jetarapues/cdo/cdo.exe -s daystd ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, ".nc", " ", diroutgcmhiscut, "/by-month/", varmod, "_", yr, "_", mth, "_std.nc", sep="")) } avgNcList <- paste(diroutgcmhiscut, "/by-month/", varmod, "_", inYr:endYr, "_", mth, "_avg.nc", sep="") avgNcStack <- mean(stack(avgNcList)) avgNcStack <- writeRaster(avgNcStack, paste(diroutgcmhiscut, "/", varmod, "_1971_2000_", mth, "_avg.nc", sep=""), format="CDF", overwrite=T) stdNcList <- paste(diroutgcmhiscut, "/by-month/", varmod, "_", inYr:endYr, "_", mth, "_std.nc", sep="") stdNcStack <- mean(stack(stdNcList)) stdNcStack <- writeRaster(stdNcStack, paste(diroutgcmhiscut, "/", varmod, "_1971_2000_", mth, "_std.nc", sep=""), format="CDF", overwrite=T) for (nc in avgNcList){ file.remove(paste(nc)) } for (nc in stdNcList){ file.remove(paste(nc)) } } } } } } periodList <- c("2021_2045", "2041_2065") rcpList <- paste("rcp", c(26, 45, 60, 85), sep="") lapply(1:length(periodList), function(i){ cat('Processing period:', periodList[[i]],'\n') library(parallel) mclapply(1:length(rcpList), function(j){ cat('Processing RCP:', rcpList[[j]],'\n') gcmDailyFutureResample(rcp=rcpList[[j]], period=periodList[[i]]) return(cat('Process done for RCP:', rcpList[[j]],'\n')) }, mc.cores=4) return(cat('Process done for period:', periodList[[i]],'\n')) })
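## --- Added sketch (not part of the original script) ---------------------------------------------
## Both resample functions above interpolate every daily layer onto a fixed 0.05-degree grid over
## Kenya (extent 33.9-41.9 E, -4.700001-5.449999 N) with hard-coded rows = 203 and cols = 160.
## A minimal, self-contained sketch of that target grid on its own, useful as a sanity check that
## the hard-coded dimensions match the extent and resolution before launching the full loops.
## The object names `bbox_kenya` and `target_grid` are illustrative and do not appear in the source.
library(raster)
bbox_kenya <- extent(33.9, 41.9, -4.700001, 5.449999)
target_grid <- raster(xmn = bbox_kenya@xmin, xmx = bbox_kenya@xmax,
                      ymn = bbox_kenya@ymin, ymx = bbox_kenya@ymax,
                      resolution = 0.05)
nrow(target_grid)  # should come out as 203, matching `rows` above
ncol(target_grid)  # should come out as 160, matching `cols` above
## -------------------------------------------------------------------------------------------------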
/kenya-county-profiles/00_KACCAL_processing_gcm_information.R
no_license
CIAT-DAPA/dapa-climate-change
R
false
false
22,364
r
##create a script to parse cBIOportal data #perhaps this will facilitate analysis across the different datasets... library(cgdsr) library(tidyverse) library(data.table) script.dir <- dirname(sys.frame(1)$ofile) all.genes<<-unique(fread('../data/ucsc_kgXref_hg19_2015_10_29.csv')$geneSymbol) #'getSamplesForDisease creates a unified mapping of all samples #'to various cell lines and disease profiles so that when #'all are joined we can compare one to another getSamplesForDisease<-function(dis='',study='tcga'){ mycgds = CGDS("http://www.cbioportal.org/public-portal/") all.studies<-getCancerStudies(mycgds) ind=grep(paste(tolower(dis),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id) if(length(ind)==0) return(c()) mycancerstudy<-all.studies$cancer_study_id[ind] sampList<-lapply(mycancerstudy,function(cs){ caseLists<-getCaseLists(mycgds,cs) samps<-unlist(strsplit(caseLists[match(paste(cs,'all',sep='_'),caseLists[,1]),5],split=' ')) # allprofs<-getGeneticProfiles(mycgds,cs)[,1] print(paste('Found',length(samps),'for',cs)) return(samps) }) all.samps<-unique(unlist(sampList)) print(paste("Found",length(all.samps),'samples for',study,dis)) return(all.samps) } #various disease types in cbioporta. broad.cancer.types=c('brca','cellline','lcll','desm','dlbc','esca','hnsc','luad','mbl','skcm','mm','nsclc','es','prad') mskcc.cancer.types=c('acyc','acbc','blca','coadread','luad','mpnst','thyroid','prad','hnc','sarc','scco') tcga.cancer.types<<-c('laml','acc','blca','lgg','brca','cesc','chol','coadread','esca','gbm','hnsc','kich','kirc','kirp','lihc','luad','lusc','dlbc','ov','paad','thca','pcpg','prad','sarc','skcm','stad','tgct','thym','ucs','ucec','uvm')#meso has no sequence data ##not all have counts cell.line.tiss<-c('CENTRAL_NERVOUS_SYSTEM','BONE','PROSTATE','STOMACH','URINARY_TRACT','OVARY','HAEMATOPOIETIC_AND_LYMPHOID_TISSUE','KIDNEY','THYROID','SKIN','SOFT_TISSUE','SALIVARY_GLAND','LUNG','PLEURA','LIVER','ENDOMETRIUM','PANCREAS','BREAST','UPPER_AERODIGESTIVE_TRACT','LARGE_INTESTINE','AUTONOMIC_GANGLIA','OESOPHAGUS','BILIARY_TRACT','SMALL_INTESTINE') getDisMutationData<-function(dis='',study='tcga',genelist=all.genes){ mycgds = CGDS("http://www.cbioportal.org/public-portal/") all.studies<-getCancerStudies(mycgds) if(tolower(dis)=='alltcga') dval='' else dval=dis #if disease is blank will get all diseases ind=grep(paste(tolower(dval),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id) print(paste('found',length(ind),study,'samples for disease',dis)) if(length(ind)==0) return(NULL) mycancerstudy<-all.studies$cancer_study_id[ind] if(study=='tcga') mycancerstudy=intersect(mycancerstudy,paste(tcga.cancer.types,study,sep='_')) else if(study=='mskcc') mycancerstudy=intersect(mycancerstudy,paste(mskcc.cancer.types,study,sep='_')) else if(study=='broad') mycancerstudy=intersect(mycancerstudy,paste(broad.cancer.types,study,sep='_')) expr.list<-lapply(mycancerstudy,function(cs){ print(paste(cs,study,'Mutation data')) caseLists<-getCaseLists(mycgds,cs) allprofs<-getGeneticProfiles(mycgds,cs)[,1] profile=allprofs[grep('mutations',allprofs)] seqSamps=caseLists$case_list_id[grep('sequenced',caseLists$case_list_id)] if(length(genelist)>800) gene.groups=split(genelist, ceiling(seq_along(genelist)/400)) else gene.groups=list(genelist) dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,seqSamps)) ddat<-matrix() if(length(dat)==1){ ddat<-t(as.data.frame(dat)) }else{ for(i in which(sapply(dat,nrow)!=0)){ ddat<-cbind(ddat,dat[[i]]) } } # nans<-which(apply(ddat,2,function(x) 
all(is.nan(x)||is.na(x)))) # nas<-which(apply(ddat,2,function(x) all(is.na(x)))) # if(nrow(ddat)>1) # ddat<-ddat[,-nans] ##now set to binary matrix dfdat<-apply(ddat,1,function(x){ sapply(unlist(x),function(y) !is.na(y) && y!='NaN') }) return(dfdat) }) if(length(expr.list)>1){ comm.genes<-c()#rownames(expr.list[[1]]) for(i in 1:length(expr.list)) comm.genes<-union(comm.genes,rownames(expr.list[[i]])) full.dat<-do.call('cbind',lapply(expr.list,function(x){ missing<-setdiff(comm.genes,rownames(x)) print(length(missing)) if(length(missing)>0) dat<-rbind(x[intersect(rownames(x),comm.genes),], t(sapply(missing,function(y,x) rep(FALSE,ncol(x)),x))) else{ dat<-x[intersect(rownames(x),comm.genes),] } colnames(dat)<-colnames(x) dat<-dat[comm.genes,] return(dat) })) } else{ full.dat<-expr.list[[1]] } return(full.dat) } #'get TCGA clientd ata required for survival analysis #' getDisClinicalData<-function(dis='',study='tcga'){ mycgds = CGDS("http://www.cbioportal.org/") all.studies<-getCancerStudies(mycgds) # Get available case lists (collection of samples) for a given cancer study if(tolower(dis)=='alltcga') dval='' else dval=dis ind=grep(paste(tolower(dval),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id) print(paste('found',length(ind),study,'studies for disease',dis)) if(length(ind)==0){ return(NULL) } mycancerstudy<-all.studies$cancer_study_id[ind] clin.list<-lapply(mycancerstudy,function(cs){ print(paste(cs,study,'Clinical data')) caseLists<-getCaseLists(mycgds,cs)%>%select(case_list_id,case_ids) clinDat<-getClinicalData(mycgds,caseLists[grep('all',caseLists$case_list_id),1]) clinDat%>%select(OS_MONTHS,OS_STATUS,DFS_MONTHS,DFS_STATUS)%>%mutate(Sample=rownames(clinDat),Study=cs) }) full.dat<-do.call('rbind',clin.list) full.dat } #'formats TCGA expression data into a single matrix, often combining #'samples from multiple studies getDisExpressionData<-function(dis='',study='tcga',getZscores=FALSE,genelist=all.genes){ #if disease is blank will get all diseases mycgds = CGDS("http://www.cbioportal.org/public-portal/") all.studies<-getCancerStudies(mycgds) if(tolower(dis)=='alltcga') dval='' else dval=dis ind=grep(paste(tolower(dval),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id) print(paste('found',length(ind),study,'samples for disease',dis)) if(length(ind)==0){ return(NULL) } mycancerstudy<-all.studies$cancer_study_id[ind] expr.list<-lapply(mycancerstudy,function(cs){ print(paste(cs,study,'Expression data')) caseLists<-getCaseLists(mycgds,cs) allprofs<-getGeneticProfiles(mycgds,cs)[,1] rnaseqs<-allprofs[grep('rna_seq',allprofs)] if(length(rnaseqs)==0) rnaseqs<-allprofs[grep('mrna',allprofs)] if(length(grep('merged',rnaseqs))>0) rnaseqs<-rnaseqs[-grep('merged',rnaseqs)] zscores<-grep('Zscores',rnaseqs) profile=rnaseqs[zscores] if(!getZscores) profile=rnaseqs[-zscores] if(length(profile)>1) profile=profile[grep('v2',profile)] mrnaSamps=caseLists$case_list_id[grep('rna_seq',caseLists$case_list_id)] if(length(mrnaSamps)>1) mrnaSamps=mrnaSamps[grep('v2',mrnaSamps)] else if(length(mrnaSamps)==0) mrnaSamps=caseLists$case_list_id[grep('mrna',caseLists$case_list_id)] if(length(genelist)>400) gene.groups=split(all.genes, ceiling(seq_along(all.genes)/400)) else gene.groups=list(genelist) dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,mrnaSamps)) ddat<-matrix() if(length(dat)==1){ ddat<-t(as.data.frame(dat)) }else{ for(i in which(sapply(dat,nrow)!=0)){ ddat<-cbind(ddat,dat[[i]]) } } nans<-which(apply(ddat,2,function(x) all(is.na(x))||mean(x,na.rm=T)==0)) 
if(length(nans)>0 && nrow(ddat)>1) ddat<-ddat[,-nans] #ddat<-ddat[,-1] ddat<-data.frame(t(ddat)) ddat }) if(length(expr.list)>1){ comm.genes<-rownames(expr.list[[1]]) for(i in 2:length(expr.list)) comm.genes<-intersect(comm.genes,rownames(expr.list[[i]])) ##now combine all samples by doing a cbind full.dat<-do.call('cbind',lapply(expr.list,function(x) x[comm.genes,])) } else{ full.dat<-expr.list[[1]] } new.dat<-full.dat #print(dim(full.dat)) #new.dat<-apply(full.dat,2,function(x) as.numeric) #rownames(new.dat)<-rownames(full.dat) return(new.dat) } #'get CCLE expressiond ata. can get z score or affy data, not sure which to do yet getCcleExpressionData<-function(tiss='',getZscores=FALSE){ mycgds = CGDS("http://www.cbioportal.org/public-portal/") #all.studies<-getCancerStudies(mycgds) mycancerstudy='cellline_ccle_broad' if(tolower(tiss)=='allccle') tval='' else tval=tiss mprofile<-'cellline_ccle_broad_mrna_median_Zscores' ##eventually test out both if(!getZscores) mprofile<-'cellline_ccle_broad_mrna' profile<-mprofile caseLists<-getCaseLists(mycgds,mycancerstudy) print(paste('Collecting CCLE expression data for',tiss,'tissue')) ##get those samples with mRNA expression data mrnaSamps<<-caseLists$case_list_id[grep('mrna',caseLists$case_list_id)] #cbio seems to handle chunks of 500 or so gene.groups=split(all.genes, ceiling(seq_along(all.genes)/400)) dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,mrnaSamps)) ddat<-matrix() for(i in which(sapply(dat,nrow)!=0)){ ddat<-cbind(ddat,dat[[i]]) } nans<-which(apply(ddat,2,function(x) all(is.nan(x)))) if(length(nans)>0) ddat<-ddat[,-nans] ddat<-ddat[,-1] ddat<-data.frame(t(ddat)) ##tissue here if(tval!=''){ cols<-grep(tval,colnames(ddat)) print(paste('Selecting',length(cols),'cell lines for tissue',tiss)) }else{ cols<-1:ncol(ddat) } return(ddat[,cols]) } #'get CCLE mutation dat getCcleMutationData<-function(tiss=''){ mycancerstudy<-'cellline_ccle_broad' mycgds = CGDS("http://www.cbioportal.org/public-portal/") #all.studies<-getCancerStudies(mycgds) if(tolower(tiss)=='allccle') tval='' else tval=tiss profile<-"cellline_ccle_broad_mutations" ##think about adding CNA data caseLists<-getCaseLists(mycgds,mycancerstudy) print('Got caselists') mutSamps<-caseLists$case_list_id[grep("sequenced",caseLists[,1])] print(paste('Collecting CCLE mutation data for',tiss,'tissue')) gene.groups=split(all.genes, ceiling(seq_along(all.genes)/500)) dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,mutSamps)) ddat<-matrix() for(i in which(sapply(dat,nrow)!=0)){ ddat<-cbind(ddat,dat[[i]]) } nans<-which(apply(ddat,2,function(x) all(is.nan(x)||is.na(x)))) # nas<-which(apply(ddat,2,function(x) all(is.na(x)))) ddat<-ddat[,-nans] ##now set to binary matrix dfdat<-apply(ddat,1,function(x){ sapply(unlist(x),function(y) !is.na(y) && y!='NaN') }) ##tissue here if(tval!=''){ cols<-grep(tval,colnames(dfdat)) print(paste('Selecting',length(cols),'cell lines for tissue',tiss)) }else{ cols<-1:ncol(dfdat) } return(dfdat[,cols]) }
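## --- Added usage sketch (not part of the original script) ---------------------------------------
## The getters above share one pattern: resolve the cBioPortal study id for a disease/study pair,
## pull profile data in gene chunks (cgdsr limits request size), and return a genes-by-samples
## matrix (expression) or a logical mutation matrix. A minimal, hedged sketch of how they might be
## combined for a single TCGA disease; "brca" is an illustrative choice and the object names below
## are assumptions, not from the source. The calls hit the cBioPortal web API, so they are left
## commented out here.
# source("cBioPortalData.R")
# brca.samples <- getSamplesForDisease(dis = "brca", study = "tcga")
# brca.expr    <- getDisExpressionData(dis = "brca", study = "tcga", getZscores = TRUE)
# brca.mut     <- getDisMutationData(dis = "brca", study = "tcga")
# brca.clin    <- getDisClinicalData(dis = "brca", study = "tcga")
# common       <- intersect(colnames(brca.expr), colnames(brca.mut))  # samples with both data types
## -------------------------------------------------------------------------------------------------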
/bin/cBioPortalData.R
no_license
sgosline/RASPathwaySig
R
false
false
11,090
r
##create a script to parse cBIOportal data
#perhaps this will facilitate analysis across the different datasets...

library(cgdsr)
library(tidyverse)
library(data.table)

script.dir <- dirname(sys.frame(1)$ofile)

all.genes<<-unique(fread('../data/ucsc_kgXref_hg19_2015_10_29.csv')$geneSymbol)

#'getSamplesForDisease creates a unified mapping of all samples
#'to various cell lines and disease profiles so that when
#'all are joined we can compare one to another
getSamplesForDisease<-function(dis='',study='tcga'){
  mycgds = CGDS("http://www.cbioportal.org/public-portal/")
  all.studies<-getCancerStudies(mycgds)
  ind=grep(paste(tolower(dis),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id)
  if(length(ind)==0)
    return(c())
  mycancerstudy<-all.studies$cancer_study_id[ind]
  sampList<-lapply(mycancerstudy,function(cs){
    caseLists<-getCaseLists(mycgds,cs)
    samps<-unlist(strsplit(caseLists[match(paste(cs,'all',sep='_'),caseLists[,1]),5],split=' '))
    # allprofs<-getGeneticProfiles(mycgds,cs)[,1]
    print(paste('Found',length(samps),'for',cs))
    return(samps)
  })
  all.samps<-unique(unlist(sampList))
  print(paste("Found",length(all.samps),'samples for',study,dis))
  return(all.samps)
}

#various disease types in cbioportal
broad.cancer.types=c('brca','cellline','lcll','desm','dlbc','esca','hnsc','luad','mbl','skcm','mm','nsclc','es','prad')
mskcc.cancer.types=c('acyc','acbc','blca','coadread','luad','mpnst','thyroid','prad','hnc','sarc','scco')
tcga.cancer.types<<-c('laml','acc','blca','lgg','brca','cesc','chol','coadread','esca','gbm','hnsc','kich','kirc','kirp','lihc','luad','lusc','dlbc','ov','paad','thca','pcpg','prad','sarc','skcm','stad','tgct','thym','ucs','ucec','uvm') #meso has no sequence data

##not all have counts
cell.line.tiss<-c('CENTRAL_NERVOUS_SYSTEM','BONE','PROSTATE','STOMACH','URINARY_TRACT','OVARY','HAEMATOPOIETIC_AND_LYMPHOID_TISSUE','KIDNEY','THYROID','SKIN','SOFT_TISSUE','SALIVARY_GLAND','LUNG','PLEURA','LIVER','ENDOMETRIUM','PANCREAS','BREAST','UPPER_AERODIGESTIVE_TRACT','LARGE_INTESTINE','AUTONOMIC_GANGLIA','OESOPHAGUS','BILIARY_TRACT','SMALL_INTESTINE')

#'get binary mutation calls (gene x sample) for a disease/study combination
getDisMutationData<-function(dis='',study='tcga',genelist=all.genes){
  mycgds = CGDS("http://www.cbioportal.org/public-portal/")
  all.studies<-getCancerStudies(mycgds)
  if(tolower(dis)=='alltcga') dval='' else dval=dis  #if disease is blank will get all diseases
  ind=grep(paste(tolower(dval),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id)
  print(paste('found',length(ind),study,'samples for disease',dis))
  if(length(ind)==0)
    return(NULL)
  mycancerstudy<-all.studies$cancer_study_id[ind]
  if(study=='tcga')
    mycancerstudy=intersect(mycancerstudy,paste(tcga.cancer.types,study,sep='_'))
  else if(study=='mskcc')
    mycancerstudy=intersect(mycancerstudy,paste(mskcc.cancer.types,study,sep='_'))
  else if(study=='broad')
    mycancerstudy=intersect(mycancerstudy,paste(broad.cancer.types,study,sep='_'))

  expr.list<-lapply(mycancerstudy,function(cs){
    print(paste(cs,study,'Mutation data'))
    caseLists<-getCaseLists(mycgds,cs)
    allprofs<-getGeneticProfiles(mycgds,cs)[,1]
    profile=allprofs[grep('mutations',allprofs)]
    seqSamps=caseLists$case_list_id[grep('sequenced',caseLists$case_list_id)]
    #query in chunks, cBioPortal only handles a few hundred genes per call
    if(length(genelist)>800)
      gene.groups=split(genelist, ceiling(seq_along(genelist)/400))
    else
      gene.groups=list(genelist)
    dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,seqSamps))
    ddat<-matrix()
    if(length(dat)==1){
      ddat<-t(as.data.frame(dat))
    }else{
      for(i in which(sapply(dat,nrow)!=0)){
        ddat<-cbind(ddat,dat[[i]])
      }
    }
    # nans<-which(apply(ddat,2,function(x) all(is.nan(x)||is.na(x))))
    # nas<-which(apply(ddat,2,function(x) all(is.na(x))))
    # if(nrow(ddat)>1)
    #   ddat<-ddat[,-nans]
    ##now set to binary matrix
    dfdat<-apply(ddat,1,function(x){
      sapply(unlist(x),function(y) !is.na(y) && y!='NaN')
    })
    return(dfdat)
  })
  if(length(expr.list)>1){
    comm.genes<-c() #rownames(expr.list[[1]])
    for(i in 1:length(expr.list))
      comm.genes<-union(comm.genes,rownames(expr.list[[i]]))
    ##pad each study with FALSE rows for genes it is missing, then combine samples
    full.dat<-do.call('cbind',lapply(expr.list,function(x){
      missing<-setdiff(comm.genes,rownames(x))
      print(length(missing))
      if(length(missing)>0)
        dat<-rbind(x[intersect(rownames(x),comm.genes),],
                   t(sapply(missing,function(y,x) rep(FALSE,ncol(x)),x)))
      else{
        dat<-x[intersect(rownames(x),comm.genes),]
      }
      colnames(dat)<-colnames(x)
      dat<-dat[comm.genes,]
      return(dat)
    }))
  } else{
    full.dat<-expr.list[[1]]
  }
  return(full.dat)
}

#'get TCGA clinical data required for survival analysis
#'
getDisClinicalData<-function(dis='',study='tcga'){
  mycgds = CGDS("http://www.cbioportal.org/")
  all.studies<-getCancerStudies(mycgds)
  # Get available case lists (collection of samples) for a given cancer study
  if(tolower(dis)=='alltcga') dval='' else dval=dis
  ind=grep(paste(tolower(dval),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id)
  print(paste('found',length(ind),study,'studies for disease',dis))
  if(length(ind)==0){
    return(NULL)
  }
  mycancerstudy<-all.studies$cancer_study_id[ind]
  clin.list<-lapply(mycancerstudy,function(cs){
    print(paste(cs,study,'Clinical data'))
    caseLists<-getCaseLists(mycgds,cs)%>%select(case_list_id,case_ids)
    clinDat<-getClinicalData(mycgds,caseLists[grep('all',caseLists$case_list_id),1])
    clinDat%>%select(OS_MONTHS,OS_STATUS,DFS_MONTHS,DFS_STATUS)%>%mutate(Sample=rownames(clinDat),Study=cs)
  })
  full.dat<-do.call('rbind',clin.list)
  full.dat
}

#'formats TCGA expression data into a single matrix, often combining
#'samples from multiple studies
getDisExpressionData<-function(dis='',study='tcga',getZscores=FALSE,genelist=all.genes){
  #if disease is blank will get all diseases
  mycgds = CGDS("http://www.cbioportal.org/public-portal/")
  all.studies<-getCancerStudies(mycgds)
  if(tolower(dis)=='alltcga') dval='' else dval=dis
  ind=grep(paste(tolower(dval),paste(study,'$',sep=''),sep='_'),all.studies$cancer_study_id)
  print(paste('found',length(ind),study,'samples for disease',dis))
  if(length(ind)==0){
    return(NULL)
  }
  mycancerstudy<-all.studies$cancer_study_id[ind]
  expr.list<-lapply(mycancerstudy,function(cs){
    print(paste(cs,study,'Expression data'))
    caseLists<-getCaseLists(mycgds,cs)
    allprofs<-getGeneticProfiles(mycgds,cs)[,1]
    rnaseqs<-allprofs[grep('rna_seq',allprofs)]
    if(length(rnaseqs)==0)
      rnaseqs<-allprofs[grep('mrna',allprofs)]
    if(length(grep('merged',rnaseqs))>0)
      rnaseqs<-rnaseqs[-grep('merged',rnaseqs)]
    zscores<-grep('Zscores',rnaseqs)
    profile=rnaseqs[zscores]
    if(!getZscores)
      profile=rnaseqs[-zscores]
    if(length(profile)>1)
      profile=profile[grep('v2',profile)]
    mrnaSamps=caseLists$case_list_id[grep('rna_seq',caseLists$case_list_id)]
    if(length(mrnaSamps)>1)
      mrnaSamps=mrnaSamps[grep('v2',mrnaSamps)]
    else if(length(mrnaSamps)==0)
      mrnaSamps=caseLists$case_list_id[grep('mrna',caseLists$case_list_id)]
    #chunk the requested gene list (was splitting the global all.genes regardless of genelist)
    if(length(genelist)>400)
      gene.groups=split(genelist, ceiling(seq_along(genelist)/400))
    else
      gene.groups=list(genelist)
    dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,mrnaSamps))
    ddat<-matrix()
    if(length(dat)==1){
      ddat<-t(as.data.frame(dat))
    }else{
      for(i in which(sapply(dat,nrow)!=0)){
        ddat<-cbind(ddat,dat[[i]])
      }
    }
    nans<-which(apply(ddat,2,function(x) all(is.na(x))||mean(x,na.rm=T)==0))
    if(length(nans)>0 && nrow(ddat)>1)
      ddat<-ddat[,-nans]
    #ddat<-ddat[,-1]
    ddat<-data.frame(t(ddat))
    ddat
  })
  if(length(expr.list)>1){
    comm.genes<-rownames(expr.list[[1]])
    for(i in 2:length(expr.list))
      comm.genes<-intersect(comm.genes,rownames(expr.list[[i]]))
    ##now combine all samples by doing a cbind
    full.dat<-do.call('cbind',lapply(expr.list,function(x) x[comm.genes,]))
  } else{
    full.dat<-expr.list[[1]]
  }
  new.dat<-full.dat
  #print(dim(full.dat))
  #new.dat<-apply(full.dat,2,function(x) as.numeric)
  #rownames(new.dat)<-rownames(full.dat)
  return(new.dat)
}

#'get CCLE expression data. can get z score or affy data, not sure which to do yet
getCcleExpressionData<-function(tiss='',getZscores=FALSE){
  mycgds = CGDS("http://www.cbioportal.org/public-portal/")
  #all.studies<-getCancerStudies(mycgds)
  mycancerstudy='cellline_ccle_broad'
  if(tolower(tiss)=='allccle') tval='' else tval=tiss
  mprofile<-'cellline_ccle_broad_mrna_median_Zscores'
  ##eventually test out both
  if(!getZscores)
    mprofile<-'cellline_ccle_broad_mrna'
  profile<-mprofile
  caseLists<-getCaseLists(mycgds,mycancerstudy)
  print(paste('Collecting CCLE expression data for',tiss,'tissue'))
  ##get those samples with mRNA expression data
  mrnaSamps<<-caseLists$case_list_id[grep('mrna',caseLists$case_list_id)]
  #cbio seems to handle chunks of 500 or so
  gene.groups=split(all.genes, ceiling(seq_along(all.genes)/400))
  dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,mrnaSamps))
  ddat<-matrix()
  for(i in which(sapply(dat,nrow)!=0)){
    ddat<-cbind(ddat,dat[[i]])
  }
  nans<-which(apply(ddat,2,function(x) all(is.nan(x))))
  if(length(nans)>0)
    ddat<-ddat[,-nans]
  ddat<-ddat[,-1]
  ddat<-data.frame(t(ddat))
  ##tissue here
  if(tval!=''){
    cols<-grep(tval,colnames(ddat))
    print(paste('Selecting',length(cols),'cell lines for tissue',tiss))
  }else{
    cols<-1:ncol(ddat)
  }
  return(ddat[,cols])
}

#'get CCLE mutation data
getCcleMutationData<-function(tiss=''){
  mycancerstudy<-'cellline_ccle_broad'
  mycgds = CGDS("http://www.cbioportal.org/public-portal/")
  #all.studies<-getCancerStudies(mycgds)
  if(tolower(tiss)=='allccle') tval='' else tval=tiss
  profile<-"cellline_ccle_broad_mutations"
  ##think about adding CNA data
  caseLists<-getCaseLists(mycgds,mycancerstudy)
  print('Got caselists')
  mutSamps<-caseLists$case_list_id[grep("sequenced",caseLists[,1])]
  print(paste('Collecting CCLE mutation data for',tiss,'tissue'))
  gene.groups=split(all.genes, ceiling(seq_along(all.genes)/500))
  dat<-lapply(gene.groups,function(g) getProfileData(mycgds,g,profile,mutSamps))
  ddat<-matrix()
  for(i in which(sapply(dat,nrow)!=0)){
    ddat<-cbind(ddat,dat[[i]])
  }
  nans<-which(apply(ddat,2,function(x) all(is.nan(x)||is.na(x))))
  # nas<-which(apply(ddat,2,function(x) all(is.na(x))))
  ddat<-ddat[,-nans]
  ##now set to binary matrix
  dfdat<-apply(ddat,1,function(x){
    sapply(unlist(x),function(y) !is.na(y) && y!='NaN')
  })
  ##tissue here
  if(tval!=''){
    cols<-grep(tval,colnames(dfdat))
    print(paste('Selecting',length(cols),'cell lines for tissue',tiss))
  }else{
    cols<-1:ncol(dfdat)
  }
  return(dfdat[,cols])
}
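The helpers above wrap the cgdsr API; a minimal sketch of combining them for one disease follows. The disease code "brca" and the idea of intersecting samples across data types are illustrative assumptions, not part of the original script.

# Hypothetical usage of the cBioPortal helpers above: pull expression and
# mutation calls for TCGA breast cancer and keep samples present in both.
expr <- getDisExpressionData(dis = 'brca', study = 'tcga')   # genes x samples, numeric
mut  <- getDisMutationData(dis = 'brca', study = 'tcga')     # genes x samples, logical
shared <- intersect(colnames(expr), colnames(mut))
expr <- expr[, shared]
mut  <- mut[, shared]
dim(expr); dim(mut)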
#!/applications/R/R-3.5.0/bin/Rscript

# Compile DMC1 gene quantile hypergeometric test results into one supplemental table

library(parallel)

inDir <- "/home/ajt200/analysis/wheat/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/quantiles/quantiles_by_DMC1_in_genes/"
inDir1 <- paste0(inDir, "hypergeometricTests/homoeolog_exp_bias/HC_CS_no_stress/")
inDir2 <- paste0(inDir, "hypergeometricTests/triad_movement/HC_CS_no_stress/")

geneCat1 <- c("Balanced", ".dominant", "non_dominant", ".suppressed", "non_suppressed",
              "A.dominant", "B.dominant", "D.dominant",
              "A.suppressed", "B.suppressed", "D.suppressed")
geneCatName1 <- c("Balanced", "Dominant", "Non-dominant", "Suppressed", "Non-suppressed",
                  "A dominant", "B dominant", "D dominant",
                  "A suppressed", "B suppressed", "D suppressed")
geneCat2 <- c("stable", "middle", "dynamic")
geneCatName2 <- c("Stable 10%", "Middle 80%", "Dynamic 10%")
geneCat <- c(geneCat1, geneCat2)
geneCatName <- c(geneCatName1, geneCatName2)

quantiles <- 1:4

summaryTableDFList <- mclapply(seq_along(quantiles), function(x) {
  summaryTableDFList_geneCat1 <- lapply(seq_along(geneCat1), function(w) {
    load(paste0(inDir1,
                "HC_CS_no_stress_", geneCat1[w],
                "_representation_among_quantile", x,
                "_of_4_by_log2_DMC1_Rep1_ChIP_control_in_genes_of_genes_in_Agenome_Bgenome_Dgenome_genomewide_hypergeomTestRes.RData"))
    WgCat1 <- hgTestResults
    rm(hgTestResults); gc()
    WgCat1 <- data.frame(
      gene_category = geneCatName1[w],
      DMC1_gene_quantile = paste0("Quantile ", x),
      genes_in_quantile = WgCat1@quantile_genes,
      expected = WgCat1@expected,
      observed = WgCat1@observed,
      alpha = WgCat1@alpha0.05,
      log2obsexp = WgCat1@log2obsexp,
      log2alphaexp = WgCat1@log2alpha,
      pval = WgCat1@pval,
      stringsAsFactors = F)
    WgCat1
  })
  summaryTableDFList_geneCat2 <- lapply(seq_along(geneCat2), function(w) {
    load(paste0(inDir2,
                "HC_CS_no_stress_", geneCat2[w],
                "_representation_among_quantile", x,
                "_of_4_by_log2_DMC1_Rep1_ChIP_control_in_genes_of_genes_in_Agenome_Bgenome_Dgenome_genomewide_hypergeomTestRes_minConditions6.RData"))
    WgCat2 <- hgTestResults
    rm(hgTestResults); gc()
    WgCat2 <- data.frame(
      gene_category = geneCatName2[w],
      DMC1_gene_quantile = paste0("Quantile ", x),
      genes_in_quantile = WgCat2@quantile_genes,
      expected = WgCat2@expected,
      observed = WgCat2@observed,
      alpha = WgCat2@alpha0.05,
      log2obsexp = WgCat2@log2obsexp,
      log2alphaexp = WgCat2@log2alpha,
      pval = WgCat2@pval,
      stringsAsFactors = F)
    WgCat2
  })
  rbind(
    do.call(rbind, summaryTableDFList_geneCat1),
    do.call(rbind, summaryTableDFList_geneCat2)
  )
}, mc.cores = length(quantiles), mc.preschedule = F)

summaryTableDF <- do.call(rbind, summaryTableDFList)

print(colnames(summaryTableDF))
colnames(summaryTableDF) <- c("Gene category",
                              "DMC1 gene quantile",
                              "Total genes in quantile",
                              "Expected category genes in quantile",
                              "Observed category genes in quantile",
                              "Alpha (5%)",
                              "Log2(observed/expected)",
                              "Log2(alpha/expected)",
                              "P")
print(colnames(summaryTableDF))

write.table(summaryTableDF,
            file = "Supplemental_TableS17_DMC1_gene_quantiles_hypergeometricTests_homoeologExpressionVariation.tsv",
            col.names = T, row.names = F, sep = "\t", quote = F)
write.csv(summaryTableDF,
          file = "Supplemental_TableS17_DMC1_gene_quantiles_hypergeometricTests_homoeologExpressionVariation.csv",
          row.names = F, quote = F)
/manuscript_DMC1_ASY1/GenomeResearch/SuppTables/SuppTableS17_DMC1_gene_quantiles_hypergeomTests_homoeologExpVar.R
no_license
ajtock/wheat
R
false
false
4,113
r
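The hgTestResults objects loaded by the DMC1 compile script above come from an upstream pipeline that is not shown here. For orientation only, the quantities being tabulated (expected count, log2 observed/expected, hypergeometric P) can be computed along these lines; every count below is invented.

# Toy hypergeometric enrichment calculation (invented numbers)
N <- 20000  # genes genome-wide
K <- 1500   # genes in one category (e.g. "A dominant")
n <- 5000   # genes in one DMC1 quantile
x <- 450    # category genes observed in that quantile
expected <- n * K / N
log2obsexp <- log2(x / expected)
pval <- phyper(x - 1, K, N - K, n, lower.tail = FALSE)  # P(X >= x)
c(expected = expected, log2obsexp = log2obsexp, pval = pval)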
library(Rcplex)

solve_classification_svm <- function(K, y, C, epsilon) {
  N <- length(y)
  yyK <- (y %*% t(y)) * K

  opts <- list()
  opts$trace <- 0
  opts$maxcalls <- 41 * 200

  # solve the SVM dual QP: min -1'a + 0.5 a' yyK a  s.t.  y'a = 0, 0 <= a <= C
  result <- Rcplex(cvec = rep(-1, N),
                   Amat = matrix(y, nrow = 1, byrow = TRUE),
                   bvec = 0,
                   Qmat = yyK,
                   lb = rep(0, N),
                   ub = rep(C, N),
                   control = opts,
                   objsense = "min",
                   sense = "E")

  alpha <- result$xopt[1:N]
  alpha_original <- alpha
  alpha[alpha < +C * epsilon] <- 0
  alpha[alpha > +C * (1 - epsilon)] <- +C

  objective <- sum(alpha) - 0.5 * (t(alpha) %*% yyK) %*% (alpha)
  objective <- objective * (objective >= 0)
  objective_original <- sum(alpha_original) - 0.5 * (t(alpha_original) %*% yyK) %*% (alpha_original)
  objective_original <- objective_original * (objective_original >= 0)

  support_indices <- which(alpha != 0)
  active_indices <- which(alpha != 0 & alpha < C)
  if (length(active_indices) > 0) {
    b <- mean(y[active_indices] * (1 - yyK[active_indices, support_indices] %*% alpha[support_indices]))
  } else {
    b <- 0
  }

  model <- list(alpha = alpha * y, b = b, objective = objective,
                alpha_original = alpha_original * y, objective_original = objective_original)
  return(model)
}

solve_multitask_quadratic_master_problem <- function(obj_coef, obj_quadratic, constraints_matrix, rhs, senses, lb, TN, P) {
  opts <- list()
  opts$trace <- 0
  opts$maxcalls <- 41 * 200

  Qmat <- NULL
  if(!is.null(obj_quadratic))
    Qmat <- 2*obj_quadratic

  result <- Rcplex(cvec = obj_coef,
                   Qmat = Qmat,
                   Amat = constraints_matrix,
                   bvec = rhs,
                   lb = lb,
                   control = opts,
                   objsense = "min",
                   sense = senses)

  # order of variables: Gamma_1, ..., Gamma_T, eta_1_1,...,eta_1_P,eta_2_1,...,eta_2_P,...,eta_T_1,...,eta_T_P
  sln <- result$xopt

  Gamma <- sln[1:TN]

  i <- TN+1
  eta <- vector("list", TN)
  for(t in 1:TN)
  {
    eta[[t]] <- sln[i:(i+P-1)]
    i <- i+P
  }

  quadratic_penalty <- 0
  if(!is.null(obj_quadratic))
    quadratic_penalty <- (t(sln) %*% obj_quadratic) %*% (sln)

  linear_penalty <- sum(sln*obj_coef) - sum(Gamma)

  objective <- result$obj

  output <- list(Gamma = Gamma, quadratic_penalty = quadratic_penalty,
                 linear_penalty = linear_penalty, eta = eta, objective = objective)
  return(output)
}
/solve_classification_models_cplex.R
no_license
arezourahimi/mtgsbc
R
false
false
2,695
r
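A minimal sketch of calling solve_classification_svm() from the file above on toy data with a linear kernel, assuming a working Rcplex/CPLEX installation; the simulated data and the value of C are arbitrary.

set.seed(1)
X <- rbind(matrix(rnorm(40, mean = -1), ncol = 2),
           matrix(rnorm(40, mean = +1), ncol = 2))
y <- c(rep(-1, 20), rep(+1, 20))
K <- X %*% t(X)                               # linear kernel
svm <- solve_classification_svm(K, y, C = 1, epsilon = 1e-5)
f <- K %*% svm$alpha + svm$b                  # model$alpha already carries the labels
mean(sign(f) == y)                            # training accuracy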
#Code to find the months of attack
SHARKATTACK <- read.csv('sharkattack.csv', header = T, stringsAsFactors = F)

install.packages("lubridate")
library(lubridate)

#strip stray whitespace from the date strings before extracting the month
#(the original line pointed at an unrelated MDEHBFile object)
SHARKATTACK$Date <- gsub(' ', '', SHARKATTACK$Date)
SHARKATTACK$Month <- month(SHARKATTACK$Date)

SHARKATTACK$Area <- as.factor(SHARKATTACK$Area)
summary(SHARKATTACK$Area)

SHARKATTACKnc <- subset(SHARKATTACK, SHARKATTACK$Area == "North Carolina")
SHARKATTACKnc$Date <- as.factor(SHARKATTACKnc$Date)
summary(SHARKATTACKnc$Date)

#to merge moonphase file and shark attack file of nc
sharkattackdates <- read.csv('shark_attack_dates.csv', header = T, stringsAsFactors = F)
names(sharkattackdates)
names(sharkattackdates) <- c("Date", "attack", "moonphase")

sharkattacknc <- read.csv('NC-2000_2016_time.csv', header = T, stringsAsFactors = F)
sharkattacknc$Date <- as.factor(sharkattacknc$Date)
sharkattackdates$Date <- as.factor(sharkattackdates$Date)

sharkfile <- merge(sharkattackdates, sharkattacknc, all.x = TRUE)
write.csv(sharkfile, "sharkfile.csv")
/DataMining/RCode/monthsbysharkattacks.R
no_license
AKDDResearch/Shark-Attack
R
false
false
1,054
r
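lubridate::month() needs a parsed date, and the raw format of the Date column in the shark-attack file above is not shown, so the parser below (dmy() for strings like "15-Jun-2015") is only an assumption used to illustrate the step.

library(lubridate)
d <- dmy("15-Jun-2015")     # swap for mdy()/ymd() to match the real file format
month(d)                    # 6
month(d, label = TRUE)      # Jun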
context("checks shrub transect data")

data <- read.csv("../Plants/Portal_plant_transects_2015_present.csv")
old_transects <- read.csv("../Plants/Portal_plant_transects_1989_2009.csv")
species <- read.csv('../Plants/Portal_plant_species.csv')

test_that("valid year", {
  expect_true(all(data$year %in% 1981:max(data$year)))
})

test_that("valid plot", {
  expect_true(all(data$plot %in% 1:24))
})

test_that("valid transect", {
  expect_true(all(data$transect %in% c(11,71)))
})

test_that("valid species", {
  expect_true(all(data$species %in% species$speciescode))
})

test_that("valid start", {
  expect_true(all(data$start %in% 0:7500))
})

test_that("valid stop", {
  expect_true(all(data$stop %in% 0:7500))
})

test_that("stop greater than start", {
  expect_true(all(data$stop >= data$start))
})

test_that("valid height", {
  expect_true(min(data$height, na.rm = T) > 0)
  expect_true(max(data$height, na.rm = T) <= 400)
})

test_that("no duplicate data", {
  expect_true(sum(duplicated(data))==0)
})

test_that("valid year in old transects", {
  expect_true(all(old_transects$year %in% c(1989,1992,1995,1998,2001,2004,2009)))
})

test_that("valid plot in old transects", {
  expect_true(all(old_transects$plot %in% 1:24))
})

test_that("valid transect in old transects", {
  expect_true(all(old_transects$transect %in% c("NW","SW","NE","SE","")))
})

test_that("valid species in old transects", {
  expect_true(all(old_transects$species %in% species$speciescode))
})

test_that("valid point in old transects", {
  expect_true(all(old_transects$point %in% c(1:250,NA)))
})
/testthat/test-shrub_transects.R
permissive
weecology/PortalData
R
false
false
1,544
r
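These checks are meant to be run by testthat. A sketch of invoking just this file, assuming the working directory is the repository's testthat/ folder so the relative ../Plants/ paths resolve:

library(testthat)
test_file("test-shrub_transects.R")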
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamodb_operations.R
\name{dynamodb_restore_table_to_point_in_time}
\alias{dynamodb_restore_table_to_point_in_time}
\title{Restores the specified table to the specified point in time within
EarliestRestorableDateTime and LatestRestorableDateTime}
\usage{
dynamodb_restore_table_to_point_in_time(SourceTableArn, SourceTableName,
  TargetTableName, UseLatestRestorableTime, RestoreDateTime,
  BillingModeOverride, GlobalSecondaryIndexOverride,
  LocalSecondaryIndexOverride, ProvisionedThroughputOverride,
  SSESpecificationOverride)
}
\arguments{
\item{SourceTableArn}{The DynamoDB table that will be restored. This value
is an Amazon Resource Name (ARN).}

\item{SourceTableName}{Name of the source table that is being restored.}

\item{TargetTableName}{[required] The name of the new table to which it
must be restored.}

\item{UseLatestRestorableTime}{Restore the table to the latest possible
time. \code{LatestRestorableDateTime} is typically 5 minutes before the
current time.}

\item{RestoreDateTime}{Time in the past to restore the table to.}

\item{BillingModeOverride}{The billing mode of the restored table.}

\item{GlobalSecondaryIndexOverride}{List of global secondary indexes for
the restored table. The indexes provided should match existing secondary
indexes. You can choose to exclude some or all of the indexes at the time
of restore.}

\item{LocalSecondaryIndexOverride}{List of local secondary indexes for the
restored table. The indexes provided should match existing secondary
indexes. You can choose to exclude some or all of the indexes at the time
of restore.}

\item{ProvisionedThroughputOverride}{Provisioned throughput settings for
the restored table.}

\item{SSESpecificationOverride}{The new server-side encryption settings
for the restored table.}
}
\description{
Restores the specified table to the specified point in time within
\code{EarliestRestorableDateTime} and \code{LatestRestorableDateTime}.
You can restore your table to any point in time during the last 35 days.
Any number of users can execute up to 4 concurrent restores (any type of
restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your
table data to the state based on the selected date and time
(day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored
table using point in time recovery:
\itemize{
\item Global secondary indexes (GSIs)

\item Local secondary indexes (LSIs)

\item Provisioned read and write capacity

\item Encryption settings

All these settings come from the current settings of the source table at
the time of restore.
}

You must manually set up the following on the restored table:
\itemize{
\item Auto scaling policies

\item IAM policies

\item Amazon CloudWatch metrics and alarms

\item Tags

\item Stream settings

\item Time to Live (TTL) settings

\item Point in time recovery settings
}
}
\section{Request syntax}{
\preformatted{svc$restore_table_to_point_in_time(
  SourceTableArn = "string",
  SourceTableName = "string",
  TargetTableName = "string",
  UseLatestRestorableTime = TRUE|FALSE,
  RestoreDateTime = as.POSIXct(
    "2015-01-01"
  ),
  BillingModeOverride = "PROVISIONED"|"PAY_PER_REQUEST",
  GlobalSecondaryIndexOverride = list(
    list(
      IndexName = "string",
      KeySchema = list(
        list(
          AttributeName = "string",
          KeyType = "HASH"|"RANGE"
        )
      ),
      Projection = list(
        ProjectionType = "ALL"|"KEYS_ONLY"|"INCLUDE",
        NonKeyAttributes = list(
          "string"
        )
      ),
      ProvisionedThroughput = list(
        ReadCapacityUnits = 123,
        WriteCapacityUnits = 123
      )
    )
  ),
  LocalSecondaryIndexOverride = list(
    list(
      IndexName = "string",
      KeySchema = list(
        list(
          AttributeName = "string",
          KeyType = "HASH"|"RANGE"
        )
      ),
      Projection = list(
        ProjectionType = "ALL"|"KEYS_ONLY"|"INCLUDE",
        NonKeyAttributes = list(
          "string"
        )
      )
    )
  ),
  ProvisionedThroughputOverride = list(
    ReadCapacityUnits = 123,
    WriteCapacityUnits = 123
  ),
  SSESpecificationOverride = list(
    Enabled = TRUE|FALSE,
    SSEType = "AES256"|"KMS",
    KMSMasterKeyId = "string"
  )
)
}
}
\keyword{internal}
/paws/man/dynamodb_restore_table_to_point_in_time.Rd
permissive
sanchezvivi/paws
R
false
true
4,398
rd
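A hedged usage sketch of the operation documented above through the paws DynamoDB client; the table names are made up and credentials/region are assumed to be configured in the environment.

library(paws)
svc <- dynamodb()
svc$restore_table_to_point_in_time(
  SourceTableName = "Thread",           # hypothetical source table
  TargetTableName = "ThreadRestored",   # hypothetical restore target
  UseLatestRestorableTime = TRUE
)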
setwd('/Users/jmrt/Documents/Internship/Embassy_HateCrimes/hate_groups')

library(leaflet)
library(rgdal)
library(dplyr)
library(colorspace)
library(htmltools)
library(tidyverse)

## Download Shapes and Data ##
shapes <- readOGR("data/shapes/tl_2017_us_state.shp")
hg <- read.csv('data/splc-hate-groups.csv')
agg_hc <- read.csv('data/agg_hc.csv') #use aggregated data from python code
pop <- read.csv('data/population.csv')
agg_hg <- read.csv('data/agg_hg.csv')

## Aggregate Hate Groups Data ##
all_hg <- hg %>% group_by(State, Year) %>% summarise(all=n())

categories = c("Anti-Immigrant", "Anti-Muslim", "White Nationalist", "Neo-Nazi")
for(cat in categories){
  df = filter(hg, hg['Ideology'] == cat) %>%
    group_by(State, Year) %>%
    summarize(cat = n())
  colnames(df) <- c('State', 'Year', cat)
  all_hg <- merge(all_hg, df, by=c('State', 'Year'), all=TRUE)
}

#Include population estimates
all_hg = merge(filter(all_hg, all_hg$Year > 2009), pop, by=c('State', 'Year'), all=TRUE)

#Remove DC
all_hg = filter(all_hg, all_hg['State'] != 'District of Columbia')
all_states = tibble(unique(all_hg$State))
colnames(all_states) <- c('State')
all_hg = merge(all_hg, all_states, by='State', all=TRUE)

#Normalize number of hate groups per million people
all_hg$all[is.na(all_hg$all)] <- 0
all_hg$all_n = all_hg$all/all_hg$population*1000000  #total groups per million (was all_hg[cat], leftover from the loop)
for (cat in categories){
  all_hg[cat][is.na(all_hg[cat])] <- 0
  all_hg[paste(cat, '_n', sep='')] = all_hg[cat]/all_hg$population*1000000
}
all_hg['Other'] = all_hg$all - rowSums(all_hg[categories], na.rm=TRUE)

## Merge with Hate Crimes Data ##
hdata = merge(all_hg, agg_hc, by=c('State', 'Year'), all=TRUE)
hdata$race[is.na(hdata$race)] <- 0 #Replace NA with 0
hdata$latino[is.na(hdata$latino)] <- 0
hdata['race_n'] = hdata$race / hdata$population * 1000000
hdata['latino_n'] = hdata$latino / hdata$population * 1000000

## Melt and Merge Functions ##
#melted = melt(hdata, id.vars = c('State', 'Year'), measure.vars=colnames(hdata)[3:11])

year = 2019
df = filter(hdata, hdata['Year'] == year)
shapes_toplot = merge(shapes, hdata, by.x='NAME', by.y='State', all=TRUE)

## Merge all aggregated data with Shapes and PLOT ##
map_plot <- function(var_toplot, title, color="PuBu", bins=4){
  pal = colorBin(color, shapes_toplot[[var_toplot]], bins=bins)
  labels = paste("<p>", shapes_toplot$NAME, "</p>",
                 "<p>", title, "</p>",
                 "<p>", round(shapes_toplot[[var_toplot]], digits=1), "</p>", sep="")
  m <- leaflet() %>%
    setView(-96, 37.8, 5) %>%
    addProviderTiles(providers$OpenStreetMap.Mapnik) %>%
    addPolygons(data=shapes_toplot,
                weight=1,
                smoothFactor = 0.5, color='grey',
                fillOpacity = 0.8,
                fillColor = pal(shapes_toplot[[var_toplot]]),
                #Highlight neighbourhoods
                highlight = highlightOptions(weight=5,
                                             color='transparent',
                                             bringToFront = TRUE,
                                             fillOpacity = 0.7),
                layerId = shapes_toplot$GEOID,
                label = lapply(labels, HTML),
                labelOptions = labelOptions(
                  style = list("font-weight" = "normal", padding = "2px 5px"),
                  textsize = "8px",
                  direction = "auto"))
  #%>% addLegend(values=shapes_toplot[[var_toplot]], pal=pal, title=title)
  return(m)
}

#map_plot('all', 'Number of Hate Groups:', color='Reds')
#map_plot('Anti-Immigrant', 'Number of Hate Groups:', color='Reds')
#map_plot('race', 'Number of Race Motivated Hate Crimes per Million:', color='Oranges')
#map_plot('latino', 'Number of Anti-Latino Motivated Hate Crimes per Million:', color='BuPu')
/handle_data.R
no_license
jramtos/hate_groups
R
false
false
3,679
r
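map_plot() above returns an htmlwidget, so one way to share a map outside an interactive session is to write it to a standalone HTML file; the column chosen and the output file name below are illustrative only.

m <- map_plot('all_n', 'Hate groups per million:', color = 'Reds')
htmlwidgets::saveWidget(m, "hate_groups_per_million.html", selfcontained = TRUE)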
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_coverage.R
\name{set_coverage}
\alias{set_coverage}
\title{set_coverage}
\usage{
set_coverage(beginDate = character(), endDate = character(),
  date = character(), sci_names = character(),
  geographicDescription = character(),
  westBoundingCoordinate = numeric(), eastBoundingCoordinate = numeric(),
  northBoundingCoordinate = numeric(), southBoundingCoordinate = numeric(),
  altitudeMinimum = numeric(), altitudeMaximum = numeric(),
  altitudeUnits = character())
}
\arguments{
\item{beginDate}{Starting date for temporal coverage range.}

\item{endDate}{End date for temporal coverage range}

\item{date}{give a single date, or vector of single dates covered (instead
of beginDate and endDate)}

\item{sci_names}{string (space separated) or list or data frame of
scientific names for species covered. See details}

\item{geographicDescription}{text string describing the geographic location}

\item{westBoundingCoordinate}{Decimal longitude for west edge bounding box}

\item{eastBoundingCoordinate}{Decimal longitude for east edge bounding box}

\item{northBoundingCoordinate}{Decimal latitude value for north edge of bounding box}

\item{southBoundingCoordinate}{Decimal latitude value for south edge of bounding box}

\item{altitudeMinimum}{minimum altitude covered by the data (optional)}

\item{altitudeMaximum}{maximum altitude covered by the data (optional)}

\item{altitudeUnits}{name of the units used to measure altitude, if given}
}
\value{
a coverage object for EML
}
\description{
set_coverage
}
\details{
set_coverage provides a simple and concise way to specify the most common
temporal, taxonomic, and geographic coverage metadata. For certain studies
this will not be well suited, and users will need the more flexible but
more verbose construction using "new()" methods; for instance, to specify
temporal coverage in geological epoch instead of calendar dates, or to
specify taxonomic coverage in terms of other ranks or identifiers.
}
\note{
If "sci_names" is a data frame, column names of the data frame are rank
names. For user-defined "sci_names", users must make sure that the order
of rank names they specify is from high to low.
Ex. "Kingdom","Phylum","Class","Order","Family","Genus","Species","Common"
}
\examples{
coverage <- set_coverage(begin = '2012-06-01', end = '2013-12-31',
                         sci_names = "Sarracenia purpurea",
                         geographicDescription = "California coast, down through Baja, Mexico",
                         west = -122.44, east = -117.15,
                         north = 37.38, south = 30.00)
}
/man/set_coverage.Rd
no_license
nicolasfstgelais/EML
R
false
true
2,616
rd
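The note above says sci_names may also be a data frame whose column names are rank names ordered from high to low; a small sketch of that form follows (the taxa are chosen only for illustration).

taxa <- data.frame(Genus = "Sarracenia", Species = "purpurea",
                   stringsAsFactors = FALSE)
coverage <- set_coverage(begin = '2012-06-01', end = '2013-12-31',
                         sci_names = taxa,
                         geographicDescription = "California coast",
                         west = -122.44, east = -117.15,
                         north = 37.38, south = 30.00)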
library(methods)
library(foreach)
library(Matrix)
library(caret)
library(crisp)

mse = function(y, y.hat) { sum((y - y.hat)^2)/length(y) }

loadGapStatistic = function(city, trial) {
  as.integer(read.csv(sprintf('../../data/crime/%s/results/gfl/%d.csv', city, trial), header=FALSE)$V1[3])
}

args = commandArgs(trailingOnly = TRUE)
city = args[1]
trial = as.numeric(args[2]) # Trial ID
q = loadGapStatistic(city, trial) # Use the same Q as the GapTV method
sprintf("q: %d", q)

max.lam = 10.0
lambda.min.ratio = 0.001
n.lambda = 50
lambda.seq = exp(seq(log(max.lam), log(max.lam * lambda.min.ratio), len = n.lambda))

train_data = as.matrix(read.csv(file.path(sprintf("../../data/crime/%s/train/%d.csv", city, trial)), header=FALSE))
test_data = as.matrix(read.csv(file.path(sprintf("../../data/crime/%s/test/%d.csv", city, trial)), header=FALSE))
X_sweep = data.frame(as.matrix(read.csv(file.path(sprintf("../../data/crime/%s/sweep.csv", city)), header=FALSE)))

N = dim(train_data)[1]
X = train_data[, c(1, 2)]
y = as.numeric(as.vector(train_data[, 3]))
X_test = test_data[, c(1, 2)]
y_test = test_data[, 3]
X = data.frame(X)
X_test = data.frame(X_test)
colnames(X) = c("x1", "x2")
colnames(X_test) = c("x1", "x2")
colnames(X_sweep) = c("x1", "x2")

# Estimate the best lambda via k-fold cross validation
k = 5
folds <- createFolds(1:length(y), k=k)
prederr = rep(0, n.lambda)
for (fold in folds){
  print("Fold")
  foldmodel = crisp(y[-fold], X[-fold,], n.lambda=n.lambda, q=q, lambda.seq=lambda.seq)
  prederr = prederr + sapply(1:n.lambda, function(i) mse(y[fold], predict(foldmodel, X[fold,], i)))
}
best.lambda = which.min(prederr)
if (best.lambda == 1){
  lambda.seq = c(max.lam+1, max.lam)
  best.lambda = 2
}
model = crisp(y, X, n.lambda=best.lambda, q=q, lambda.seq=lambda.seq[1:best.lambda])
y_hat = predict(model, X_test, best.lambda)

write.csv(y_hat, file.path(sprintf("../../data/crime/%s/predictions/gapcrisp/%d.csv", city, trial)), row.names=FALSE)

rmse = sqrt(mean((y_test - y_hat)**2))
maxerr = max(abs(y_test - y_hat))
sprintf("CRISP --> q: %d lambda: %f RMSE: %f Max Error: %f", q, lambda.seq[best.lambda], rmse, maxerr)

write.csv(c(rmse,maxerr), file.path(sprintf("../../data/crime/%s/results/gapcrisp/%d.csv", city, trial)), row.names=FALSE)

print("Sweeping")
y_sweep = predict(model, X_sweep, best.lambda)
print("Saving sweeps")
write.csv(y_sweep, file.path(sprintf("../../data/crime/%s/sweeps/gapcrisp/%d.csv", city, trial)), row.names=FALSE)
print("Done!")
/R/crime/run_gap_crisp.R
no_license
tansey/mvtv
R
false
false
2,505
r
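The fold loop in the script above accumulates out-of-fold MSE across the whole lambda path before picking the minimiser. The same accumulation pattern, stripped down to a toy lm() fit on made-up data so it runs without the crime inputs or the crisp package:

library(caret)
set.seed(1)
x <- runif(100); y <- 2 * x + rnorm(100, sd = 0.1)
folds <- createFolds(seq_along(y), k = 5)
cv.err <- 0
for (fold in folds) {
  fit  <- lm(y ~ x, data = data.frame(x = x[-fold], y = y[-fold]))
  pred <- predict(fit, newdata = data.frame(x = x[fold]))
  cv.err <- cv.err + mean((y[fold] - pred)^2)   # out-of-fold MSE for this fold
}
cv.err / length(folds)                          # average CV error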
library(EMtree)
library(PLNmodels)
library(LITree)
library(ggraph)
library(tidygraph)
library(tidyverse)
library(mvtnorm)
library(useful)
library(mclust)
library(MASS)
library(parallel)
library(ROCR)
library(reshape2) #for ggimage
library(gridExtra)
library(harrypotter)
library(sparsepca)

source("/Users/raphaellemomal/these/R/codes/missingActor/fonctions-missing.R")
source("/Users/raphaellemomal/these/R/codes/missingActor/fonctions-exactDet.R")

J_AUC<-function(seed, p, r, alpha, eig.tol=1e-6, cliques_spca=NULL, B=100, cores=3,
                plot=FALSE, type="scale-free", n=200){
  #------ Data simulation
  set.seed(seed)
  O=1:p
  H=(p+1):(p+r)
  missing_data<-missing_from_scratch(n,p,r,type,plot)
  counts=missing_data$Y ; sigmaO= missing_data$Sigma ; omega=missing_data$Omega
  trueClique=missing_data$TC ; hidden=missing_data$H
  PLNfit<-PLN(counts~1, control=list(trace=0))
  MO<-PLNfit$var_par$M ; SO<-PLNfit$var_par$S ; sigma_obs=PLNfit$model_par$Sigma
  theta=PLNfit$model_par$Theta ; matcovar=matrix(1, n,1)
  ome=omega[c(setdiff(1:(p+r), hidden), hidden),c(setdiff(1:(p+r), hidden), hidden)]
  diag(ome)=0
  #------ cliques and VEMtree
  if(is.null(cliques_spca)){
    cliques_spca <- boot_FitSparsePCA(scale(MO),B=B,r=1, cores=3)
  }
  ListVEM_filtre<-List.VEM(cliquesObj=cliques_spca, counts, sigma_obs, MO,SO,r=1,eps=1e-3, maxIter=200,
                           alpha = alpha,cores=cores, nobeta = FALSE, filterDiag = TRUE,filterWg=TRUE)
  ListVEM_nofiltre<-List.VEM(cliquesObj=cliques_spca, counts, sigma_obs, MO,SO,r=1,eps=1e-3, maxIter=200,
                             alpha = alpha,cores=cores, nobeta = FALSE, filterDiag = FALSE,filterWg=FALSE)
  ListVEM=c(ListVEM_filtre,ListVEM_nofiltre)
  #------ shape the results
  filtre=rep(c(TRUE,FALSE), each=length(unique(cliques_spca$cliqueList)))
  goodPrec=!do.call(rbind,lapply(ListVEM, function(x) x$max.prec))
  J=do.call(rbind,lapply(ListVEM, function(vem){tail(vem$lowbound$J,1)}))
  nbocc=do.call(rbind, lapply(ListVEM, function(vem){ vem$nbocc}))
  AUC=do.call(rbind, lapply(ListVEM, function(vem){
    Pg=vem$Pg
    AUC=round(auc(pred = Pg, label = ome),4)
  }))
  ppvh=do.call(rbind, lapply(ListVEM, function(vem){
    Pg=vem$Pg
    ppvh=accppvtpr(Pg,ome,h=15,seuil=0.5)[5]
  }))
  Icl<-do.call(rbind, lapply(ListVEM, function(vem){
    J=tail(vem$lowbound$J,1) ; Wg=vem$Wg ; Pg=vem$Pg
    pen_T=-( sum( Pg * log(Wg+(Wg==0)) ) - logSumTree(Wg)$det)
    ICL = J-pen_T
    return(ICL)
  }))
  vBICs<-do.call(rbind, lapply(ListVEM, function(vem){
    J=tail(vem$lowbound$J,1)
    r=1;d=1 ; q=p+r
    nbparam<-p*(d) + (p*(p+1)/2 +r*p)+(q*(q-1)/2 - 1)
    nbparam_mixedmodel<-round(SumTree(vem$W),1)*(1+q^2)-1
    vbic0=J- nbparam*log(n)/2
    vbic1=J- nbparam*log(n)/2 -nbparam_mixedmodel
    vbic2=J- (nbparam+nbparam_mixedmodel)*log(n)/2
    return(c(vbic0=vbic0,vbic1=vbic1,vbic2=vbic2))
  }))
  JPLN<-do.call(rbind, lapply(ListVEM, function(vem){
    EhZZ=t(vem$M[,O])%*%vem$M[,O] + diag(colSums(vem$S[,O]))
    sigTilde = (1/n)*EhZZ
    omega=vem$omega
    EsO=vem$Pg*vem$omega+diag(diag(vem$omega))
    EgOm = EsO[O,O] - matrix(EsO[O,H],p,r)%*%matrix(EsO[H,O],r,p)/EsO[H,H]
    EgOm = nearPD(EgOm, eig.tol=eig.tol)$mat
    JPLN_SigT = part_JPLN(sigTilde,EhZZ=EhZZ)
    JPLN_EgOm = part_JPLN(EgOm,EhZZ=EhZZ, var=FALSE)
    return(data.frame(JPLN_SigT=JPLN_SigT,JPLN_EgOm=JPLN_EgOm))}))

  data= data.frame(goodPrec, J, ICL=Icl, AUC, ppvh, vBICs, filtre=filtre,
                   JPLN_SigT=JPLN$JPLN_SigT, JPLN_EgOm=JPLN$JPLN_EgOm,
                   index=rep(1:length(unique(cliques_spca$cliqueList)),2))
  return(data)
}

part_JPLN<-function(mat_var, EhZZ, var=TRUE){
  if(var){
    partJPLN=-n*0.5*(det.fractional(mat_var, log=TRUE)) - 0.5*sum(EhZZ*solve(mat_var))
  }else{ # if a precision matrix is given instead of a covariance matrix
    partJPLN= n*0.5*(det.fractional(mat_var, log=TRUE)) - 0.5*sum(EhZZ*(mat_var))
  }
  return(partJPLN)
}

# maxJ_good=which(J==max(J[J<min(J[!goodPrec])]))
# maxJ=which.max(J)
# choice=which.max(ICL[goodPrec])
#ICL[choice]

#--- experiments
cliques_spca19=readRDS("/Users/raphaellemomal/these/R/codes/missingActor/SimResults/cliques_spca19_10000.rds")
cliques_spca1=readRDS("/Users/raphaellemomal/these/R/codes/missingActor/SimResults/cliques_spca1_10000.rds")
small_cliques1=list()
small_cliques1$cliqueList=cliques_spca1$cliqueList[1:10]
small_cliques1$nb_occ = cliques_spca1$nb_occ[1:10]

tic()
seed1<-J_AUC(seed = 1, p = 14, r=1, alpha=0.1, eig.tol=1e-6, cliques_spca=cliques_spca1) # 17 min
toc()
tic()
seed19<-J_AUC(seed = 19, p = 14, r=1, alpha=0.1, eig.tol=1e-6, cliques_spca=cliques_spca19) # 71 min
toc()

plot(seed19$JPLN_SigT, seed19$JPLN_EgOm)

seed1$seed=1 ; seed19$seed=19
seed_filtre=rbind(seed1,seed19)
saveRDS(seed_filtre, "/Users/raphaellemomal/these/R/codes/missingActor/SimResults/seed_filtre_eigtol_1e-6.rds")

#--- plots
seed_filtre %>%
  ggplot(aes( AUC,J, color=(goodPrec)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed)+mytheme.dark("Without machine \nprecision:")
seed_filtre %>%
  ggplot(aes( AUC,Delta, color=(goodPrec)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed)+mytheme.dark("Without machine \nprecision:")
seed_filtre %>%
  ggplot(aes( ppvh,Delta, color=(goodPrec)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed)+mytheme.dark("Without machine \nprecision:")

seed_filtre=seed_filtre %>% group_by(seed,filtre) %>%
  mutate(medDelta = quantile(Delta,0.1), goodDelta = Delta<medDelta) %>%
  ungroup()
seed_filtre %>%
  ggplot(aes( AUC,J, color=(goodDelta)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed) + mytheme.dark("Delta < q10(Delta)")

seed_filtre %>% dplyr::select(J,filtre,index,seed) %>% spread(filtre,J) %>%
  ggplot(aes(`FALSE`, `TRUE`))+geom_point()+facet_wrap(~seed)+geom_abline()+
  mytheme.dark("")
seed_filtre %>% dplyr::select(Delta,filtre,index,seed) %>% spread(filtre,Delta) %>%
  ggplot(aes(`FALSE`, `TRUE`))+geom_point()+facet_wrap(~seed)+geom_abline()+
  mytheme.dark("")+labs(title="Delta lower with filters", x="without filters", y="with filters")

seed_filtre=seed_filtre %>% mutate(tron_vbic1=ifelse(!is.finite(vbic1),.Machine$double.xmax,vbic1))
seed_filtre %>%
  ggplot(aes( AUC,tron_vbic1, color=(goodDelta)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed) + mytheme.dark("Delta < q10(Delta)")

data %>% filter(goodPrec) %>% filter(ICL==max(ICL))
data %>% filter(J==max(J))
data %>% group_by(goodPrec) %>%
  summarise(maxJ=max(J), mean.auc=mean(AUC), mean.ppvh=mean(ppvh),
            indexmaxJ = which.max(J), indexmaxICL = which.max(ICL))

# VEM_1=ListVEM[[maxJ_good]]
# maxJ_good=which(!goodPrec)[which.max(ICL[!goodPrec])]
data %>% mutate(nbocc=nbocc) %>%
  ggplot(aes(ppvh,J, color=as.factor(nbocc)))+geom_point()+mytheme.dark("")

data=data %>% mutate(penT = -ICL + J)
data05=data
data05 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data1 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data2 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data3 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data3 %>% mutate(nbocc=nbocc) %>%
  ggplot(aes(ppvh,J, color=(nbocc > 1 & goodPrec)))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(ppvh,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(ppvh,J, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(ICL,J, color=AUC>0.6, shape=goodPrec))+geom_point()+
  mytheme.dark("auc>0.6")+geom_abline()
data %>% ggplot(aes(ppvh,AUC, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(AUC,(penT), color=goodPrec))+geom_point()+mytheme.dark("")

#########
# local tests
which(ICL>-2200)
trueClique
clique=cliques_spca$cliqueList[[1]]
clique=ListVEM[[33]]$clique
init=initVEM(counts = counts, initviasigma=clique, sigma_obs, r = 1)
Wginit= init$Wginit; Winit= init$Winit; omegainit=init$omegainit ; MHinit=init$MHinit
VEM=VEMtree(counts,MO,SO,MH=MHinit,omegainit,Winit,Wginit, eps=1e-3, alpha=0.3, maxIter=200,
            plot=TRUE, print.hist=FALSE, filterWg=TRUE, verbatim = TRUE, nobeta = FALSE, filterDiag = TRUE)

vem=ListVEM[[27]]
plotVEM(vem$Pg, ome, r=1, seuil=0.5)
tail(vem$lowbound$J,1)
#-2739.929 with alpha=0.1 and filters, max.prec=FALSE
#-2746.588 with alpha=0.1 without filters, max.prec=TRUE
#-3105.91 with alpha=0.05 and filters, max.prec=FALSE (does not go through without filters)
/R/codes/missingActor/J_AUC.R
no_license
Rmomal/these
R
false
false
8,864
r
library(EMtree)
library(PLNmodels)
library(LITree)
library(ggraph)
library(tidygraph)
library(tidyverse)
library(mvtnorm)
library(useful)
library(mclust)
library(MASS)
library(parallel)
library(ROCR)
library(reshape2)#for ggimage
library(gridExtra)
library(harrypotter)
library(sparsepca)
library(Matrix) # for nearPD(), unless already provided by the sourced helpers below
library(tictoc) # for tic()/toc()
source("/Users/raphaellemomal/these/R/codes/missingActor/fonctions-missing.R")
source("/Users/raphaellemomal/these/R/codes/missingActor/fonctions-exactDet.R")

J_AUC<-function(seed, p,r,alpha,eig.tol=1e-6,cliques_spca=NULL,B=100,cores=3,
                plot=FALSE,type="scale-free",n=200){
  #------ Data simulation
  set.seed(seed)
  O=1:p
  H=(p+1):(p+r)
  missing_data<-missing_from_scratch(n,p,r,type,plot)
  counts=missing_data$Y ; sigmaO= missing_data$Sigma ; omega=missing_data$Omega
  trueClique=missing_data$TC ; hidden=missing_data$H
  PLNfit<-PLN(counts~1, control=list(trace=0))
  MO<-PLNfit$var_par$M ; SO<-PLNfit$var_par$S ; sigma_obs=PLNfit$model_par$Sigma
  theta=PLNfit$model_par$Theta ; matcovar=matrix(1, n,1)
  ome=omega[c(setdiff(1:(p+r), hidden), hidden),c(setdiff(1:(p+r), hidden), hidden)]
  diag(ome)=0
  #------ cliques and VEMtree
  if(is.null(cliques_spca)){ # fixed: the argument is cliques_spca, not cliques
    cliques_spca <- boot_FitSparsePCA(scale(MO),B=B,r=1, cores=3)
  }
  ListVEM_filtre<-List.VEM(cliquesObj=cliques_spca, counts, sigma_obs, MO,SO,r=1,eps=1e-3,
                           maxIter=200, alpha = alpha,cores=cores, nobeta = FALSE,
                           filterDiag = TRUE,filterWg=TRUE)
  ListVEM_nofiltre<-List.VEM(cliquesObj=cliques_spca, counts, sigma_obs, MO,SO,r=1,eps=1e-3,
                             maxIter=200, alpha =alpha,cores=cores, nobeta = FALSE,
                             filterDiag = FALSE,filterWg=FALSE)
  ListVEM=c(ListVEM_filtre,ListVEM_nofiltre)
  #------ shape the results
  filtre=rep(c(TRUE,FALSE), each=length(unique(cliques_spca$cliqueList)))
  goodPrec=!do.call(rbind,lapply(ListVEM, function(x) x$max.prec))
  J=do.call(rbind,lapply(ListVEM, function(vem){tail(vem$lowbound$J,1)}))
  nbocc=do.call(rbind, lapply(ListVEM, function(vem){ vem$nbocc}))
  AUC=do.call(rbind, lapply(ListVEM, function(vem){
    Pg=vem$Pg
    AUC=round(auc(pred = Pg, label = ome),4)
  }))
  ppvh=do.call(rbind, lapply(ListVEM, function(vem){
    Pg=vem$Pg
    ppvh=accppvtpr(Pg,ome,h=15,seuil=0.5)[5]
  }))
  Icl<-do.call(rbind, lapply(ListVEM, function(vem){
    J=tail(vem$lowbound$J,1) ;Wg=vem$Wg ; Pg=vem$Pg
    pen_T=-( sum( Pg * log(Wg+(Wg==0)) ) - logSumTree(Wg)$det)
    ICL = J-pen_T
    return(ICL)
  }))
  vBICs<-do.call(rbind, lapply(ListVEM, function(vem){
    J=tail(vem$lowbound$J,1)
    r=1;d=1 ; q=p+r
    nbparam<-p*(d) + (p*(p+1)/2 +r*p)+(q*(q-1)/2 - 1)
    nbparam_mixedmodel<-round(SumTree(vem$W),1)*(1+q^2)-1
    vbic0=J- nbparam*log(n)/2
    vbic1=J- nbparam*log(n)/2 -nbparam_mixedmodel
    vbic2=J- (nbparam+nbparam_mixedmodel)*log(n)/2
    return(c(vbic0=vbic0,vbic1=vbic1,vbic2=vbic2))
  }))
  JPLN<-do.call(rbind, lapply(ListVEM, function(vem){
    EhZZ=t(vem$M[,O])%*%vem$M[,O] + diag(colSums(vem$S[,O]))
    sigTilde = (1/n)*EhZZ
    omega=vem$omega
    EsO=vem$Pg*vem$omega+diag(diag(vem$omega))
    EgOm = EsO[O,O] - matrix(EsO[O,H],p,r)%*%matrix(EsO[H,O],r,p)/EsO[H,H]
    EgOm = nearPD(EgOm, eig.tol=eig.tol)$mat
    JPLN_SigT = part_JPLN(sigTilde,EhZZ=EhZZ)
    JPLN_EgOm = part_JPLN(EgOm,EhZZ=EhZZ, var=FALSE)
    return(data.frame(JPLN_SigT=JPLN_SigT,JPLN_EgOm=JPLN_EgOm))}))
  data= data.frame(goodPrec, J,Icl, AUC,ppvh, vBICs,filtre=filtre,
                   JPLN_SigT=JPLN$JPLN_SigT,JPLN_EgOm=JPLN$JPLN_EgOm,
                   index=rep(1:length(unique(cliques_spca$cliqueList)),2))
  return(data)
}

# NB: `n` is not an argument here; it is looked up in the global environment when
# part_JPLN() is called.
part_JPLN<-function(mat_var,EhZZ, var=TRUE){
  if(var){
    partJPLN=-n*0.5*(det.fractional(mat_var, log=TRUE)) - 0.5*sum(EhZZ*solve(mat_var))
  }else{# if a precision matrix is supplied
    partJPLN=n*0.5*(det.fractional(mat_var, log=TRUE)) - 0.5*sum(EhZZ*(mat_var))
  }
  return(partJPLN)
}

# maxJ_good=which(J==max(J[J<min(J[!goodPrec])]))
# maxJ=which.max(J)
# choice=which.max(ICL[goodPrec])
#ICL[choice]

#--- experiments
cliques_spca19=readRDS("/Users/raphaellemomal/these/R/codes/missingActor/SimResults/cliques_spca19_10000.rds")
cliques_spca1=readRDS("/Users/raphaellemomal/these/R/codes/missingActor/SimResults/cliques_spca1_10000.rds")
small_cliques1=list()
small_cliques1$cliqueList=cliques_spca1$cliqueList[1:10]
small_cliques1$nb_occ = cliques_spca1$nb_occ[1:10]
tic()
seed1<-J_AUC(seed = 1,p = 14,r=1, alpha=0.1,eig.tol=1e-6, cliques_spca=cliques_spca1) # 17 min
toc()
tic()
seed19<-J_AUC(seed = 19,p = 14,r=1, alpha=0.1,eig.tol=1e-6, cliques_spca=cliques_spca19) # 71 min
toc()
plot(seed19$JPLN_SigT,seed19$JPLN_EgOm)
seed1$seed=1 ; seed19$seed=19
seed_filtre=rbind(seed1,seed19)
saveRDS(seed_filtre, "/Users/raphaellemomal/these/R/codes/missingActor/SimResults/seed_filtre_eigtol_1e-6.rds")

#--- plots
# NB: some objects and columns used below (Delta, ICL, data, data05, data1, data2, data3)
# are not created in this script; they presumably come from earlier interactive sessions.
seed_filtre %>% ggplot(aes( AUC,J, color=(goodPrec)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed)+mytheme.dark("No machine \nprecision:")
seed_filtre %>% ggplot(aes( AUC,Delta, color=(goodPrec)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed)+mytheme.dark("No machine \nprecision:")
seed_filtre %>% ggplot(aes( ppvh,Delta, color=(goodPrec)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed)+mytheme.dark("No machine \nprecision:")
seed_filtre=seed_filtre %>% group_by(seed,filtre) %>%
  mutate(medDelta = quantile(Delta,0.1), goodDelta = Delta<medDelta) %>% ungroup()
seed_filtre %>% ggplot(aes( AUC,J, color=(goodDelta)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed) + mytheme.dark("Delta < q10(Delta)")
seed_filtre %>% dplyr::select(J,filtre,index,seed) %>% spread(filtre,J) %>%
  ggplot(aes(`FALSE`, `TRUE`))+geom_point()+facet_wrap(~seed)+geom_abline()+
  mytheme.dark("")
seed_filtre %>% dplyr::select(Delta,filtre,index,seed) %>% spread(filtre,Delta) %>%
  ggplot(aes(`FALSE`, `TRUE`))+geom_point()+facet_wrap(~seed)+geom_abline()+
  mytheme.dark("")+labs(title="Delta is lower with filters",x="without filter", y="with filter")
seed_filtre=seed_filtre %>% mutate(tron_vbic1=ifelse(!is.finite(vbic1),.Machine$double.xmax,vbic1))
seed_filtre %>% ggplot(aes( AUC,tron_vbic1, color=(goodDelta)))+geom_point()+
  geom_vline(xintercept = 0.5, linetype="dashed", color="gray")+
  facet_grid(filtre~seed) + mytheme.dark("Delta < q10(Delta)")

data %>% filter(goodPrec) %>% filter(ICL==max(ICL))
data %>%filter(J==max(J))
data%>% group_by(goodPrec) %>% summarise(maxJ=max(J), mean.auc=mean(AUC), mean.ppvh=mean(ppvh),
                                         indexmaxJ = which.max(J), indexmaxICL = which.max(ICL))
# VEM_1=ListVEM[[maxJ_good]]
# maxJ_good=which(!goodPrec)[which.max(ICL[!goodPrec])]
data %>%mutate(nbocc=nbocc) %>% ggplot(aes(ppvh,J, color=as.factor(nbocc)))+geom_point()+mytheme.dark("")
data=data %>% mutate(penT = -ICL + J)
data05=data
data05 %>%ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data1 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data2 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data3 %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data3 %>%mutate(nbocc=nbocc) %>% ggplot(aes(ppvh,J, color=(nbocc > 1 & goodPrec)))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(AUC,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(ppvh,ICL, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(ppvh,J, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(ICL,J,color=AUC>0.6, shape=goodPrec))+geom_point()+
  mytheme.dark("auc>0.6")+geom_abline()
data %>% ggplot(aes(ppvh,AUC, color=goodPrec))+geom_point()+mytheme.dark("")
data %>% ggplot(aes(AUC,(penT), color=goodPrec))+geom_point()+mytheme.dark("")

#########@
# local tests
which(ICL>-2200)
trueClique
clique=cliques_spca$cliqueList[[1]]
clique=ListVEM[[33]]$clique
init=initVEM(counts = counts,initviasigma=clique, sigma_obs,r = 1)
Wginit= init$Wginit; Winit= init$Winit; omegainit=init$omegainit ; MHinit=init$MHinit
VEM=VEMtree(counts,MO,SO,MH=MHinit,omegainit,Winit,Wginit, eps=1e-3, alpha=0.3, maxIter=200,
            plot=TRUE,print.hist=FALSE,filterWg=TRUE, verbatim = TRUE,nobeta = FALSE,
            filterDiag = TRUE)
vem=ListVEM[[27]]
plotVEM(vem$Pg,ome,r=1,seuil=0.5)
tail(vem$lowbound$J,1)
#-2739.929 with alpha=0.1 and filters, max.prec=FALSE
#-2746.588 with alpha=0.1 without filters, max.prec=TRUE
#-3105.91 with alpha=0.05, with filters, max.prec=FALSE (does not run without filters)
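# --- Hedged sketch (not part of the original script): one plausible way to turn the
# J_AUC() output into a single clique choice, following the commented-out idea above
# (keep runs without machine-precision issues, then take the best criterion). The helper
# name pick_clique and the default use of the Icl column are assumptions, not the
# author's stated method.
pick_clique <- function(res, criterion = "Icl") {
  ok <- res[res$goodPrec, , drop = FALSE]   # discard runs flagged by max.prec
  if (nrow(ok) == 0) ok <- res              # fall back to all runs if none are clean
  ok$index[which.max(ok[[criterion]])]      # index of the selected clique
}
# e.g. pick_clique(seed1) or pick_clique(seed19, criterion = "J")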
# Need some data to play with
df1 <- data.frame(LETTERS, dfindex = 1:26)
df2 <- data.frame(letters, dfindex = c(1:10, 15, 20, 22:35))

# INNER JOIN: returns rows when there is a match in both tables.
merge(df1, df2)
merge(df1, df2, by="dfindex")

# FULL (outer) JOIN: all records from both the tables and fill in NULLs for
# missing matches on either side.
merge(df1, df2, all = TRUE)

# join by name
names(df1) <- c("alpha", "lotsaNumbers")
merge(df1, df2, by.x = "lotsaNumbers", by.y = "dfindex")

# Outer join: merge(x = df1, y = df2, by = 'CustomerId', all = TRUE)
# Left outer: merge(x = df1, y = df2, by = 'CustomerId', all.x = TRUE)
# Right outer: merge(x = df1, y = df2, by = 'CustomerId', all.y = TRUE)
# Cross join: merge(x = df1, y = df2, by = NULL)
# merge on multiple columns: merge(df1, df2, by = c('CustomerId', 'OrderId'))
/r/learning/data_type/dataframe/merge.r
no_license
jk983294/math
R
false
false
844
r
# Need some data to play with
df1 <- data.frame(LETTERS, dfindex = 1:26)
df2 <- data.frame(letters, dfindex = c(1:10, 15, 20, 22:35))

# INNER JOIN: returns rows when there is a match in both tables.
merge(df1, df2)
merge(df1, df2, by="dfindex")

# FULL (outer) JOIN: all records from both the tables and fill in NULLs for
# missing matches on either side.
merge(df1, df2, all = TRUE)

# join by name
names(df1) <- c("alpha", "lotsaNumbers")
merge(df1, df2, by.x = "lotsaNumbers", by.y = "dfindex")

# Outer join: merge(x = df1, y = df2, by = 'CustomerId', all = TRUE)
# Left outer: merge(x = df1, y = df2, by = 'CustomerId', all.x = TRUE)
# Right outer: merge(x = df1, y = df2, by = 'CustomerId', all.y = TRUE)
# Cross join: merge(x = df1, y = df2, by = NULL)
# merge on multiple columns: merge(df1, df2, by = c('CustomerId', 'OrderId'))
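# --- Hedged sketch (not in the original file): the 'CustomerId'/'OrderId' lines above
# are syntax reminders only; df1/df2 as defined here never had those columns, and df1's
# key column was renamed to "lotsaNumbers" earlier. Runnable versions of the outer and
# cross joins on these exact frames:
merge(df1, df2, by.x = "lotsaNumbers", by.y = "dfindex", all = TRUE)   # full outer join
merge(df1, df2, by.x = "lotsaNumbers", by.y = "dfindex", all.x = TRUE) # left outer join
merge(df1, df2, by.x = "lotsaNumbers", by.y = "dfindex", all.y = TRUE) # right outer join
merge(df1, df2, by = NULL)                                             # cross join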