content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
### Data Science Track ### Exploring Data ## Quiz 1 - plot 4 (includes 4 graphs) ## use local unix tools to strip down our data by issuing the following commands ## (remove the "#>" before issuing the commands): #> grep "^[12]\/2\/2007" household_power_consumption.txt > hpc_2days_noheader.txt #> head -1 household_power_consumption.txt > hpc_2days_headeronly.txt #> cat hpc_2days_headeronly.txt hpc_2days_noheader.txt > hpc_2days.txt library(data.table) hpc <- fread("hpc_2days.txt", header=TRUE, sep=";") # get into a workable date format hpc <- transform(hpc, DateCT = strptime(paste(hpc$Date, hpc$Time), format = "%d/%m/%Y %H:%M:%S")) # set up our 2 x 2 grid of graphs par(mfrow = c(2, 2)) # graph 1 is from quiz 1 plot 2 with(hpc, plot(DateCT, Global_active_power, type="l", main = "", xlab = "", ylab = "Global Active Power")) # graph 2 is new with(hpc, plot(DateCT, Voltage, type="l", main = "", xlab = "datetime", ylab = "Voltage")) # graph 3 is from quiz 1 plot 3 with(hpc, plot(DateCT, Sub_metering_1, type="n", main = "", ylab = "Entergy sub metering", xlab = "")) with(hpc, { lines(x = DateCT, y = Sub_metering_1, type="l", col="black") lines(x = DateCT, y = Sub_metering_2, type="l", col="red") lines(x = DateCT, y = Sub_metering_3, type="l", col="blue") }) # this is tricky legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1, cex = 0.3) # graph 4 is new with(hpc, plot(DateCT, Global_reactive_power, type="l", main = "", xlab = "datetime", ylab = "Global_reactive_power")) dev.copy(png, "plot4.png") dev.off()
/plot4.R
no_license
epinym/ExData_Plotting1
R
false
false
1,634
r
### Data Science Track ### Exploring Data ## Quiz 1 - plot 4 (includes 4 graphs) ## use local unix tools to strip down our data by issuing the following commands ## (remove the "#>" before issuing the commands): #> grep "^[12]\/2\/2007" household_power_consumption.txt > hpc_2days_noheader.txt #> head -1 household_power_consumption.txt > hpc_2days_headeronly.txt #> cat hpc_2days_headeronly.txt hpc_2days_noheader.txt > hpc_2days.txt library(data.table) hpc <- fread("hpc_2days.txt", header=TRUE, sep=";") # get into a workable date format hpc <- transform(hpc, DateCT = strptime(paste(hpc$Date, hpc$Time), format = "%d/%m/%Y %H:%M:%S")) # set up our 2 x 2 grid of graphs par(mfrow = c(2, 2)) # graph 1 is from quiz 1 plot 2 with(hpc, plot(DateCT, Global_active_power, type="l", main = "", xlab = "", ylab = "Global Active Power")) # graph 2 is new with(hpc, plot(DateCT, Voltage, type="l", main = "", xlab = "datetime", ylab = "Voltage")) # graph 3 is from quiz 1 plot 3 with(hpc, plot(DateCT, Sub_metering_1, type="n", main = "", ylab = "Entergy sub metering", xlab = "")) with(hpc, { lines(x = DateCT, y = Sub_metering_1, type="l", col="black") lines(x = DateCT, y = Sub_metering_2, type="l", col="red") lines(x = DateCT, y = Sub_metering_3, type="l", col="blue") }) # this is tricky legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1, cex = 0.3) # graph 4 is new with(hpc, plot(DateCT, Global_reactive_power, type="l", main = "", xlab = "datetime", ylab = "Global_reactive_power")) dev.copy(png, "plot4.png") dev.off()
# This program reads the data created by the replicators and downloaded from # Google in the previous step and compiles them into a master dataframe # ------------------------------------- # Compile replication lists into a master file # ------------------------------------- # Read in worksheet names of the replication google sheets ws <- readRDS(file=file.path(dataloc,"mapping_ws_nums.Rds")) ws <- as.vector(unlist(ws[1:length(ws)-1])) # Compile all the worksheets except for "2009 missing online material" repllist <- NA for ( x in 1:length(ws) ) { if ( ws[x] != "2009 missing online material" ) { print(paste("Processing",ws[x])) if ( x == 1 ) { # Read in the first list and set variable types repllist <- read_csv(file = file.path(dataloc,paste0("replication_list_",x,".csv")), col_types = cols( .default = col_character(), X1 = col_integer(), Year = col_integer(),Volume = col_integer(), Issue = col_integer() ) ) } else { # Read in the subsequent lists and set variable types tmp <- read_csv(file = file.path(dataloc,paste0("replication_list_",x,".csv")), col_types = cols( .default = col_character(), X1 = col_integer(), Year = col_integer(), Volume = col_integer(), Issue = col_integer() ) ) # Add to master dataframe repllist <- bind_rows(repllist,tmp) rm(tmp) } } } # ------------------------------------- # Tidy up compiled master file # ------------------------------------- # Fix typos repllist2 <- repllist %>% mutate(DOI = ifelse(DOI == "10.1257/aer.2013047","10.1257/aer.20130479",DOI)) %>% mutate(DOI = ifelse(DOI == "10.1257/app.4.1.247","10.1257/app.4.2.247",DOI)) # Drop the verbose article descriptions (authors, etc.) to later pick it up again from Crossref repllist2 <- repllist2 %>% select(DOI,`Source Title`, `Entry Questionnaire`,`Entry Questionnaire Author`,`Expected Difficulty`, Completed1 = Completed, Replicated1 = Replicated, Replicator1 = Replicator, Completed2 = Completed_1, Replicated2 = Replicated_1, Replicator2 = `2nd Replicator`, `Data Type`,`Data Access Type`,`Data Comments`,`Data URL`,`Data Contact`, starts_with("Data Access Type:"),worksheet.rownum=X1,worksheet) # Some diagnostics knitr::kable(table(repllist2$`Entry Questionnaire Author`,repllist2$`Source Title`)) # Save saveRDS(repllist2,file=file.path(interwrk,"replication_list_2.Rds"))
/programs/02_read_clean_replicationlist.R
no_license
AEADataEditor/report-aea-data-editor-2018
R
false
false
2,724
r
# This program reads the data created by the replicators and downloaded from # Google in the previous step and compiles them into a master dataframe # ------------------------------------- # Compile replication lists into a master file # ------------------------------------- # Read in worksheet names of the replication google sheets ws <- readRDS(file=file.path(dataloc,"mapping_ws_nums.Rds")) ws <- as.vector(unlist(ws[1:length(ws)-1])) # Compile all the worksheets except for "2009 missing online material" repllist <- NA for ( x in 1:length(ws) ) { if ( ws[x] != "2009 missing online material" ) { print(paste("Processing",ws[x])) if ( x == 1 ) { # Read in the first list and set variable types repllist <- read_csv(file = file.path(dataloc,paste0("replication_list_",x,".csv")), col_types = cols( .default = col_character(), X1 = col_integer(), Year = col_integer(),Volume = col_integer(), Issue = col_integer() ) ) } else { # Read in the subsequent lists and set variable types tmp <- read_csv(file = file.path(dataloc,paste0("replication_list_",x,".csv")), col_types = cols( .default = col_character(), X1 = col_integer(), Year = col_integer(), Volume = col_integer(), Issue = col_integer() ) ) # Add to master dataframe repllist <- bind_rows(repllist,tmp) rm(tmp) } } } # ------------------------------------- # Tidy up compiled master file # ------------------------------------- # Fix typos repllist2 <- repllist %>% mutate(DOI = ifelse(DOI == "10.1257/aer.2013047","10.1257/aer.20130479",DOI)) %>% mutate(DOI = ifelse(DOI == "10.1257/app.4.1.247","10.1257/app.4.2.247",DOI)) # Drop the verbose article descriptions (authors, etc.) to later pick it up again from Crossref repllist2 <- repllist2 %>% select(DOI,`Source Title`, `Entry Questionnaire`,`Entry Questionnaire Author`,`Expected Difficulty`, Completed1 = Completed, Replicated1 = Replicated, Replicator1 = Replicator, Completed2 = Completed_1, Replicated2 = Replicated_1, Replicator2 = `2nd Replicator`, `Data Type`,`Data Access Type`,`Data Comments`,`Data URL`,`Data Contact`, starts_with("Data Access Type:"),worksheet.rownum=X1,worksheet) # Some diagnostics knitr::kable(table(repllist2$`Entry Questionnaire Author`,repllist2$`Source Title`)) # Save saveRDS(repllist2,file=file.path(interwrk,"replication_list_2.Rds"))
# ctr+shift+c(一括コメント) #これにより深さと増加量のグラフをすべて生成 #initialize total_height<-0 total_increase<-0 ave_height<-0 ave_increase<-0 #filename before_workspace <- "/home/ax909424/r" after_workspace <- "/simulation_result" expand <- c("/reproduction","/homo_path","/bias","/both") date <- c("/20181104","/20181105","/20181104","/20181106") #マクロもどき FIRST_SHARE <- 1 TOTAL_SHARE <- 2 HEIGHT <- 3 VALUE_THETA <- 4 AVERAGE_INCREASE <- 1 AVERAGE_HEIGHT <- 2 MAX_FIRST_SHARE <- 20 SPLIT <- 11 X_LABEL <- 1 Y_LABEL <- 2 for(kind_of_expand in 1 : 4 ){ #------create total data matrix( 10000 colums * 220 rows)----------------- for(i in 1 : MAX_FIRST_SHARE){ #filenameにファイル名を入れて、valueにその値を格納 filename <- paste(before_workspace , after_workspace , expand[kind_of_expand] , date[kind_of_expand] ,"/value_first=",i,".txt",sep="") #read_txt_table:初期共有者,情報共有者,深さ,ニュースの値を格納する行列(110000 * 4) read_txt_table = read.table(filename,header=FALSE,sep=" ") #cat(filename) for(j in 1 : SPLIT){ #thetaの値ごとにソート:temp~~ <- read_txt_table[該当行 , 欲しい情報] temp_height <- read_txt_table[read_txt_table[ , VALUE_THETA] == (j-1) / 10.0 , HEIGHT] temp_increase <- read_txt_table[ read_txt_table[ , VALUE_THETA] == (j-1) / 10.0 , TOTAL_SHARE] - read_txt_table[read_txt_table[ , VALUE_THETA] == (j-1) / 10.0 , FIRST_SHARE ] #一番最初だけtotalに代入し、それ以外は結合 if(i == 1 && j==1 && (j-1) / 10 == 0){ total_height <- temp_height total_increase <- temp_increase } else{ total_height <- cbind(total_height , temp_height) total_increase <- cbind(total_increase , temp_increase) } } } #-------calculate 1*220 average matrix , increase and height--------- for(i in 1:220){ ave_height[i] <- mean(total_height[,i]) ave_increase[i] <- mean(total_increase[,i]) } #-------------------------------------------------------------------- #Average[,1]:increase , Average[,2]:height Average<-cbind(ave_increase,ave_height) k_first<-c(1,11) k_end<-c(10,20) inc_hei<-c("increase","height") main_thema<-c("increase and news content","height and news content") cols<-c("purple","green","blue","red","black") lty_status <- c("longdash" , "dotdash" , "dotted" , "dashed" , "solid") legend_range <- c(20,15,10,5,1) pch_style <- c(0,15,16,17,18) #create 4 images #i:information's kind , j:launch 1 or 11 , k:draw line for(i in 1:2){ lty_count <- 5 png(paste("/home/ax909424/r/image/",expand[kind_of_expand],"/",inc_hei[i],".png",sep=""),width=800,height=600) par(mar=c(5.5,6.0,4.1,2)) par(mgp=c(4,1.2,0)) plot(0,0,type="n",xlim=range(0,10),ylim=range(0,20),xlab="news content",ylab=inc_hei[i],main=main_thema[i] , xaxt = "n" , yaxt = "n" ,cex.lab = 2.5 , cex.main = 2.5) #mar=c(5,0,4,2)+0.1 #par(mgp=c(10,1,0)) #par(oma = c(3,4,3,2)) #横軸の目盛の値を変更 axis(X_LABEL,0:10, seq(0, 1,by = 0.1)) axis(Y_LABEL,0:20, seq(0,20,by = 1) , las = 1 , tck=1.0 ,lty = "dotted") k_range<-k_first[1]:k_end[2] for(k in k_range){ head=11*(k-1)+1 tail=11*k par(pch = "+") if((k%%5 ==0) || k==1 ){ lines(0:10,Average[head:tail,i],col=cols[lty_count],lty=lty_status[lty_count] , lwd = 2 ) lty_count <- lty_count - 1 #print(k) } } legend("topright",legend=legend_range,col=cols,lty=lty_status, cex = 2) dev.off() } }
/r_workspace_master/source/create_4_image_ver1.3.R
no_license
kenty38/research_git_repository
R
false
false
4,030
r
# ctr+shift+c(一括コメント) #これにより深さと増加量のグラフをすべて生成 #initialize total_height<-0 total_increase<-0 ave_height<-0 ave_increase<-0 #filename before_workspace <- "/home/ax909424/r" after_workspace <- "/simulation_result" expand <- c("/reproduction","/homo_path","/bias","/both") date <- c("/20181104","/20181105","/20181104","/20181106") #マクロもどき FIRST_SHARE <- 1 TOTAL_SHARE <- 2 HEIGHT <- 3 VALUE_THETA <- 4 AVERAGE_INCREASE <- 1 AVERAGE_HEIGHT <- 2 MAX_FIRST_SHARE <- 20 SPLIT <- 11 X_LABEL <- 1 Y_LABEL <- 2 for(kind_of_expand in 1 : 4 ){ #------create total data matrix( 10000 colums * 220 rows)----------------- for(i in 1 : MAX_FIRST_SHARE){ #filenameにファイル名を入れて、valueにその値を格納 filename <- paste(before_workspace , after_workspace , expand[kind_of_expand] , date[kind_of_expand] ,"/value_first=",i,".txt",sep="") #read_txt_table:初期共有者,情報共有者,深さ,ニュースの値を格納する行列(110000 * 4) read_txt_table = read.table(filename,header=FALSE,sep=" ") #cat(filename) for(j in 1 : SPLIT){ #thetaの値ごとにソート:temp~~ <- read_txt_table[該当行 , 欲しい情報] temp_height <- read_txt_table[read_txt_table[ , VALUE_THETA] == (j-1) / 10.0 , HEIGHT] temp_increase <- read_txt_table[ read_txt_table[ , VALUE_THETA] == (j-1) / 10.0 , TOTAL_SHARE] - read_txt_table[read_txt_table[ , VALUE_THETA] == (j-1) / 10.0 , FIRST_SHARE ] #一番最初だけtotalに代入し、それ以外は結合 if(i == 1 && j==1 && (j-1) / 10 == 0){ total_height <- temp_height total_increase <- temp_increase } else{ total_height <- cbind(total_height , temp_height) total_increase <- cbind(total_increase , temp_increase) } } } #-------calculate 1*220 average matrix , increase and height--------- for(i in 1:220){ ave_height[i] <- mean(total_height[,i]) ave_increase[i] <- mean(total_increase[,i]) } #-------------------------------------------------------------------- #Average[,1]:increase , Average[,2]:height Average<-cbind(ave_increase,ave_height) k_first<-c(1,11) k_end<-c(10,20) inc_hei<-c("increase","height") main_thema<-c("increase and news content","height and news content") cols<-c("purple","green","blue","red","black") lty_status <- c("longdash" , "dotdash" , "dotted" , "dashed" , "solid") legend_range <- c(20,15,10,5,1) pch_style <- c(0,15,16,17,18) #create 4 images #i:information's kind , j:launch 1 or 11 , k:draw line for(i in 1:2){ lty_count <- 5 png(paste("/home/ax909424/r/image/",expand[kind_of_expand],"/",inc_hei[i],".png",sep=""),width=800,height=600) par(mar=c(5.5,6.0,4.1,2)) par(mgp=c(4,1.2,0)) plot(0,0,type="n",xlim=range(0,10),ylim=range(0,20),xlab="news content",ylab=inc_hei[i],main=main_thema[i] , xaxt = "n" , yaxt = "n" ,cex.lab = 2.5 , cex.main = 2.5) #mar=c(5,0,4,2)+0.1 #par(mgp=c(10,1,0)) #par(oma = c(3,4,3,2)) #横軸の目盛の値を変更 axis(X_LABEL,0:10, seq(0, 1,by = 0.1)) axis(Y_LABEL,0:20, seq(0,20,by = 1) , las = 1 , tck=1.0 ,lty = "dotted") k_range<-k_first[1]:k_end[2] for(k in k_range){ head=11*(k-1)+1 tail=11*k par(pch = "+") if((k%%5 ==0) || k==1 ){ lines(0:10,Average[head:tail,i],col=cols[lty_count],lty=lty_status[lty_count] , lwd = 2 ) lty_count <- lty_count - 1 #print(k) } } legend("topright",legend=legend_range,col=cols,lty=lty_status, cex = 2) dev.off() } }
#' Tchau #' #' Description here #' #' @return nothing #' @export #' #' @examples #' '> tchau() #' [1] "Tchau, world!" tchau <- function() { print("Tchau, world!") }
/R/tchau.R
permissive
hugoharada/package00
R
false
false
170
r
#' Tchau #' #' Description here #' #' @return nothing #' @export #' #' @examples #' '> tchau() #' [1] "Tchau, world!" tchau <- function() { print("Tchau, world!") }
#' Sets the default breaks for a time axis #' #' \code{xgx_breaks_time} sets the default breaks for a time axis, #' given the units of the data and the units of the plot. #' It is inspired by scales::extended_breaks #' #' for the extended breaks function, weights is a set of 4 weights for #' \enumerate{ #' \item simplicity - how early in the Q order are you #' \item coverage - labelings that don't extend outside the data: #' range(data) / range(labels) #' \item density (previously granularity) - how close to the number of ticks #' do you get (default is 5) #' \item legibility - has to do with fontsize and formatting to prevent #' label overlap #' } #' #' @references Talbot, Justin, Sharon Lin, and Pat Hanrahan. #' "An extension of Wilkinson’s algorithm for positioning tick labels on axes." #' IEEE Transactions on visualization and #' computer graphics 16.6 (2010): 1036-1043. #' #' @param data_range range of the data #' @param units_plot units to use in the plot #' @param number_breaks number of breaks to aim for (default is 5) #' #' @return numeric vector of breaks #' #' @examples #' xgx_breaks_time(c(0, 5), "hour") #' xgx_breaks_time(c(0, 6), "hour") #' xgx_breaks_time(c(-3, 5), "hour") #' xgx_breaks_time(c(0, 24), "hour") #' xgx_breaks_time(c(0, 12), "hour") #' xgx_breaks_time(c(1, 4), "day") #' xgx_breaks_time(c(1, 12), "day") #' xgx_breaks_time(c(1, 14), "day") #' xgx_breaks_time(c(1, 50), "day") #' xgx_breaks_time(c(1000, 3000), "day") #' xgx_breaks_time(c(-21, 100), "day") #' xgx_breaks_time(c(-1, 10), "week") #' #' @importFrom labeling extended #' @export xgx_breaks_time <- function(data_range, units_plot, number_breaks = 5) { data_min <- min(data_range) data_max <- max(data_range) data_span <- data_max - data_min number_breaks <- 5 # number of breaks to aim for preferred_increment_default <- c(1, 5, 2, 4, 3, 1) weights_default <- c(0.25, 0.2, 0.5, 0.05) weights_simple <- c(1, 0.2, 0.5, 0.05) if (units_plot %in% c("hour", "month") && data_span >= 48) { preferred_increment <- c(24, 12, 6, 3) weights <- weights_simple } else if (units_plot %in% c("hour", "month") && data_span >= 24) { preferred_increment <- c(3, 12, 6, 2) weights <- weights_simple } else if (units_plot %in% c("hour", "month") && data_span < 24) { preferred_increment <- c(6, 3, 2, 1) weights <- weights_simple } else if (units_plot == "day" && data_span >= 720) { preferred_increment <- c(360, 180) weights <- weights_simple } else if (units_plot == "day" && data_span >= 360) { preferred_increment <- c(180, 120, 60) weights <- weights_simple } else if (units_plot == "day" && data_span >= 90) { preferred_increment <- c(30, 7) weights <- weights_simple } else if (units_plot == "day" && data_span >= 12) { preferred_increment <- c(7, 14, 28) weights <- weights_simple } else { preferred_increment <- preferred_increment_default weights <- weights_default } breaks <- labeling::extended(data_min, data_max, m = number_breaks, Q = preferred_increment, w = weights) return(breaks) }
/R/xgx_breaks_time.R
permissive
Novartis/xgxr
R
false
false
3,142
r
#' Sets the default breaks for a time axis #' #' \code{xgx_breaks_time} sets the default breaks for a time axis, #' given the units of the data and the units of the plot. #' It is inspired by scales::extended_breaks #' #' for the extended breaks function, weights is a set of 4 weights for #' \enumerate{ #' \item simplicity - how early in the Q order are you #' \item coverage - labelings that don't extend outside the data: #' range(data) / range(labels) #' \item density (previously granularity) - how close to the number of ticks #' do you get (default is 5) #' \item legibility - has to do with fontsize and formatting to prevent #' label overlap #' } #' #' @references Talbot, Justin, Sharon Lin, and Pat Hanrahan. #' "An extension of Wilkinson’s algorithm for positioning tick labels on axes." #' IEEE Transactions on visualization and #' computer graphics 16.6 (2010): 1036-1043. #' #' @param data_range range of the data #' @param units_plot units to use in the plot #' @param number_breaks number of breaks to aim for (default is 5) #' #' @return numeric vector of breaks #' #' @examples #' xgx_breaks_time(c(0, 5), "hour") #' xgx_breaks_time(c(0, 6), "hour") #' xgx_breaks_time(c(-3, 5), "hour") #' xgx_breaks_time(c(0, 24), "hour") #' xgx_breaks_time(c(0, 12), "hour") #' xgx_breaks_time(c(1, 4), "day") #' xgx_breaks_time(c(1, 12), "day") #' xgx_breaks_time(c(1, 14), "day") #' xgx_breaks_time(c(1, 50), "day") #' xgx_breaks_time(c(1000, 3000), "day") #' xgx_breaks_time(c(-21, 100), "day") #' xgx_breaks_time(c(-1, 10), "week") #' #' @importFrom labeling extended #' @export xgx_breaks_time <- function(data_range, units_plot, number_breaks = 5) { data_min <- min(data_range) data_max <- max(data_range) data_span <- data_max - data_min number_breaks <- 5 # number of breaks to aim for preferred_increment_default <- c(1, 5, 2, 4, 3, 1) weights_default <- c(0.25, 0.2, 0.5, 0.05) weights_simple <- c(1, 0.2, 0.5, 0.05) if (units_plot %in% c("hour", "month") && data_span >= 48) { preferred_increment <- c(24, 12, 6, 3) weights <- weights_simple } else if (units_plot %in% c("hour", "month") && data_span >= 24) { preferred_increment <- c(3, 12, 6, 2) weights <- weights_simple } else if (units_plot %in% c("hour", "month") && data_span < 24) { preferred_increment <- c(6, 3, 2, 1) weights <- weights_simple } else if (units_plot == "day" && data_span >= 720) { preferred_increment <- c(360, 180) weights <- weights_simple } else if (units_plot == "day" && data_span >= 360) { preferred_increment <- c(180, 120, 60) weights <- weights_simple } else if (units_plot == "day" && data_span >= 90) { preferred_increment <- c(30, 7) weights <- weights_simple } else if (units_plot == "day" && data_span >= 12) { preferred_increment <- c(7, 14, 28) weights <- weights_simple } else { preferred_increment <- preferred_increment_default weights <- weights_default } breaks <- labeling::extended(data_min, data_max, m = number_breaks, Q = preferred_increment, w = weights) return(breaks) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.gprd.R \name{plot.gprd} \alias{plot.gprd} \title{Plot Gaussian Process Regression for Regression Discontinuity} \usage{ \method{plot}{gprd}( x, from = "data_min", to = "data_max", n_points = NULL, ci_width = 0.95, data_color = "#1c1c1c1c", line_color = "black", ci_color = "#87878787", plot_cutoff = TRUE, main_title = "", xlab = "", ylab = "", ... ) } \arguments{ \item{x}{A object of class \code{\link{gprd}}} \item{from}{The lowest x value to plot prediction for. The default, "data_min", uses the lowest forcing variable observed value. If both \code{from} and \code{to} are \code{NULL}, only values in the training data and at the cutoff are plotted. Otherwise a numeric vector of length one should be given.} \item{to}{The highest x value to plot prediction for. The default "data_max" uses the largest forcing variable observed value. If both \code{from} and \code{to} are \code{NULL}, only values in the training data and at the cutoff are plotted. Otherwise a numeric vector of length one should be given.} \item{n_points}{An integer vector of length one giving the number of prediction points to plot; if \code{NULL} and \code{from} and \code{to} are given, \code{n_points = length(seq(from = from, to = to, by = 0.01))}} \item{ci_width}{A numeric vector of length one between 0 and 1 giving the width of the confidence interval for tau} \item{data_color}{A color to plot the data points in} \item{line_color}{A color to plot the predictive mean line in} \item{ci_color}{A color to plot the CI polygon in} \item{plot_cutoff}{A logical vector of length one; if \code{TRUE}, a dashed vertical line (\code{lty = 2}) marks the cutoff; default is \code{TRUE}} \item{main_title}{A character vector of length one giving a main title for the plot} \item{xlab}{A character vector of length one giving a title for the x axis} \item{ylab}{A character vector of length one giving a title for the y axis} \item{...}{Other arguments passed to \code{\link[graphics]{plot}}} } \value{ Returns \code{NULL} invisibly } \description{ Plot Gaussian Process Regression for Regression Discontinuity } \seealso{ gprd }
/man/plot.gprd.Rd
no_license
duckmayr/gprd
R
false
true
2,227
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.gprd.R \name{plot.gprd} \alias{plot.gprd} \title{Plot Gaussian Process Regression for Regression Discontinuity} \usage{ \method{plot}{gprd}( x, from = "data_min", to = "data_max", n_points = NULL, ci_width = 0.95, data_color = "#1c1c1c1c", line_color = "black", ci_color = "#87878787", plot_cutoff = TRUE, main_title = "", xlab = "", ylab = "", ... ) } \arguments{ \item{x}{A object of class \code{\link{gprd}}} \item{from}{The lowest x value to plot prediction for. The default, "data_min", uses the lowest forcing variable observed value. If both \code{from} and \code{to} are \code{NULL}, only values in the training data and at the cutoff are plotted. Otherwise a numeric vector of length one should be given.} \item{to}{The highest x value to plot prediction for. The default "data_max" uses the largest forcing variable observed value. If both \code{from} and \code{to} are \code{NULL}, only values in the training data and at the cutoff are plotted. Otherwise a numeric vector of length one should be given.} \item{n_points}{An integer vector of length one giving the number of prediction points to plot; if \code{NULL} and \code{from} and \code{to} are given, \code{n_points = length(seq(from = from, to = to, by = 0.01))}} \item{ci_width}{A numeric vector of length one between 0 and 1 giving the width of the confidence interval for tau} \item{data_color}{A color to plot the data points in} \item{line_color}{A color to plot the predictive mean line in} \item{ci_color}{A color to plot the CI polygon in} \item{plot_cutoff}{A logical vector of length one; if \code{TRUE}, a dashed vertical line (\code{lty = 2}) marks the cutoff; default is \code{TRUE}} \item{main_title}{A character vector of length one giving a main title for the plot} \item{xlab}{A character vector of length one giving a title for the x axis} \item{ylab}{A character vector of length one giving a title for the y axis} \item{...}{Other arguments passed to \code{\link[graphics]{plot}}} } \value{ Returns \code{NULL} invisibly } \description{ Plot Gaussian Process Regression for Regression Discontinuity } \seealso{ gprd }
#' Check package including congruence of attributes and data #' #' This function checks that the attributes listed in the metadata match the values in the data for each #' tabular data object. It may also optionally check if all creators have ORCIDs and have full access #' to all elements of the data package. #' #' @param mn (MNode) The Member Node to query. #' @param resource_map_pid (character) The PID for a resource map. #' @param read_all_data (logical) Read all data from remote and check that column types match attributes. If `FALSE`, #' only read first 10 rows. Only applicable to public packages (private packages will read complete dataset). #' If `check_attributes = FALSE`, no rows will be read. #' @param check_attributes (logical) Check congruence of attributes and data. #' @param check_creators (logical) Check if each creator has an ORCID. Will also run if `check_access = TRUE`. #' @param check_access (logical) Check if each creator has full access to the metadata, resource map, and data objects. #' Will not run if the checks associated with `check_creators` fail. #' #' @return `NULL` #' #' @import arcticdatautils #' @import dataone #' @import EML #' @importFrom crayon green red yellow #' @importFrom datapack hasAccessRule #' @importFrom methods is slot #' @importFrom ncdf4 nc_open ncvar_get #' @importFrom readxl read_excel #' @importFrom sf read_sf st_set_geometry #' @importFrom utils read.csv read.delim read.table download.file #' #' @export #' #' @examples #' \dontrun{ #' # Run all QA checks #' #' qa_package(mn, pid, read_all_data = TRUE, check_attributes = TRUE, #' check_creators = TRUE, check_access = TRUE) #' } qa_package <- function(mn, resource_map_pid, read_all_data = TRUE, check_attributes = TRUE, check_creators = FALSE, check_access = FALSE) { stopifnot(class(mn) %in% c("MNode", "CNode")) stopifnot(is.character(resource_map_pid), nchar(resource_map_pid) > 0) stopifnot(is.logical(read_all_data)) stopifnot(is.logical(check_attributes)) stopifnot(is.logical(check_creators)) stopifnot(is.logical(check_access)) package <- tryCatch(suppressWarnings(arcticdatautils::get_package(mn, resource_map_pid, file_names = TRUE)), error = function(e) stop("\nFailed to get package. Is the Member Node correct? 
Is your DataONE token set?")) cat(crayon::green(paste0("\n.....Processing package ", package$resource_map, "..............."))) doc <- EML::read_eml(dataone::getObject(mn, package$metadata)) # Check creators if (check_creators || check_access) { creator_ORCIDs <- qa_creator_ORCIDs(doc) } # Check access if (check_access && length(creator_ORCIDs) > 0) { # Check metadata sysmeta <- dataone::getSystemMetadata(mn, package$metadata) qa_access(sysmeta, creator_ORCIDs) # Check resource_map sysmeta <- dataone::getSystemMetadata(mn, package$resource_map) qa_access(sysmeta, creator_ORCIDs) # Check data objects for (object in package$data) { sysmeta <- dataone::getSystemMetadata(mn, object) qa_access(sysmeta, creator_ORCIDs) } } if (!is.null(names(doc$dataset$dataTable)) & length(doc$dataset$dataTable) > 0) { doc$dataset$dataTable <- list(doc$dataset$dataTable) } if (!is.null(names(doc$dataset$otherEntity)) & length(doc$dataset$otherEntity) > 0) { doc$dataset$otherEntity <- list(doc$dataset$otherEntity) } if (!is.null(names(doc$dataset$spatialVector)) & length(doc$dataset$spatialVector) > 0) { doc$dataset$spatialVector <- list(doc$dataset$spatialVector) } if (is.null(eml_get_simple(doc$dataset, "dataTable"))) doc$dataset$dataTable <- list() if (is.null(eml_get_simple(doc$dataset, "otherEntity"))) doc$dataset$otherEntity <- list() if (is.null(eml_get_simple(doc$dataset, "spatialVector"))) doc$dataset$spatialVector <- list() eml_objects <- c(doc$dataset$dataTable, doc$dataset$otherEntity, doc$dataset$spatialVector) if (length(eml_objects) == 0) { cat(crayon::red("\nNo data objects of a supported format were found in the EML.")) cat(crayon::green(paste0("\n\n.....Processing complete for package ", package$resource_map, "..............."))) return() } # Preserve order of getting data objects based on data type for correct name assignment # Entity names may not match data object names, so use objectName to ensure matches with data names names(eml_objects) <- c(arcticdatautils::eml_get_simple(doc$dataset$dataTable, "objectName"), arcticdatautils::eml_get_simple(doc$dataset$otherEntity, "objectName"), arcticdatautils::eml_get_simple(doc$dataset$spatialVector, "objectName")) # If object names are missing, use entity names instead if (is.null(names(eml_objects)) || any(is.na(names(eml_objects)))) { names(eml_objects) <-c(arcticdatautils::eml_get_simple(doc$dataset$dataTable, "entityName"), arcticdatautils::eml_get_simple(doc$dataset$otherEntity, "entityName"), arcticdatautils::eml_get_simple(doc$dataset$spatialVector, "entityName")) } data_objects <- dl_and_read_all_data(mn, package, doc, read_all_data) # If missing fileName, assign name to data objects for (i in seq_along(data_objects)) { if (is.na(names(data_objects)[[i]])) { id <- package$data[[i]] urls <- unique(arcticdatautils::eml_get_simple(eml_objects, "url"), recursive = TRUE) %>% grep("http", ., value = T) j <- which(stringr::str_detect(urls, id)) names(data_objects)[[i]] <- if (!is.na(EML::eml_get(eml_objects[[j]], "objectName"))) { EML::eml_get(eml_objects[[j]], "objectName") } else { EML::eml_get(eml_objects[[j]], "entityName") } } } # what does this even do? 
this seems wrong # if (length(eml_objects) != length(data_objects)) { # cat(crayon::red("\nThe number of downloaded data objects does not match the number of EML data objects.")) # cat(crayon::green(paste0("\n\n.....Processing complete for package ", # package$resource_map, "..............."))) # return() # } #eml_objects <- list(doc$dataset$dataTable, doc$dataset$otherEntity, doc$dataset$spatialVector) # Filter out data objects that have SKIP status data_objects <- Filter(function(x) suppressWarnings(length(x$status) == 0) || suppressWarnings(x$status != "SKIP"), data_objects) eml_objects <- Filter(function(x) EML::eml_get(x, "objectName") %in% names(data_objects) || EML::eml_get(x, "entityName") %in% names(data_objects), eml_objects) # Index objects in parallel based on names (in ascending order) for correct processing in iterations eml_objects <- eml_objects[order(names(eml_objects))] data_objects <- data_objects[order(names(data_objects))] if (check_attributes) mapply(qa_attributes, eml_objects, data_objects, MoreArgs = list(doc = doc)) cat(crayon::green(paste0("\n\n.....Processing complete for package ", package$resource_map, "..............."))) } # Helper function for downloading and reading all data objects in a data package dl_and_read_all_data <- function(mn, package, doc, read_all_data) { stopifnot(class(mn) %in% c("MNode", "CNode")) stopifnot(is.list(package), length(package) > 0) stopifnot(methods::is(doc, "emld")) stopifnot(is.logical(read_all_data)) urls <- unique(arcticdatautils::eml_get_simple(doc$dataset, "url"), recursive = TRUE) %>% grep("http", ., value = T) # Check that each data object has a matching URL in the EML wrong_URL <- FALSE for (datapid in package$data) { n <- which(grepl(paste0(datapid, "$"), urls)) if (length(n) != 1) { cat(crayon::red(paste("\nThe distribution URL for object", datapid, "is missing or incongruent in the physical section of the EML.\n"))) wrong_URL <- TRUE } } if (length(urls) != length(package$data) || wrong_URL) { # Stop here to ensure proper ordering in the following iterations stop("\nAll distribution URLs for data objects must match the data PIDs to continue.") } if (read_all_data) { rows_to_read <- -1 } else { rows_to_read <- 10 } objects <- lapply(package$data, dl_and_read_data, doc, mn, rows_to_read) return(objects) } # Helper function for downloading and reading a data object dl_and_read_data <- function(objectpid, doc, mn, rows_to_read) { supported_file_formats <- c("text/csv", "text/tsv", "text/plain", "application/vnd.ms-excel", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/zip", "netCDF-4", "netCDF-3", "CF-1.4", "CF-1.3", "CF-1.2", "CF-1.1", "CF-1.0") if (!is.null(names(doc$dataset$dataTable)) & length(doc$dataset$dataTable) > 0) { doc$dataset$dataTable <- list(doc$dataset$dataTable) } if (!is.null(names(doc$dataset$otherEntity)) & length(doc$dataset$otherEntity) > 0) { doc$dataset$otherEntity <- list(doc$dataset$otherEntity) } if (!is.null(names(doc$dataset$spatialVector)) & length(doc$dataset$spatialVector) > 0) { doc$dataset$spatialVector <- list(doc$dataset$spatialVector) } if (!is.null(doc$dataset$dataTable)){ urls_dataTable <- unique(arcticdatautils::eml_get_simple(doc$dataset$dataTable, "url"), recursive = TRUE) %>% grep("http", ., value = T) n_dT <- which(grepl(paste0(objectpid, "$"), urls_dataTable)) } else n_dt <- list() if (!is.null(doc$dataset$otherEntity)){ urls_otherEntity <- unique(arcticdatautils::eml_get_simple(doc$dataset$otherEntity, "url"), recursive = TRUE) %>% 
grep("http", ., value = T) n_oE <- which(grepl(paste0(objectpid, "$"), urls_otherEntity)) } else n_oE <- list() if (!is.null(doc$dataset$spatialVector)){ urls_spatialVector <- unique(arcticdatautils::eml_get_simple(doc$dataset$spatialVector, "url"), recursive = TRUE) %>% grep("http", ., value = T) n_sV <- which(grepl(paste0(objectpid, "$"), urls_spatialVector)) } else n_sV <- list() if (length(n_dT) == 1) { entity <- doc$dataset$dataTable[[n_dT]] urls <- urls_dataTable i <- n_dT } else if (length(n_oE) == 1) { entity <- doc$dataset$otherEntity[[n_oE]] urls <- urls_otherEntity i <- n_oE } else if (length(n_sV) == 1) { entity <- doc$dataset$spatialVector[[n_sV]] urls <- urls_spatialVector i <- n_sV } else { cat(crayon::yellow("\nData object is not tabular or not a supported format. Skipped.")) cat(crayon::green(paste0("\n..........Download complete for object ", objectpid, " (", entity$physical$objectName, ")..............."))) return(list(status = "SKIP")) } cat(crayon::green(paste0("\n\n..........Downloading object ", objectpid, " (", entity$physical$objectName, ")..............."))) # If object is not tabular data, skip to next object format <- arcticdatautils::eml_get_simple(entity, "formatName") if (length(format) == 0) { cat(crayon::red("\nData object has no given format ID in EML. Unable to check if supported format. Skipped")) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } else if (!(format %in% supported_file_formats)) { cat(crayon::red("\nData object is not tabular or not a supported format. Skipped.")) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } if (is.null(EML::get_attributes(entity$attributeList)$attributes) && length(slot(entity$attributeList, 'references')) == 0) { cat(crayon::red(paste0("\nEmpty attribute table for data object. 
Skipped."))) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } # If package is public, read directly from the file; otherwise, use DataONE API sysmeta <- dataone::getSystemMetadata(mn, objectpid) isPublic <- datapack::hasAccessRule(sysmeta, "public", "read") tryCatch({ if (isPublic) { if (format == "text/csv") { data <- utils::read.csv(urls[i], nrows = rows_to_read, check.names = FALSE, stringsAsFactors = FALSE) } else if (format == "text/tsv") { data <- utils::read.delim(urls[i], nrows = rows_to_read) } else if (format == "text/plain") { data <- utils::read.table(urls[i], nrows = rows_to_read) } else if (format == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" || format == "application/vnd.ms-excel") { tmp <- tempfile() utils::download.file(url = urls[i], destfile = tmp, mode = "wb", quiet = TRUE) data <- readxl::read_excel(tmp, n_max = if (rows_to_read == -1) Inf else rows_to_read) unlink(tmp) data } else if (format == "application/zip") { # Many formats can exist within a .zip file; skip if not spatial data tmp <- tempfile() utils::download.file(url = urls[i], destfile = tmp, quiet = TRUE) tmp2 <- tempfile() utils::unzip(tmp, exdir = tmp2) t <- list.files(tmp2, full.names = TRUE, recursive = TRUE) if (any(grep("*\\.shp", t))) { cat(crayon::yellow("\nNote: Shapefiles have attribute name limits of 10 characters.")) data <- suppressWarnings(sf::read_sf(t[grep("*\\.shp", t)]) %>% sf::st_set_geometry(NULL)) } else if (any(grep("*\\.gdb", t))) { data <- suppressWarnings(sf::read_sf(list.dirs(tmp2)[2]) %>% sf::st_set_geometry(NULL)) } else { cat(crayon::red("\nSpatial data not present within .zip file. Skipped.")) cat(crayon::green("\n..........Object not downloaded..............................")) unlink(c(tmp, tmp2), recursive = TRUE) return(list(status = "SKIP")) } unlink(c(tmp, tmp2), recursive = TRUE) data } else if (format == "netCDF-4" || format == "netCDF-3" || format == "CF-1.4" || format == "CF-1.3" || format == "CF-1.2" || format == "CF-1.1" || format == "CF-1.0") { tmp <- tempfile() utils::download.file(url = urls[i], destfile = tmp, mode = "wb", quiet = TRUE) nc <- ncdf4::nc_open(tmp) data <- netcdf_to_dataframe(nc) unlink(tmp) rm(nc) # clean up now because many netCDF files are large data } } else { if (format == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" || format == "application/vnd.ms-excel") { cat(crayon::red("\nThis function uses the DataONE API to read private data objects and currently cannot read .xls or .xlsx files.\nSkipped. Check attributes manually.")) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } else if (format == "netCDF-4" || format == "netCDF-3" || format == "CF-1.4" || format == "CF-1.3" || format == "CF-1.2" || format == "CF-1.1" || format == "CF-1.0") { tmp <- tempfile() writeBin(dataone::getObject(mn, objectpid), tmp) nc <- ncdf4::nc_open(tmp) data <- netcdf_to_dataframe(nc) unlink(tmp) rm(nc) # clean up now because many netCDF files are large data } else { data <- utils::read.csv(textConnection(rawToChar(dataone::getObject(mn, objectpid))), nrows = rows_to_read, check.names = FALSE, stringsAsFactors = FALSE) } } cat(crayon::green("\n..........Download complete..............................")) return(data) }, error = function(e) { cat(crayon::red(paste0("\nFailed to read file at ", urls[i], ". 
Skipped."))) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) }) } # Helper function for converting 2D data from a netCDF to a data.frame object netcdf_to_dataframe <- function(nc) { att_names <- names(nc$var) dims <- nc$dim dim_names <- c() for (i in seq_along(dims)) { dim_names[i] <- dims[[i]]$name } var_names <- c(att_names, dim_names) # remove duplicates dup_indices <- which(duplicated(tolower(var_names))) if (length(dup_indices) > 0) { var_names <- var_names[-dup_indices] } data <- lapply(var_names, function(x) ncdf4::ncvar_get(nc, x)) max_length <- max(unlist(lapply(data, function(x) length(x)))) results <- data.frame(matrix(ncol = length(data), nrow = max_length)) names(results) <- var_names for (i in seq_along(results)) { results[ , i] <- rep_len(data[[i]], length.out = max_length) } return(results) } #' Check congruence of data and metadata attributes for a tabular data object #' #' This function checks the congruence of data and metadata attributes #' for a tabular data object. Supported objects include `dataTable`, `otherEntity`, #' and `spatialVector` entities. It can be used on its own but is also #' called by [qa_package()] to check all tabular data objects in a data package. #' #' This function checks the following: #' * Names: Check that column names in attributes match column names in data frame. Possible conditions to check for: #' * attributeList does not exist for data frame #' * Some of the attributes that exist in the data do not exist in the attributeList #' * Some of the attributes that exist in the attributeList do not exist in the data #' * Typos in attribute or column names resulting in nonmatches #' * Domains: Check that attribute types in EML match attribute types in data frame. Possible conditions to check for: #' * nominal, ordinal, integer, ratio, dateTime #' * If domain is enumerated domain, enumerated values in the data are accounted for in the enumerated definition #' * If domain is enumerated domain, enumerated values in the enumerated definition are all represented in the data #' * Type of data does not match attribute type #' * Values: Check that values in data are reasonable. Possible conditions to check for: #' * Accidental characters in the data (e.g., one character in a column of integers) #' * If missing values are present, missing value codes are also present #' #' @param entity (emld) An EML `dataTable`, `otherEntity`, or `spatialVector` associated with the data object. #' @param data (data.frame) A data frame of the data object. #' @param doc (emld) The entire EML object. This is necessary if attributes with references are being checked. 
#' #' @return `NULL` #' #' @import arcticdatautils #' @import EML #' @importFrom crayon green red yellow #' @importFrom lubridate parse_date_time #' @importFrom methods is slot #' @importFrom stats na.omit #' @importFrom stringr str_split #' @importFrom utils capture.output head #' #' @export #' #' @seealso [qa_package()] #' #' @examples #' \dontrun{ #' # Checking a .csv file #' dataTable <- doc$dataset$dataTable[[1]] #' data <- readr::read_csv("https://cn.dataone.org/cn/v2/resolve/urn:uuid:...") #' #' qa_attributes(dataTable, data) #' } qa_attributes <- function(entity, data, doc = NULL) { stopifnot(is.data.frame(data)) if (!is.null(doc) && !methods::is(doc, "emld")) { stop("Input should be of class 'emld'.") } objectpid <- stringr::str_split(entity$physical$distribution$online$url$url, "(?=urn.)", simplify = TRUE)[[2]] cat(crayon::green(paste0("\n\n..........Processing object ", objectpid, " (", entity$physical$objectName, ")..............."))) entity_list <- doc$dataset[names(doc$dataset) %in% c("dataTable", "otherEntity", "spatialVector")] names(entity_list) <- 'entity' tryCatch({ suppressWarnings(attributeTable <- EML::get_attributes(entity$attributeList)) # Check for references if (nrow(attributeTable$attributes) == 0) { ref_index <- match_reference_to_attributeList(doc, entity) if (length(ref_index) > 0) { entity2 <- entity_list$entity[[ref_index]] attributeTable <- EML::get_attributes(entity2$attributeList) } } attributeNames <- attributeTable$attributes$attributeName # Check if attributes are present if (is.null(attributeNames)) { cat(crayon::red(paste("\nEmpty attribute table for object at", entity$physical$distribution$online$url))) } # Check for duplicated attributes based on names if (any(duplicated(attributeNames))) { cat(crayon::red(paste("\nThere are duplicated attribute names in the EML."))) } header <- as.numeric(entity$physical$dataFormat$textFormat$numHeaderLines) if (length(header) > 0 && !is.na(header) && header > 1) { names(data) <- NULL names(data) <- data[(header - 1), ] } data_cols <- colnames(data) # Check that attribute names match column names allequal <- isTRUE(all.equal(data_cols, attributeNames)) if (!allequal) { intersection <- intersect(attributeNames, data_cols) nonmatcheml <- attributeNames[!attributeNames %in% intersection] nonmatchdata <- data_cols[!data_cols %in% intersection] # EML has values that data does not have if (length(nonmatcheml) > 0) { cat(crayon::red(paste0("\nThe EML includes attributes '", toString(nonmatcheml, sep = ", "), "' that are not present in the data."))) cat(crayon::yellow("\nContinuing attribute and data matching without mismatched attributes - fix issues and re-run after first round completion.")) } # Data has values that EML does not have if (length(nonmatchdata) > 0) { cat(crayon::red(paste0("\nThe data includes attributes '", toString(nonmatchdata, sep = ", "), "' that are not present in the EML."))) cat(crayon::yellow("\nContinuing attribute and data matching without mismatched attributes - fix issues and re-run after first round completion.")) } # Values match but are not ordered correctly if (length(nonmatcheml) == 0 && length(nonmatchdata) == 0 && allequal == FALSE) { cat(crayon::yellow("\nAttribute names match column names but are incorrectly ordered.")) } data <- data[ , which(colnames(data) %in% intersection)] attributeTable$attributes <- attributeTable$attributes[which(attributeTable$attributes$attributeName %in% intersection), ] attributeTable$attributes <- 
attributeTable$attributes[order(match(attributeTable$attributes$attributeName, colnames(data))), ] } # Check that type of column matches type of data based on acceptable DataONE formats for (i in seq_along(data)) { matchingAtt <- attributeTable$attributes[i, ] attClass <- class(data[ , i]) # If matchingAtt has a dateTime domain, coerce the column based on the date/time format if (matchingAtt$measurementScale == "dateTime") { attClass <- class(suppressWarnings(lubridate::parse_date_time(data[ , i], orders = c("ymd", "HMS", "ymd HMS", "y", "m", "d", "ym", "md", "m/d/y", "d/m/y", "ymd HM", "yq", "j", "H", "M", "S", "MS", "HM", "I", "a", "A", "U", "w", "W")))) } if (attClass == "numeric" || attClass == "integer" || attClass == "double") { if (matchingAtt$measurementScale != "ratio" && matchingAtt$measurementScale != "interval" && matchingAtt$measurementScale != "dateTime") { cat(crayon::yellow(paste0("\nMismatch in attribute type for the attribute '", matchingAtt$attributeName, "'. Type of data is ", attClass, " which should probably have interval or ratio measurementScale in EML, not ", matchingAtt$measurementScale, "."))) } } else if (attClass == "character" || attClass == "logical") { if (matchingAtt$measurementScale != "nominal" && matchingAtt$measurementScale != "ordinal") { cat(crayon::yellow(paste0("\nMismatch in attribute type for the attribute '", matchingAtt$attributeName, "'. Type of data is ", attClass, " which should probably have nominal or ordinal measurementScale in EML, not ", matchingAtt$measurementScale, "."))) } } else if (any(attClass %in% c("POSIXct", "POSIXt", "Date", "Period"))) { if (matchingAtt$measurementScale != "dateTime") { cat(crayon::yellow(paste0("\nMismatch in attribute type for the attribute '", matchingAtt$attributeName, "'. 
Type of data is ", attClass, " which should probably have dateTime measurementScale in EML, not ", matchingAtt$measurementScale, "."))) } } } # Check that enumerated domains match values in data if (length(attributeTable$factors) > 0) { for (i in seq_along(unique(attributeTable$factors$attributeName))) { emlAttName <- unique(attributeTable$factors$attributeName)[i] emlUniqueValues <- attributeTable$factors[attributeTable$factors$attributeName == emlAttName, "code"] dataUniqueValues <- unique(stats::na.omit(data[[which(colnames(data) == emlAttName)]])) # omit NAs in unique values intersection <- intersect(dataUniqueValues, emlUniqueValues) nonmatcheml <- emlUniqueValues[!emlUniqueValues %in% intersection] nonmatchdata <- dataUniqueValues[!dataUniqueValues %in% intersection] if (length(nonmatcheml) > 0) { cat(crayon::yellow(paste0("\nThe EML contains the following enumerated domain values for the attribute '", as.character(emlAttName), "' that do not appear in the data: ", toString(nonmatcheml, sep = ", ")))) } if (length(nonmatchdata) > 0) { cat(crayon::yellow(paste0("\nThe data contains the following enumerated domain values for the attribute '", as.character(emlAttName), "' that do not appear in the EML: ", toString(nonmatchdata, sep = ", ")))) } } } # If there are any missing values in the data, check that there is an associated missing value code in the EML for (i in which(colSums(is.na(data)) > 0)) { # only checks for NA values but others like -99 or -999 could be present attribute <- attributeTable$attributes[i, ] if (is.null(attribute$missingValueCode)) { cat(crayon::red(paste0("\nThe attribute '", attribute$attributeName, "' contains missing values but does not have a missing value code."))) } } }, error = function(e){ cat(crayon::red("\nError. Processing for object stopped. ")) cat(crayon::red("Here's the original error message: ")) cat(crayon::red(message(e))) } ) cat(crayon::green("\n..........Processing complete..............................")) } # Helper function for matching a reference to an attributeList # Returns the index of the match match_reference_to_attributeList <- function(doc, entity) { # Get list of 'dataTable', 'otherEntity', etc. entity_list <- doc$dataset[names(doc$dataset) %in% c("dataTable", "otherEntity", "spatialVector")] names(entity_list) <- 'entity' # Get the ref we want to match ref <- eml_get_simple(entity, "references") # Get all of the references present att_lists <- eml_get(entity_list, "attributeList") # Get the index of the reference we want ids <- lapply(att_lists, eml_get_simple, "id") ids <- ids[!names(ids) == "@context"] ids <- lapply(ids, function(x){if (length(x > 1)) x[!(x == "@id")]}) suppressWarnings( index <- which(str_detect(ids, paste0('^', ref, '$')) == T)) return(index) } #' Check the ORCIDs of creators in a given EML #' #' This function is called by \code{\link{qa_package}}. #' See \code{\link{qa_package}} documentation for more details. #' #' @param doc (emld) Package metadata. #' #' @return creator_ORCIDs (character) Returns \code{character(0)} if any tests fail. 
#' #' @noRd qa_creator_ORCIDs <- function(doc) { # Check creators creators <- doc$dataset$creator if (!is.null(names(creators))){ creators <- list(doc$dataset$creator) } creator_ORCIDs <- unlist(arcticdatautils::eml_get_simple(creators, "userId")) %>% grep("orcid", ., value = T) creator_ORCIDs <- creator_ORCIDs[which(names(creator_ORCIDs) == "userId")] isORCID <- grepl("http[s]?:\\/\\/orcid.org\\/[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}", creator_ORCIDs) creator_ORCIDs <- sub("^https://", "http://", creator_ORCIDs) if (length(isORCID) != length(creators) || !all(isORCID)) { cat(crayon::red("\nEach creator needs to have a proper ORCID.")) return(character(0)) } else { return(creator_ORCIDs) } } #' Check rights and access for creators in sysmeta #' #' This function is called by \code{\link{qa_package}}. #' See \code{\link{qa_package}} documentation for more details. #' #' @param sysmeta (sysmeta) Sysmeta of a given object. #' @param creator_ORCIDs (character) ORCIDs of creators. Result of \code{\link{qa_creator_ORCIDs}}. #' #' @noRd qa_access <- function(sysmeta, creator_ORCIDs) { # Check rightsHolder if (!(sysmeta@rightsHolder %in% creator_ORCIDs)) { cat(crayon::yellow("\nThe rightsHolder for", sysmeta@identifier, "is not set to one of the creators.")) } # Check creator access for (creator in creator_ORCIDs) { creator_read <- datapack::hasAccessRule(sysmeta, creator, "read") creator_write <- datapack::hasAccessRule(sysmeta, creator, "write") creator_changePermission <- datapack::hasAccessRule(sysmeta, creator, "changePermission") creator_rightsHolder <- sysmeta@rightsHolder %in% creator access <- c(creator_read, creator_write, creator_changePermission) if (!all(access) & !creator_rightsHolder) { cat(crayon::yellow("\nFull access for", sysmeta@identifier, "is not set for creator with ORCID", creator)) } } }
/R/qa_package.R
permissive
NCEAS/datamgmt
R
false
false
32,329
r
#' Check package including congruence of attributes and data #' #' This function checks that the attributes listed in the metadata match the values in the data for each #' tabular data object. It may also optionally check if all creators have ORCIDs and have full access #' to all elements of the data package. #' #' @param mn (MNode) The Member Node to query. #' @param resource_map_pid (character) The PID for a resource map. #' @param read_all_data (logical) Read all data from remote and check that column types match attributes. If `FALSE`, #' only read first 10 rows. Only applicable to public packages (private packages will read complete dataset). #' If `check_attributes = FALSE`, no rows will be read. #' @param check_attributes (logical) Check congruence of attributes and data. #' @param check_creators (logical) Check if each creator has an ORCID. Will also run if `check_access = TRUE`. #' @param check_access (logical) Check if each creator has full access to the metadata, resource map, and data objects. #' Will not run if the checks associated with `check_creators` fail. #' #' @return `NULL` #' #' @import arcticdatautils #' @import dataone #' @import EML #' @importFrom crayon green red yellow #' @importFrom datapack hasAccessRule #' @importFrom methods is slot #' @importFrom ncdf4 nc_open ncvar_get #' @importFrom readxl read_excel #' @importFrom sf read_sf st_set_geometry #' @importFrom utils read.csv read.delim read.table download.file #' #' @export #' #' @examples #' \dontrun{ #' # Run all QA checks #' #' qa_package(mn, pid, read_all_data = TRUE, check_attributes = TRUE, #' check_creators = TRUE, check_access = TRUE) #' } qa_package <- function(mn, resource_map_pid, read_all_data = TRUE, check_attributes = TRUE, check_creators = FALSE, check_access = FALSE) { stopifnot(class(mn) %in% c("MNode", "CNode")) stopifnot(is.character(resource_map_pid), nchar(resource_map_pid) > 0) stopifnot(is.logical(read_all_data)) stopifnot(is.logical(check_attributes)) stopifnot(is.logical(check_creators)) stopifnot(is.logical(check_access)) package <- tryCatch(suppressWarnings(arcticdatautils::get_package(mn, resource_map_pid, file_names = TRUE)), error = function(e) stop("\nFailed to get package. Is the Member Node correct? 
Is your DataONE token set?")) cat(crayon::green(paste0("\n.....Processing package ", package$resource_map, "..............."))) doc <- EML::read_eml(dataone::getObject(mn, package$metadata)) # Check creators if (check_creators || check_access) { creator_ORCIDs <- qa_creator_ORCIDs(doc) } # Check access if (check_access && length(creator_ORCIDs) > 0) { # Check metadata sysmeta <- dataone::getSystemMetadata(mn, package$metadata) qa_access(sysmeta, creator_ORCIDs) # Check resource_map sysmeta <- dataone::getSystemMetadata(mn, package$resource_map) qa_access(sysmeta, creator_ORCIDs) # Check data objects for (object in package$data) { sysmeta <- dataone::getSystemMetadata(mn, object) qa_access(sysmeta, creator_ORCIDs) } } if (!is.null(names(doc$dataset$dataTable)) & length(doc$dataset$dataTable) > 0) { doc$dataset$dataTable <- list(doc$dataset$dataTable) } if (!is.null(names(doc$dataset$otherEntity)) & length(doc$dataset$otherEntity) > 0) { doc$dataset$otherEntity <- list(doc$dataset$otherEntity) } if (!is.null(names(doc$dataset$spatialVector)) & length(doc$dataset$spatialVector) > 0) { doc$dataset$spatialVector <- list(doc$dataset$spatialVector) } if (is.null(eml_get_simple(doc$dataset, "dataTable"))) doc$dataset$dataTable <- list() if (is.null(eml_get_simple(doc$dataset, "otherEntity"))) doc$dataset$otherEntity <- list() if (is.null(eml_get_simple(doc$dataset, "spatialVector"))) doc$dataset$spatialVector <- list() eml_objects <- c(doc$dataset$dataTable, doc$dataset$otherEntity, doc$dataset$spatialVector) if (length(eml_objects) == 0) { cat(crayon::red("\nNo data objects of a supported format were found in the EML.")) cat(crayon::green(paste0("\n\n.....Processing complete for package ", package$resource_map, "..............."))) return() } # Preserve order of getting data objects based on data type for correct name assignment # Entity names may not match data object names, so use objectName to ensure matches with data names names(eml_objects) <- c(arcticdatautils::eml_get_simple(doc$dataset$dataTable, "objectName"), arcticdatautils::eml_get_simple(doc$dataset$otherEntity, "objectName"), arcticdatautils::eml_get_simple(doc$dataset$spatialVector, "objectName")) # If object names are missing, use entity names instead if (is.null(names(eml_objects)) || any(is.na(names(eml_objects)))) { names(eml_objects) <-c(arcticdatautils::eml_get_simple(doc$dataset$dataTable, "entityName"), arcticdatautils::eml_get_simple(doc$dataset$otherEntity, "entityName"), arcticdatautils::eml_get_simple(doc$dataset$spatialVector, "entityName")) } data_objects <- dl_and_read_all_data(mn, package, doc, read_all_data) # If missing fileName, assign name to data objects for (i in seq_along(data_objects)) { if (is.na(names(data_objects)[[i]])) { id <- package$data[[i]] urls <- unique(arcticdatautils::eml_get_simple(eml_objects, "url"), recursive = TRUE) %>% grep("http", ., value = T) j <- which(stringr::str_detect(urls, id)) names(data_objects)[[i]] <- if (!is.na(EML::eml_get(eml_objects[[j]], "objectName"))) { EML::eml_get(eml_objects[[j]], "objectName") } else { EML::eml_get(eml_objects[[j]], "entityName") } } } # what does this even do? 
this seems wrong # if (length(eml_objects) != length(data_objects)) { # cat(crayon::red("\nThe number of downloaded data objects does not match the number of EML data objects.")) # cat(crayon::green(paste0("\n\n.....Processing complete for package ", # package$resource_map, "..............."))) # return() # } #eml_objects <- list(doc$dataset$dataTable, doc$dataset$otherEntity, doc$dataset$spatialVector) # Filter out data objects that have SKIP status data_objects <- Filter(function(x) suppressWarnings(length(x$status) == 0) || suppressWarnings(x$status != "SKIP"), data_objects) eml_objects <- Filter(function(x) EML::eml_get(x, "objectName") %in% names(data_objects) || EML::eml_get(x, "entityName") %in% names(data_objects), eml_objects) # Index objects in parallel based on names (in ascending order) for correct processing in iterations eml_objects <- eml_objects[order(names(eml_objects))] data_objects <- data_objects[order(names(data_objects))] if (check_attributes) mapply(qa_attributes, eml_objects, data_objects, MoreArgs = list(doc = doc)) cat(crayon::green(paste0("\n\n.....Processing complete for package ", package$resource_map, "..............."))) } # Helper function for downloading and reading all data objects in a data package dl_and_read_all_data <- function(mn, package, doc, read_all_data) { stopifnot(class(mn) %in% c("MNode", "CNode")) stopifnot(is.list(package), length(package) > 0) stopifnot(methods::is(doc, "emld")) stopifnot(is.logical(read_all_data)) urls <- unique(arcticdatautils::eml_get_simple(doc$dataset, "url"), recursive = TRUE) %>% grep("http", ., value = T) # Check that each data object has a matching URL in the EML wrong_URL <- FALSE for (datapid in package$data) { n <- which(grepl(paste0(datapid, "$"), urls)) if (length(n) != 1) { cat(crayon::red(paste("\nThe distribution URL for object", datapid, "is missing or incongruent in the physical section of the EML.\n"))) wrong_URL <- TRUE } } if (length(urls) != length(package$data) || wrong_URL) { # Stop here to ensure proper ordering in the following iterations stop("\nAll distribution URLs for data objects must match the data PIDs to continue.") } if (read_all_data) { rows_to_read <- -1 } else { rows_to_read <- 10 } objects <- lapply(package$data, dl_and_read_data, doc, mn, rows_to_read) return(objects) } # Helper function for downloading and reading a data object dl_and_read_data <- function(objectpid, doc, mn, rows_to_read) { supported_file_formats <- c("text/csv", "text/tsv", "text/plain", "application/vnd.ms-excel", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/zip", "netCDF-4", "netCDF-3", "CF-1.4", "CF-1.3", "CF-1.2", "CF-1.1", "CF-1.0") if (!is.null(names(doc$dataset$dataTable)) & length(doc$dataset$dataTable) > 0) { doc$dataset$dataTable <- list(doc$dataset$dataTable) } if (!is.null(names(doc$dataset$otherEntity)) & length(doc$dataset$otherEntity) > 0) { doc$dataset$otherEntity <- list(doc$dataset$otherEntity) } if (!is.null(names(doc$dataset$spatialVector)) & length(doc$dataset$spatialVector) > 0) { doc$dataset$spatialVector <- list(doc$dataset$spatialVector) } if (!is.null(doc$dataset$dataTable)){ urls_dataTable <- unique(arcticdatautils::eml_get_simple(doc$dataset$dataTable, "url"), recursive = TRUE) %>% grep("http", ., value = T) n_dT <- which(grepl(paste0(objectpid, "$"), urls_dataTable)) } else n_dt <- list() if (!is.null(doc$dataset$otherEntity)){ urls_otherEntity <- unique(arcticdatautils::eml_get_simple(doc$dataset$otherEntity, "url"), recursive = TRUE) %>% 
grep("http", ., value = T) n_oE <- which(grepl(paste0(objectpid, "$"), urls_otherEntity)) } else n_oE <- list() if (!is.null(doc$dataset$spatialVector)){ urls_spatialVector <- unique(arcticdatautils::eml_get_simple(doc$dataset$spatialVector, "url"), recursive = TRUE) %>% grep("http", ., value = T) n_sV <- which(grepl(paste0(objectpid, "$"), urls_spatialVector)) } else n_sV <- list() if (length(n_dT) == 1) { entity <- doc$dataset$dataTable[[n_dT]] urls <- urls_dataTable i <- n_dT } else if (length(n_oE) == 1) { entity <- doc$dataset$otherEntity[[n_oE]] urls <- urls_otherEntity i <- n_oE } else if (length(n_sV) == 1) { entity <- doc$dataset$spatialVector[[n_sV]] urls <- urls_spatialVector i <- n_sV } else { cat(crayon::yellow("\nData object is not tabular or not a supported format. Skipped.")) cat(crayon::green(paste0("\n..........Download complete for object ", objectpid, " (", entity$physical$objectName, ")..............."))) return(list(status = "SKIP")) } cat(crayon::green(paste0("\n\n..........Downloading object ", objectpid, " (", entity$physical$objectName, ")..............."))) # If object is not tabular data, skip to next object format <- arcticdatautils::eml_get_simple(entity, "formatName") if (length(format) == 0) { cat(crayon::red("\nData object has no given format ID in EML. Unable to check if supported format. Skipped")) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } else if (!(format %in% supported_file_formats)) { cat(crayon::red("\nData object is not tabular or not a supported format. Skipped.")) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } if (is.null(EML::get_attributes(entity$attributeList)$attributes) && length(slot(entity$attributeList, 'references')) == 0) { cat(crayon::red(paste0("\nEmpty attribute table for data object. 
Skipped."))) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } # If package is public, read directly from the file; otherwise, use DataONE API sysmeta <- dataone::getSystemMetadata(mn, objectpid) isPublic <- datapack::hasAccessRule(sysmeta, "public", "read") tryCatch({ if (isPublic) { if (format == "text/csv") { data <- utils::read.csv(urls[i], nrows = rows_to_read, check.names = FALSE, stringsAsFactors = FALSE) } else if (format == "text/tsv") { data <- utils::read.delim(urls[i], nrows = rows_to_read) } else if (format == "text/plain") { data <- utils::read.table(urls[i], nrows = rows_to_read) } else if (format == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" || format == "application/vnd.ms-excel") { tmp <- tempfile() utils::download.file(url = urls[i], destfile = tmp, mode = "wb", quiet = TRUE) data <- readxl::read_excel(tmp, n_max = if (rows_to_read == -1) Inf else rows_to_read) unlink(tmp) data } else if (format == "application/zip") { # Many formats can exist within a .zip file; skip if not spatial data tmp <- tempfile() utils::download.file(url = urls[i], destfile = tmp, quiet = TRUE) tmp2 <- tempfile() utils::unzip(tmp, exdir = tmp2) t <- list.files(tmp2, full.names = TRUE, recursive = TRUE) if (any(grep("*\\.shp", t))) { cat(crayon::yellow("\nNote: Shapefiles have attribute name limits of 10 characters.")) data <- suppressWarnings(sf::read_sf(t[grep("*\\.shp", t)]) %>% sf::st_set_geometry(NULL)) } else if (any(grep("*\\.gdb", t))) { data <- suppressWarnings(sf::read_sf(list.dirs(tmp2)[2]) %>% sf::st_set_geometry(NULL)) } else { cat(crayon::red("\nSpatial data not present within .zip file. Skipped.")) cat(crayon::green("\n..........Object not downloaded..............................")) unlink(c(tmp, tmp2), recursive = TRUE) return(list(status = "SKIP")) } unlink(c(tmp, tmp2), recursive = TRUE) data } else if (format == "netCDF-4" || format == "netCDF-3" || format == "CF-1.4" || format == "CF-1.3" || format == "CF-1.2" || format == "CF-1.1" || format == "CF-1.0") { tmp <- tempfile() utils::download.file(url = urls[i], destfile = tmp, mode = "wb", quiet = TRUE) nc <- ncdf4::nc_open(tmp) data <- netcdf_to_dataframe(nc) unlink(tmp) rm(nc) # clean up now because many netCDF files are large data } } else { if (format == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" || format == "application/vnd.ms-excel") { cat(crayon::red("\nThis function uses the DataONE API to read private data objects and currently cannot read .xls or .xlsx files.\nSkipped. Check attributes manually.")) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) } else if (format == "netCDF-4" || format == "netCDF-3" || format == "CF-1.4" || format == "CF-1.3" || format == "CF-1.2" || format == "CF-1.1" || format == "CF-1.0") { tmp <- tempfile() writeBin(dataone::getObject(mn, objectpid), tmp) nc <- ncdf4::nc_open(tmp) data <- netcdf_to_dataframe(nc) unlink(tmp) rm(nc) # clean up now because many netCDF files are large data } else { data <- utils::read.csv(textConnection(rawToChar(dataone::getObject(mn, objectpid))), nrows = rows_to_read, check.names = FALSE, stringsAsFactors = FALSE) } } cat(crayon::green("\n..........Download complete..............................")) return(data) }, error = function(e) { cat(crayon::red(paste0("\nFailed to read file at ", urls[i], ". 
Skipped."))) cat(crayon::green("\n..........Object not downloaded..............................")) return(list(status = "SKIP")) }) } # Helper function for converting 2D data from a netCDF to a data.frame object netcdf_to_dataframe <- function(nc) { att_names <- names(nc$var) dims <- nc$dim dim_names <- c() for (i in seq_along(dims)) { dim_names[i] <- dims[[i]]$name } var_names <- c(att_names, dim_names) # remove duplicates dup_indices <- which(duplicated(tolower(var_names))) if (length(dup_indices) > 0) { var_names <- var_names[-dup_indices] } data <- lapply(var_names, function(x) ncdf4::ncvar_get(nc, x)) max_length <- max(unlist(lapply(data, function(x) length(x)))) results <- data.frame(matrix(ncol = length(data), nrow = max_length)) names(results) <- var_names for (i in seq_along(results)) { results[ , i] <- rep_len(data[[i]], length.out = max_length) } return(results) } #' Check congruence of data and metadata attributes for a tabular data object #' #' This function checks the congruence of data and metadata attributes #' for a tabular data object. Supported objects include `dataTable`, `otherEntity`, #' and `spatialVector` entities. It can be used on its own but is also #' called by [qa_package()] to check all tabular data objects in a data package. #' #' This function checks the following: #' * Names: Check that column names in attributes match column names in data frame. Possible conditions to check for: #' * attributeList does not exist for data frame #' * Some of the attributes that exist in the data do not exist in the attributeList #' * Some of the attributes that exist in the attributeList do not exist in the data #' * Typos in attribute or column names resulting in nonmatches #' * Domains: Check that attribute types in EML match attribute types in data frame. Possible conditions to check for: #' * nominal, ordinal, integer, ratio, dateTime #' * If domain is enumerated domain, enumerated values in the data are accounted for in the enumerated definition #' * If domain is enumerated domain, enumerated values in the enumerated definition are all represented in the data #' * Type of data does not match attribute type #' * Values: Check that values in data are reasonable. Possible conditions to check for: #' * Accidental characters in the data (e.g., one character in a column of integers) #' * If missing values are present, missing value codes are also present #' #' @param entity (emld) An EML `dataTable`, `otherEntity`, or `spatialVector` associated with the data object. #' @param data (data.frame) A data frame of the data object. #' @param doc (emld) The entire EML object. This is necessary if attributes with references are being checked. 
#' #' @return `NULL` #' #' @import arcticdatautils #' @import EML #' @importFrom crayon green red yellow #' @importFrom lubridate parse_date_time #' @importFrom methods is slot #' @importFrom stats na.omit #' @importFrom stringr str_split #' @importFrom utils capture.output head #' #' @export #' #' @seealso [qa_package()] #' #' @examples #' \dontrun{ #' # Checking a .csv file #' dataTable <- doc$dataset$dataTable[[1]] #' data <- readr::read_csv("https://cn.dataone.org/cn/v2/resolve/urn:uuid:...") #' #' qa_attributes(dataTable, data) #' } qa_attributes <- function(entity, data, doc = NULL) { stopifnot(is.data.frame(data)) if (!is.null(doc) && !methods::is(doc, "emld")) { stop("Input should be of class 'emld'.") } objectpid <- stringr::str_split(entity$physical$distribution$online$url$url, "(?=urn.)", simplify = TRUE)[[2]] cat(crayon::green(paste0("\n\n..........Processing object ", objectpid, " (", entity$physical$objectName, ")..............."))) entity_list <- doc$dataset[names(doc$dataset) %in% c("dataTable", "otherEntity", "spatialVector")] names(entity_list) <- 'entity' tryCatch({ suppressWarnings(attributeTable <- EML::get_attributes(entity$attributeList)) # Check for references if (nrow(attributeTable$attributes) == 0) { ref_index <- match_reference_to_attributeList(doc, entity) if (length(ref_index) > 0) { entity2 <- entity_list$entity[[ref_index]] attributeTable <- EML::get_attributes(entity2$attributeList) } } attributeNames <- attributeTable$attributes$attributeName # Check if attributes are present if (is.null(attributeNames)) { cat(crayon::red(paste("\nEmpty attribute table for object at", entity$physical$distribution$online$url))) } # Check for duplicated attributes based on names if (any(duplicated(attributeNames))) { cat(crayon::red(paste("\nThere are duplicated attribute names in the EML."))) } header <- as.numeric(entity$physical$dataFormat$textFormat$numHeaderLines) if (length(header) > 0 && !is.na(header) && header > 1) { names(data) <- NULL names(data) <- data[(header - 1), ] } data_cols <- colnames(data) # Check that attribute names match column names allequal <- isTRUE(all.equal(data_cols, attributeNames)) if (!allequal) { intersection <- intersect(attributeNames, data_cols) nonmatcheml <- attributeNames[!attributeNames %in% intersection] nonmatchdata <- data_cols[!data_cols %in% intersection] # EML has values that data does not have if (length(nonmatcheml) > 0) { cat(crayon::red(paste0("\nThe EML includes attributes '", toString(nonmatcheml, sep = ", "), "' that are not present in the data."))) cat(crayon::yellow("\nContinuing attribute and data matching without mismatched attributes - fix issues and re-run after first round completion.")) } # Data has values that EML does not have if (length(nonmatchdata) > 0) { cat(crayon::red(paste0("\nThe data includes attributes '", toString(nonmatchdata, sep = ", "), "' that are not present in the EML."))) cat(crayon::yellow("\nContinuing attribute and data matching without mismatched attributes - fix issues and re-run after first round completion.")) } # Values match but are not ordered correctly if (length(nonmatcheml) == 0 && length(nonmatchdata) == 0 && allequal == FALSE) { cat(crayon::yellow("\nAttribute names match column names but are incorrectly ordered.")) } data <- data[ , which(colnames(data) %in% intersection)] attributeTable$attributes <- attributeTable$attributes[which(attributeTable$attributes$attributeName %in% intersection), ] attributeTable$attributes <- 
attributeTable$attributes[order(match(attributeTable$attributes$attributeName, colnames(data))), ] } # Check that type of column matches type of data based on acceptable DataONE formats for (i in seq_along(data)) { matchingAtt <- attributeTable$attributes[i, ] attClass <- class(data[ , i]) # If matchingAtt has a dateTime domain, coerce the column based on the date/time format if (matchingAtt$measurementScale == "dateTime") { attClass <- class(suppressWarnings(lubridate::parse_date_time(data[ , i], orders = c("ymd", "HMS", "ymd HMS", "y", "m", "d", "ym", "md", "m/d/y", "d/m/y", "ymd HM", "yq", "j", "H", "M", "S", "MS", "HM", "I", "a", "A", "U", "w", "W")))) } if (attClass == "numeric" || attClass == "integer" || attClass == "double") { if (matchingAtt$measurementScale != "ratio" && matchingAtt$measurementScale != "interval" && matchingAtt$measurementScale != "dateTime") { cat(crayon::yellow(paste0("\nMismatch in attribute type for the attribute '", matchingAtt$attributeName, "'. Type of data is ", attClass, " which should probably have interval or ratio measurementScale in EML, not ", matchingAtt$measurementScale, "."))) } } else if (attClass == "character" || attClass == "logical") { if (matchingAtt$measurementScale != "nominal" && matchingAtt$measurementScale != "ordinal") { cat(crayon::yellow(paste0("\nMismatch in attribute type for the attribute '", matchingAtt$attributeName, "'. Type of data is ", attClass, " which should probably have nominal or ordinal measurementScale in EML, not ", matchingAtt$measurementScale, "."))) } } else if (any(attClass %in% c("POSIXct", "POSIXt", "Date", "Period"))) { if (matchingAtt$measurementScale != "dateTime") { cat(crayon::yellow(paste0("\nMismatch in attribute type for the attribute '", matchingAtt$attributeName, "'. 
Type of data is ", attClass, " which should probably have dateTime measurementScale in EML, not ", matchingAtt$measurementScale, "."))) } } } # Check that enumerated domains match values in data if (length(attributeTable$factors) > 0) { for (i in seq_along(unique(attributeTable$factors$attributeName))) { emlAttName <- unique(attributeTable$factors$attributeName)[i] emlUniqueValues <- attributeTable$factors[attributeTable$factors$attributeName == emlAttName, "code"] dataUniqueValues <- unique(stats::na.omit(data[[which(colnames(data) == emlAttName)]])) # omit NAs in unique values intersection <- intersect(dataUniqueValues, emlUniqueValues) nonmatcheml <- emlUniqueValues[!emlUniqueValues %in% intersection] nonmatchdata <- dataUniqueValues[!dataUniqueValues %in% intersection] if (length(nonmatcheml) > 0) { cat(crayon::yellow(paste0("\nThe EML contains the following enumerated domain values for the attribute '", as.character(emlAttName), "' that do not appear in the data: ", toString(nonmatcheml, sep = ", ")))) } if (length(nonmatchdata) > 0) { cat(crayon::yellow(paste0("\nThe data contains the following enumerated domain values for the attribute '", as.character(emlAttName), "' that do not appear in the EML: ", toString(nonmatchdata, sep = ", ")))) } } } # If there are any missing values in the data, check that there is an associated missing value code in the EML for (i in which(colSums(is.na(data)) > 0)) { # only checks for NA values but others like -99 or -999 could be present attribute <- attributeTable$attributes[i, ] if (is.null(attribute$missingValueCode)) { cat(crayon::red(paste0("\nThe attribute '", attribute$attributeName, "' contains missing values but does not have a missing value code."))) } } }, error = function(e){ cat(crayon::red("\nError. Processing for object stopped. ")) cat(crayon::red("Here's the original error message: ")) cat(crayon::red(message(e))) } ) cat(crayon::green("\n..........Processing complete..............................")) } # Helper function for matching a reference to an attributeList # Returns the index of the match match_reference_to_attributeList <- function(doc, entity) { # Get list of 'dataTable', 'otherEntity', etc. entity_list <- doc$dataset[names(doc$dataset) %in% c("dataTable", "otherEntity", "spatialVector")] names(entity_list) <- 'entity' # Get the ref we want to match ref <- eml_get_simple(entity, "references") # Get all of the references present att_lists <- eml_get(entity_list, "attributeList") # Get the index of the reference we want ids <- lapply(att_lists, eml_get_simple, "id") ids <- ids[!names(ids) == "@context"] ids <- lapply(ids, function(x){if (length(x > 1)) x[!(x == "@id")]}) suppressWarnings( index <- which(str_detect(ids, paste0('^', ref, '$')) == T)) return(index) } #' Check the ORCIDs of creators in a given EML #' #' This function is called by \code{\link{qa_package}}. #' See \code{\link{qa_package}} documentation for more details. #' #' @param doc (emld) Package metadata. #' #' @return creator_ORCIDs (character) Returns \code{character(0)} if any tests fail. 
#' #' @noRd qa_creator_ORCIDs <- function(doc) { # Check creators creators <- doc$dataset$creator if (!is.null(names(creators))){ creators <- list(doc$dataset$creator) } creator_ORCIDs <- unlist(arcticdatautils::eml_get_simple(creators, "userId")) %>% grep("orcid", ., value = T) creator_ORCIDs <- creator_ORCIDs[which(names(creator_ORCIDs) == "userId")] isORCID <- grepl("http[s]?:\\/\\/orcid.org\\/[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}", creator_ORCIDs) creator_ORCIDs <- sub("^https://", "http://", creator_ORCIDs) if (length(isORCID) != length(creators) || !all(isORCID)) { cat(crayon::red("\nEach creator needs to have a proper ORCID.")) return(character(0)) } else { return(creator_ORCIDs) } } #' Check rights and access for creators in sysmeta #' #' This function is called by \code{\link{qa_package}}. #' See \code{\link{qa_package}} documentation for more details. #' #' @param sysmeta (sysmeta) Sysmeta of a given object. #' @param creator_ORCIDs (character) ORCIDs of creators. Result of \code{\link{qa_creator_ORCIDs}}. #' #' @noRd qa_access <- function(sysmeta, creator_ORCIDs) { # Check rightsHolder if (!(sysmeta@rightsHolder %in% creator_ORCIDs)) { cat(crayon::yellow("\nThe rightsHolder for", sysmeta@identifier, "is not set to one of the creators.")) } # Check creator access for (creator in creator_ORCIDs) { creator_read <- datapack::hasAccessRule(sysmeta, creator, "read") creator_write <- datapack::hasAccessRule(sysmeta, creator, "write") creator_changePermission <- datapack::hasAccessRule(sysmeta, creator, "changePermission") creator_rightsHolder <- sysmeta@rightsHolder %in% creator access <- c(creator_read, creator_write, creator_changePermission) if (!all(access) & !creator_rightsHolder) { cat(crayon::yellow("\nFull access for", sysmeta@identifier, "is not set for creator with ORCID", creator)) } } }
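The domain check described in the qa_attributes() documentation above boils down to a simple class-versus-measurementScale rule; the stand-alone sketch below restates it outside the long function body. check_scale() and its arguments are hypothetical names used only for illustration, not part of this package.

# Illustrative only: a stand-alone restatement of the class-vs-measurementScale
# congruence rule applied inside qa_attributes(); check_scale() is a hypothetical helper.
check_scale <- function(col_class, scale) {
  if (col_class %in% c("numeric", "integer", "double") &&
      !scale %in% c("ratio", "interval", "dateTime")) {
    return("numeric data usually need a ratio or interval measurementScale")
  }
  if (col_class %in% c("character", "logical") &&
      !scale %in% c("nominal", "ordinal")) {
    return("character/logical data usually need a nominal or ordinal measurementScale")
  }
  if (col_class %in% c("POSIXct", "POSIXt", "Date", "Period") && scale != "dateTime") {
    return("date/time data usually need a dateTime measurementScale")
  }
  "ok"
}
check_scale("numeric", "nominal")    # flags a likely mismatch
check_scale("character", "nominal")  # "ok"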
\name{fsu-package}
\alias{fsu-package}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Forecasting Functions for Time Series
}
\description{
Featuring methods and tools developed by the Forecasting & Strategy Unit (National Technical University of Athens) \cr \url{https://fsu.gr}.
}
\details{
Package: fsu \cr
Type: Package \cr
LazyLoad: yes }
\author{
Evangelos M. Theodorou \cr \cr
Maintainer: vagtheodorou@fsu.gr
}
/man/fsu-package.Rd
no_license
vagtheodorou/fsu
R
false
false
462
rd
library(shiny)
library(HistData)

data(GaltonFamilies)

# Model fitted offline and shipped with the app
fitGalton <- readRDS("galtonFit.rds")

# Globals updated by predictHeight() and read by the confidence/plot outputs
lowerConf <<- 0
upperConf <<- 0
fit <<- 0

# Predict a child's height (with a confidence interval) from the user inputs
predictHeight <- function(input) {
    newData = data.frame("mother" = input$mother,
                         "father" = input$father,
                         "gender" = input$gender,
                         "childNum" = input$childNum)
    pred <- predict(fitGalton, newData, interval='confidence')
    lowerConf <<- pred[2]
    upperConf <<- pred[3]
    fit <<- pred[1]
    round(pred[1],1)
}

shinyServer(function(input,output){

    # Point estimate, shown once the user clicks 'Estimate Me!'
    output$height <- renderText({
        if(input$goButton > 0) {
            isolate(paste(as.character(predictHeight(input)), " inches"))
        } else {
            "Please enter your data and click 'Estimate Me!'"
        }
    })

    # Lower bound of the confidence interval
    output$confidenceLower <- renderText({
        if(input$goButton > 0) {
            isolate(paste(as.character(round(lowerConf, 1)), " inches"))
        } else {
            ""
        }
    })

    # Upper bound of the confidence interval
    output$confidenceUpper <- renderText({
        if(input$goButton > 0) {
            isolate(paste(as.character(round(upperConf, 1)), " inches"))
        } else {
            ""
        }
    })

    # Histogram of children's heights with the estimate marked in red
    output$histogram <- renderPlot({
        if(input$goButton > 0) {
            hist(GaltonFamilies$childHeight,
                 xlab="Children's Height",
                 ylab="Frequency",
                 main="Histogram of Children's Height from GaltonFamilies")
            abline(v = round(fit,2), col="red")
        }
    })
})
/heightEstimator/server.R
no_license
KJBrock/DataProducts
R
false
false
1,397
r
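The server above reads a pre-fitted model from galtonFit.rds, whose formula is not shown in this repository. The sketch below reconstructs a plausible lm() fit from the same predictors referenced in newData (an assumption, not the confirmed original) and shows why pred[1], pred[2] and pred[3] correspond to the estimate and the confidence bounds.

library(HistData)
data(GaltonFamilies)
# Assumption: galtonFit.rds was produced by an lm() on these predictors; the real
# formula is not shown in this repo, so treat this as a sketch.
fitGalton <- lm(childHeight ~ mother + father + gender + childNum, data = GaltonFamilies)
# saveRDS(fitGalton, "galtonFit.rds")
newData <- data.frame(mother = 64, father = 70, gender = "male", childNum = 1)
pred <- predict(fitGalton, newData, interval = "confidence")
pred        # 1 x 3 matrix with columns fit, lwr, upr
pred[1]     # point estimate -> fit in server.R
pred[2]     # lower bound    -> lowerConf
pred[3]     # upper bound    -> upperConf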
################################# ####### Synthetic examples ###### ######## Nested clusters ######## ################################# rm(list=ls()) library(MASS) library(ggplot2) library(reshape) library(reshape2) ### Define ggplot2 theme ### my_theme <- theme( panel.background = element_rect(fill = NA), panel.grid.major = element_line(colour = "grey50"), panel.grid.major.x = element_blank() , panel.grid.major.y = element_line(size=.1, color="black"), axis.ticks.y = element_blank(), axis.ticks.x = element_blank() ) ### Load results ### n_experiments <- 100 all_weights_6clusters <- all_weights_3clusters <- all_ari_one_6clusters <- all_ari_one_3clusters <- all_cophene_6clusters <- all_cophene_3clusters <- array(NA, c(2, n_experiments)) all_ari_all_6clusters <- all_ari_all_3clusters <- rep(NA, n_experiments) for(j in 1:n_experiments){ load(paste0("../results/ari-c-", j,"-new-kmeans.RData")) # K = 6 all_weights_6clusters[,j] <- weights_6clusters all_ari_all_6clusters[j] <- ari_all_6clusters all_ari_one_6clusters[,j] <- ari_one_6clusters all_cophene_6clusters[,j] <- cophenetic_6clusters # K = 3 all_weights_3clusters[,j] <- weights_3clusters all_ari_all_3clusters[j] <- ari_all_3clusters all_ari_one_3clusters[,j] <- ari_one_3clusters all_cophene_3clusters[,j] <- cophenetic_3clusters } ##################################### K = 3 #################################### ### Adjusted Rand index ari <- rbind(all_ari_one_3clusters, all_ari_all_3clusters) rownames(ari) <- c("6", "3", "3+6") ari<-t(ari) ari.m <- melt(ari) head(ari.m) # pasting some rows of the melted data.frame colnames(ari.m) <- c("Experiment", "Datasets", "ARI") ari.m$Datasets <- factor(ari.m$Datasets, levels = c("3","6", "3+6"), ordered = TRUE) ggplot(data = ari.m, aes(x=Datasets, y=ARI)) + geom_boxplot(outlier.size = 0.3) + ylim(0,1) + my_theme ggsave("../figures/ari-c-new-3clusters-kmeans.jpg", device = "jpeg", width = 7, height = 8, units = "cm") ### Weights rownames(all_weights_3clusters) <- c("6", "3") colnames(all_weights_3clusters) <- 1:n_experiments weights_3clusters.m <- melt(t(all_weights_3clusters)) head(weights_3clusters.m) colnames(weights_3clusters.m) <- c("Experiments", "Dataset", "Weight") weights_3clusters.m$Dataset <- factor(weights_3clusters.m$Dataset, levels = c("3","6"), ordered = TRUE) ggplot(data = weights_3clusters.m, aes(x=Dataset, y=Weight)) + geom_boxplot(outlier.size = 0.3) + ylim(0,1) + my_theme ggsave("../figures/weights-3clusters-new-kmeans.jpg", device = "jpeg", width = 3.5, height = 8, units = "cm") ### Cophenetic correlation coefficient rownames(all_cophene_3clusters) <- c("6", "3") colnames(all_cophene_3clusters) <- 1:n_experiments cophene_3clusters.m <- melt(t(all_cophene_3clusters)) head(cophene_3clusters.m) colnames(cophene_3clusters.m) <- c("Experiments", "Dataset", "Correlation") cophene_3clusters.m$Dataset <- factor(cophene_3clusters.m$Dataset, levels = c("3","6"), ordered = TRUE) ggplot(data = cophene_3clusters.m, aes(x=Dataset, y=Correlation)) + geom_boxplot(outlier.size = 0.3) + ylim(0,1) + my_theme ggsave("../figures/cophene-3clusters-new-kmeans.jpg", device = "jpeg", width = 3.5, height = 8, units = "cm") ##################################### K = 6 #################################### ### Adjusted Rand index ari <- rbind(all_ari_one_6clusters, all_ari_all_6clusters) rownames(ari) <- c("6", "3", "3+6") ari<-t(ari) ari.m <- melt(ari) head(ari.m) # pasting some rows of the melted data.frame colnames(ari.m) <- c("Experiment", "Datasets", "ARI") ari.m$Datasets <- factor(ari.m$Datasets, levels = 
c("3","6", "3+6"), ordered = TRUE) ggplot(data = ari.m, aes(x=Datasets, y=ARI)) + geom_boxplot(outlier.size = 0.3) + ylim(0,1) + my_theme ggsave("../figures/ari-c-6clusters-new-kmeans.jpg", device = "jpeg", width = 7, height = 8, units = "cm") ### Weights rownames(all_weights_6clusters) <- c("6", "3") colnames(all_weights_6clusters) <- 1:n_experiments weights_6clusters.m <- melt(t(all_weights_6clusters)) head(weights_6clusters.m) colnames(weights_6clusters.m) <- c("Experiments", "Dataset", "Weight") weights_6clusters.m$Dataset <- factor(weights_6clusters.m$Dataset, levels = c("3","6"), ordered = TRUE) ggplot(data = weights_6clusters.m, aes(x=Dataset, y=Weight)) + geom_boxplot(outlier.size = 0.3) + ylim(0,1) + my_theme ggsave("../figures/weights-6clusters-new-kmeans.jpg", device = "jpeg", width = 3.5, height = 8, units = "cm") ### Cophenetic correlation coefficient rownames(all_cophene_6clusters) <- c("6", "3") colnames(all_cophene_6clusters) <- 1:n_experiments cophene_6clusters.m <- melt(t(all_cophene_6clusters)) head(cophene_6clusters.m) colnames(cophene_6clusters.m) <- c("Experiments", "Dataset", "Correlation") cophene_6clusters.m$Dataset <- factor(cophene_6clusters.m$Dataset, levels = c("3","6"), ordered = TRUE) ggplot(data = cophene_6clusters.m, aes(x=Dataset, y=Correlation)) + geom_boxplot(outlier.size = 0.3) + ylim(0,1) + my_theme ggsave("../figures/cophene-6clusters-new-kmeans.jpg", device = "jpeg", width = 3.5, height = 8, units = "cm")
/code/synthetic-data-c-plot-new-kmeans.R
no_license
acabassi/klic-code
R
false
false
5,369
r
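The plotting script above repeats the same melt-then-boxplot pattern for each 2 x n_experiments results matrix. The toy below reproduces just that pattern with made-up data so the reshaping step is easier to follow; the object names and dimensions are illustrative only.

library(reshape2)
library(ggplot2)
# Toy stand-in for one of the 2 x n_experiments result matrices used above.
m <- matrix(runif(10), nrow = 2, dimnames = list(c("6", "3"), 1:5))
m.long <- melt(t(m))                        # long format: Var1, Var2, value
colnames(m.long) <- c("Experiment", "Dataset", "Weight")
ggplot(m.long, aes(x = Dataset, y = Weight)) +
  geom_boxplot(outlier.size = 0.3) +
  ylim(0, 1)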
% Generated by roxygen2 (4.0.2): do not edit by hand \name{saveWidget} \alias{saveWidget} \title{Save a widget to an HTML file} \usage{ saveWidget(widget, file, selfcontained = TRUE, libdir = NULL) } \arguments{ \item{widget}{Widget to save} \item{file}{File to save HTML into} \item{selfcontained}{Whether to save the HTML as a single self-contained file (with external resources base64 encoded) or a file with external resources placed in an adjacent directory.} \item{libdir}{Directory to copy HTML dependencies into (defaults to filename_files).} } \description{ Save a rendered widget to an HTML file (e.g. for sharing with others). }
/man/saveWidget.Rd
no_license
hafen/htmlwidgets
R
false
false
646
rd
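A usage sketch for the function documented above. It assumes the leaflet package is installed purely as a convenient source of an htmlwidget object; any widget-producing package would do, and the output file name is arbitrary.

library(htmlwidgets)
library(leaflet)   # assumption: any package that produces an htmlwidget will do
w <- addTiles(leaflet())
saveWidget(w, "map.html", selfcontained = TRUE)
# With selfcontained = FALSE the dependencies are copied next to the HTML file
# (into "map_files/" by default, or into the directory given via libdir).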
library(powdR) ### Name: bkg ### Title: Fit a background to XRPD data ### Aliases: bkg ### ** Examples data(soils) fit_bkg <- bkg(soils$granite)
/data/genthat_extracted_code/powdR/examples/bkg.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
152
r
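A small, hedged follow-up to the extracted example above: the fitted background object can be inspected with base R, without assuming anything about powdR's own print or plot methods.

# Base-R inspection only; no powdR-specific methods are assumed here.
str(fit_bkg, max.level = 1)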
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Prediction_operators.R \name{getPredictionProbabilities} \alias{getPredictionProbabilities} \title{Get probabilities for some classes.} \usage{ getPredictionProbabilities(pred, cl) } \arguments{ \item{pred}{[\code{\link{Prediction}}]\cr Prediction object.} \item{cl}{[\code{character}]\cr Names of classes. Default is either all classes for multi-class / multilabel problems or the positive class for binary classification.} } \value{ [\code{data.frame}] with numerical columns or a numerical vector if length of \code{cl} is 1. Order of columns is defined by \code{cl}. } \description{ Get probabilities for some classes. } \examples{ task = makeClassifTask(data = iris, target = "Species") lrn = makeLearner("classif.lda", predict.type = "prob") mod = train(lrn, task) # predict probabilities pred = predict(mod, newdata = iris) # Get probabilities for all classes head(getPredictionProbabilities(pred)) # Get probabilities for a subset of classes head(getPredictionProbabilities(pred, c("setosa", "virginica"))) } \seealso{ Other predict: \code{\link{asROCRPrediction}}, \code{\link{generateROCRCurvesData}}, \code{\link{getPredictionResponse}}, \code{\link{plotROCRCurvesGGVIS}}, \code{\link{plotROCRCurves}}, \code{\link{plotViperCharts}}, \code{\link{predict.WrappedModel}}, \code{\link{setPredictThreshold}}, \code{\link{setPredictType}} }
/man/getPredictionProbabilities.Rd
no_license
abhik1368/mlr
R
false
true
1,447
rd
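Continuing the example from the help page above: because the documented return value is a data.frame with one numerical column per class, hard class labels can be recovered with base R alone.

# Runs after the example above (task, lrn, mod, pred already created).
probs <- getPredictionProbabilities(pred)
predicted_class <- colnames(probs)[max.col(probs, ties.method = "first")]
head(predicted_class)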
spacetime.db = function( DS, p, B=NULL, grp=NULL ) { #// usage: low level function to convert data into bigmemory obects to permit parallel #// data access and maipulation #// B is the xyz data to work upon #/+ if (DS %in% "bigmemory.inla.filenames" ) { # create file backed bigmemory objects p$tmp.datadir = file.path( p$project.root, "tmp" ) if( !file.exists(p$tmp.datadir)) dir.create( p$tmp.datadir, recursive=TRUE, showWarnings=FALSE ) # input data stored as a bigmatrix to permit operations with min memory usage # split into separate components to minimize filelocking conflicts p$backingfile.Y = "input.Y.bigmatrix.tmp" p$descriptorfile.Y = "input.Y.bigmatrix.desc" p$backingfile.X = "input.X.bigmatrix.tmp" p$descriptorfile.X = "input.X.bigmatrix.desc" p$backingfile.LOCS = "input.LOCS.bigmatrix.tmp" p$descriptorfile.LOCS = "input.LOCS.bigmatrix.desc" p$backingfile.P = "predictions.bigmatrix.tmp" p$descriptorfile.P = "predictions.bigmatrix.desc" p$backingfile.S = "statistics.bigmatrix.tmp" p$descriptorfile.S = "statistics.bigmatrix.desc" p$backingfile.Pcov = "predictions_cov.bigmatrix.tmp" p$descriptorfile.Pcov = "predictions_cov.bigmatrix.desc" p$backingfile.Ploc = "predictions_loc.bigmatrix.tmp" p$descriptorfile.Ploc = "predictions_loc.bigmatrix.desc" p$backingfile.Sloc = "statistics_loc.bigmatrix.tmp" p$descriptorfile.Sloc = "statistics_loc.bigmatrix.desc" return(p) } # -------------------------- if (DS %in% "bigmemory.inla.cleanup" ) { # load bigmemory data objects pointers p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) todelete = file.path( p$tmp.datadir, c( p$backingfile.P, p$descriptorfile.P, p$backingfile.S, p$descriptorfile.S, p$backingfile.Sloc, p$descriptorfile.Sloc, p$backingfile.Ploc, p$descriptorfile.Ploc, p$backingfile.Pcov, p$descriptorfile.Pcov, p$backingfile.Y, p$descriptorfile.Y, p$backingfile.X, p$descriptorfile.X, p$backingfile.LOCS, p$descriptorfile.LOCS )) for (fn in todelete ) if (file.exists(fn)) file.remove(fn) return( todelete ) } # ------------------ if (DS == "bigmemory.inla.inputs.data" ) { spacetime.db( p=p, DS="bigmemory.inla", B=B, grp="dependent" ) spacetime.db( p=p, DS="bigmemory.inla", B=B, grp="coordinates" ) spacetime.db( p=p, DS="bigmemory.inla", B=B, grp="covariates" ) } # ------------------ if (DS == "bigmemory.inla.inputs.prediction" ) { spacetime.db( p=p, DS="bigmemory.inla", B=B, grp="prediction.coordinates" ) spacetime.db( p=p, DS="bigmemory.inla", B=B, grp="prediction.covariates" ) } # ------------------ if (DS == "bigmemory.inla" ) { # create file backed bigmemory objects p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) # load bigmemory data objects pointers if (grp=="dependent") { # dependent variable fn.Y = file.path(p$tmp.datadir, p$backingfile.Y ) if ( file.exists( fn.Y) ) file.remove( fn.Y) Y = filebacked.big.matrix( nrow= nrow(B), ncol=1, type="double", dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.Y, descriptorfile=p$descriptorfile.Y ) if ( "data.frame" %in% class(B) ) { Y[] = as.matrix( B[ , p$variables$Y ] ) } else if ( "SpatialGridDataFrame" %in% class(B) ) { Y[] = as.matrix( slot(B, "data")[, p$variables$Y ] ) } } if (grp=="covariates") { # independent variables/ covariates if ( exists( "X", p$variables) ) { fn.X = file.path(p$tmp.datadir, p$backingfile.X ) if ( file.exists( fn.X) ) file.remove( fn.X) X = filebacked.big.matrix( nrow=nrow(B), ncol=length( p$variables$X ), type="double", dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.X, 
descriptorfile=p$descriptorfile.X ) if ( "data.frame" %in% class(B) ) { X[] = as.matrix( B[ , p$variables$X ] ) } else if ( "SpatialGridDataFrame" %in% class(B) ) { X[] = as.matrix( slot(B, "data")[, p$variables$X ] ) } } } if (grp=="coordinates") { # coordinates fn.LOC = file.path(p$tmp.datadir, p$backingfile.LOC ) if ( file.exists( fn.LOC) ) file.remove( fn.LOC) LOCS = filebacked.big.matrix( nrow=nrow(B), ncol=2, type="double", dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.LOCS, descriptorfile=p$descriptorfile.LOCS ) if ( "data.frame" %in% class(B) ) { LOCS[] = as.matrix( B[ , p$variables$LOCS ] ) } else if ( "SpatialGridDataFrame" %in% class(B) ) { LOCS[] = as.matrix( coordinates(B) ) } } if (grp=="prediction.coordinates") { # prediction coordinates fn.Ploc = file.path(p$tmp.datadir, p$backingfile.Ploc ) if ( file.exists( fn.Ploc) ) file.remove( fn.Ploc ) Ploc = filebacked.big.matrix( nrow=nrow(B), ncol=2, type="double", dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.Ploc, descriptorfile=p$descriptorfile.Ploc ) if ( "data.frame" %in% class(B) ) { Ploc[] = as.matrix( B[ , p$variables$LOCS ] ) } else if ( "SpatialGridDataFrame" %in% class(B) ) { Ploc[] = as.matrix( coordinates(B) ) } } if (grp=="prediction.covariates") { # prediction covariates i.e., independent variables/ covariates if ( exists( "X", p$variables) ) { fn.Pcov = file.path(p$tmp.datadir, p$backingfile.Pcov ) if ( file.exists( fn.Pcov) ) file.remove( fn.Pcov) Pcov = filebacked.big.matrix( nrow=nrow(B), ncol=length( p$variables$X ), type="double", dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.Pcov, descriptorfile=p$descriptorfile.Pcov ) if ( "data.frame" %in% class(B) ) { Pcov[] = as.matrix( B[ , p$variables$X ] ) } else if ( "SpatialGridDataFrame" %in% class(B) ) { Pcov[] = as.matrix( slot(B, "data")[, p$variables$X ] ) } } } if (grp=="statistics.coordinates") { # statistics coordinates fn.Sloc = file.path(p$tmp.datadir, p$backingfile.Sloc ) if ( file.exists( fn.Sloc) ) file.remove( fn.Sloc ) coords = expand.grid( p$sbbox$plons, p$sbbox$plats ) Sloc = filebacked.big.matrix( nrow=nrow(coords), ncol=2, type="double", dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.Sloc, descriptorfile=p$descriptorfile.Sloc ) Sloc[] = as.matrix( coords ) } if (grp=="statistics.results") { # statistics results output file .. 
initialize coords = expand.grid( p$sbbox$plons, p$sbbox$plats ) statsvars = c("range", "range.sd", "spatial.error", "observation.error") fn.S = file.path(p$tmp.datadir, p$backingfile.S ) if ( file.exists( fn.S) ) file.remove( fn.S) S = filebacked.big.matrix( nrow=nrow(coords), ncol= length( statsvars ), type="double", init=NA, dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.S, descriptorfile=p$descriptorfile.S ) } return( "complete" ) } # ---------------- if (DS %in% c( "predictions", "predictions.redo", "predictions.bigmemory.initialize" ) ) { # load bigmemory data objects pointers for predictions p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) rootdir = file.path( p$project.root, "interpolated" ) dir.create( rootdir, showWarnings=FALSE, recursive =TRUE) fn.P = file.path( rootdir, paste( "spacetime", "predictions", p$spatial.domain, "rdata", sep=".") ) if ( DS=="predictions" ) { preds = NULL if (file.exists( fn.P ) ) load( fn.P ) return( preds ) } if ( DS=="predictions.bigmemory.initialize" ) { # predictions storage matrix (discretized) fn.P = file.path(p$tmp.datadir, p$backingfile.P ) if ( file.exists( fn.P) ) file.remove( fn.P) # contains c(count, pred.mean, pred.sd) P = filebacked.big.matrix( nrow=p$nplon * p$nplat, ncol=3, type="double", init=NA, dimnames=NULL, separated=FALSE, backingpath=p$tmp.datadir, backingfile=p$backingfile.P, descriptorfile=p$descriptorfile.P ) return( fn.P ) } if ( DS =="predictions.redo" ) { pp = bigmemory::attach.big.matrix(p$descriptorfile.P, path=p$tmp.datadir) # predictions preds = pp[] ppl = bigmemory::attach.big.matrix(p$descriptorfile.Ploc, path=p$tmp.datadir) predloc = ppl[] preds = as.data.frame( cbind ( predloc, preds ) ) names(preds) = c( "plon", "plat", "ndata", "mean", "sdev" ) save( preds, file=fn.P, compress=TRUE ) return(fn.P) } } # ----------------- if (DS == "statistics.box") { sbbox = list( plats = seq( p$corners$plat[1], p$corners$plat[2], by=p$dist.mwin ), plons = seq( p$corners$plon[1], p$corners$plon[2], by=p$dist.mwin ) ) return(sbbox) } # ----------------- if (DS %in% c( "boundary.redo", "boundary" ) ) { p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) fn = file.path(p$tmp.datadir, "boundary.rdata" ) if (DS=="boundary") { if( file.exists(fn)) load( fn) return( boundary ) } # load bigmemory data objects pointers # data: Y = bigmemory::attach.big.matrix(p$descriptorfile.Y, path=p$tmp.datadir ) LOCS = bigmemory::attach.big.matrix(p$descriptorfile.LOCS, path=p$tmp.datadir ) hasdata = 1:length(Y) bad = which( !is.finite( Y[])) if (length(bad)> 0 ) hasdata[bad] = NA # covariates (independent vars) if ( exists( "X", p$variables) ) { X = bigmemory::attach.big.matrix(p$descriptorfile.X, path=p$tmp.datadir ) if ( length( p$variables$X ) == 1 ) { bad = which( !is.finite( X[]) ) } else { bad = which( !is.finite( rowSums(X[])) ) } if (length(bad)> 0 ) hasdata[bad] = NA } ii = na.omit(hasdata) ndata = length(ii) locs_noise = LOCS[ii,] + runif( ndata*2, min=-p$pres*p$spacetime.noise, max=p$pres*p$spacetime.noise ) maxdist = max( diff( range( LOCS[ii,1] )), diff( range( LOCS[ii,2] )) ) convex = -0.04 if (exists( "mesh.boundary.convex", p) ) convex=p$mesh.boundary.convex resolution = 125 if (exists( "mesh.boundary.resolution", p) ) resolution=p$mesh.boundary.resolution boundary=list( polygon = inla.nonconvex.hull( LOCS[ii,], convex=convex, resolution=resolution ) ) Sloc = bigmemory::attach.big.matrix(p$descriptorfile.Sloc , path=p$tmp.datadir ) # statistical output locations boundary$inside.polygon = 
point.in.polygon( Sloc[,1], Sloc[,2], boundary$polygon$loc[,1], boundary$polygon$loc[,2], mode.checked=TRUE) save( boundary, file=fn, compress=TRUE ) plot( LOCS[ii,], pch="." ) # data locations lines( boundary$polygon$loc , col="green" ) return( fn ) } # ----------------- if (DS %in% c( "statistics", "statistics.redo", "statistics.bigmemory.initialize", "statistics.bigmemory.size" , "statistics.bigmemory.status" ) ) { # load bigmemory data objects pointers p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) rootdir = file.path( p$project.root, "interpolated" ) dir.create( rootdir, showWarnings=FALSE, recursive =TRUE) fn.S = file.path( rootdir, paste( "spacetime", "statistics", p$spatial.domain, "rdata", sep=".") ) if ( DS=="statistics" ) { stats = NULL if (file.exists( fn.S) ) load( fn.S ) return( stats ) } if ( DS=="statistics.bigmemory.initialize" ) { # statistics storage matrix ( aggregation window, coords ) .. no inputs required spacetime.db( p=p, DS="bigmemory.inla", grp="statistics.coordinates" ) #Sloc spacetime.db( p=p, DS="bigmemory.inla", grp="statistics.results" ) # S return( "complete" ) } if ( DS=="statistics.bigmemory.size" ) { # load bigmemory data objects pointers p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) S = bigmemory::attach.big.matrix(p$descriptorfile.S , path=p$tmp.datadir ) return( nrow(S) ) } if ( DS=="statistics.bigmemory.status" ) { # find locations for statistic computation and trim area based on availability of data # stats: p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) S = bigmemory::attach.big.matrix(p$descriptorfile.S , path=p$tmp.datadir ) bnds = spacetime.db( p, DS="boundary" ) # problematic and/or no data (e.g., land, etc.) and skipped to.ignore = which( bnds$inside.polygon == 0 ) # outside boundary i = which( is.nan( S[,1] ) & bnds$inside.polygon != 0 ) # not yet completed j = which( is.na( S[,1] ) & bnds$inside.polygon != 0 ) # completed k = which( is.finite (S[,1]) & bnds$inside.polygon != 0 ) # not yet done return( list(problematic=i, incomplete=j, completed=k, n.total=nrow(S[]), n.incomplete=length(j), n.problematic=length(i), n.complete=length(k), to.ignore=to.ignore ) ) } if ( DS =="statistics.redo" ) { #\\ spacetime.db( "statsitics.redo") .. 
statistics are stored at a different resolution than the final grid #\\ this fast interpolates the solutions to the final grid p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) S = bigmemory::attach.big.matrix(p$descriptorfile.S, path=p$tmp.datadir) # statistical outputs ss = as.data.frame( S[] ) statnames0 = c( "range", "range.sd", "spatial.var", "observation.var" ) statnames = c( "range", "range.sd", "spatial.sd", "observation.sd" ) datalink = c( "log", "log", "log", "log" ) # a log-link seems appropriate for these data names(ss) = statnames0 ssl = bigmemory::attach.big.matrix(p$descriptorfile.Sloc, path=p$tmp.datadir) # statistical output locations sslocs = as.data.frame(ssl[]) # copy names(sslocs) = p$variables$LOCS ss = cbind( sslocs, ss ) rm (S) ss$spatial.sd = sqrt( ss$spatial.var ) ss$observation.sd = sqrt( ss$observation.var ) ss$spatial.var = NULL ss$observation.var = NULL # trim quaniles in case of extreme values for ( v in statnames ) { vq = quantile( ss[,v], probs= c(0.025, 0.975), na.rm=TRUE ) ii = which( ss[,v] < vq[1] ) if ( length(ii)>0) ss[ii,v] = vq[1] jj = which( ss[,v] > vq[2] ) if ( length(jj)>0) ss[jj,v] = vq[2] } locsout = expand.grid( p$plons, p$plats ) # final output grid attr( locsout , "out.attrs") = NULL names( locsout ) = p$variables$LOCS stats = matrix( NA, ncol=length(statnames), nrow=nrow( locsout) ) # output data colnames(stats)=statnames for ( iv in 1:length(statnames) ) { vn = statnames[iv] # create a "surface" and interpolate to larger grid using # (gaussian) kernel-based smooth on the log-scale z = log( matrix( ss[,vn], nrow=length(p$sbbox$plons), ncol=length( p$sbbox$plats) ) ) RES = NULL RES = spacetime.interpolate.kernel.density( x=p$sbbox$plons, y=p$sbbox$plats, z=z, locsout=locsout, nxout=length(p$plons), nyout=length( p$plats), theta=p$dist.mwin, xwidth=p$dist.mwin*10, ywidth=p$dist.mwin*10 ) # 10 SD of the normal kernel # 10 SD of the normal kernel if ( !is.null( RES )) stats[,iv] = exp( RES$z ) # return to correct scale method = FALSE if (method=="inla.fast") { # fast, but not fast enough for prime time yet # interpolation using inla is also an option # but will require a little more tweaking as it was a bit slow range0 = median( ss$range, na.rm=TRUE ) oo = which( is.finite( ss[,vn] ) ) if ( length(oo) < 30 ) next() RES = spacetime.interpolate.inla.singlepass ( ss[oo,vn], ss[oo, p$variables$LOCS], locsout, lengthscale=range0, method="fast", link=datalink[iv] ) if ( !is.null( RES )) stats[,iv] = RES$xmean rm (RES); gc() } } save( stats, file=fn.S, compress=TRUE ) return( fn.S) plotdata=FALSE ## to debug if (plotdata) { p$spatial.domain="canada.east" # force isobaths to work in levelplot datarange = log( c( 5, 1200 )) dr = seq( datarange[1], datarange[2], length.out=150) oc = landmask( db="worldHires", regions=c("Canada", "US"), return.value="not.land", tag="predictions" ) ## resolution of "predictions" which is the final grid size toplot = cbind( locsout, z=(stats[,"range"]) )[oc,] resol = c(p$dist.mwin,p$dist.mwin) levelplot( log(z) ~ plon + plat, toplot, aspect="iso", at=dr, col.regions=color.code( "seis", dr) , contour=FALSE, labels=FALSE, pretty=TRUE, xlab=NULL,ylab=NULL,scales=list(draw=FALSE), cex=2, resol=resol, panel = function(x, y, subscripts, ...) { panel.levelplot (x, y, subscripts, aspect="iso", rez=resol, ...) 
cl = landmask( return.value="coast.lonlat", ylim=c(36,53), xlim=c(-72,-45) ) cl = lonlat2planar( data.frame( cbind(lon=cl$x, lat=cl$y)), proj.type=p$internal.crs ) panel.xyplot( cl$plon, cl$plat, col = "black", type="l", lwd=0.8 ) } ) p$spatial.domain="canada.east.highres" } } } }
/spacetime/src/_Rfunctions/spacetime.methods/spacetime.db.r
no_license
fernandomayer/ecomod
R
false
false
18,463
r
point.in.polygon( Sloc[,1], Sloc[,2], boundary$polygon$loc[,1], boundary$polygon$loc[,2], mode.checked=TRUE) save( boundary, file=fn, compress=TRUE ) plot( LOCS[ii,], pch="." ) # data locations lines( boundary$polygon$loc , col="green" ) return( fn ) } # ----------------- if (DS %in% c( "statistics", "statistics.redo", "statistics.bigmemory.initialize", "statistics.bigmemory.size" , "statistics.bigmemory.status" ) ) { # load bigmemory data objects pointers p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) rootdir = file.path( p$project.root, "interpolated" ) dir.create( rootdir, showWarnings=FALSE, recursive =TRUE) fn.S = file.path( rootdir, paste( "spacetime", "statistics", p$spatial.domain, "rdata", sep=".") ) if ( DS=="statistics" ) { stats = NULL if (file.exists( fn.S) ) load( fn.S ) return( stats ) } if ( DS=="statistics.bigmemory.initialize" ) { # statistics storage matrix ( aggregation window, coords ) .. no inputs required spacetime.db( p=p, DS="bigmemory.inla", grp="statistics.coordinates" ) #Sloc spacetime.db( p=p, DS="bigmemory.inla", grp="statistics.results" ) # S return( "complete" ) } if ( DS=="statistics.bigmemory.size" ) { # load bigmemory data objects pointers p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) S = bigmemory::attach.big.matrix(p$descriptorfile.S , path=p$tmp.datadir ) return( nrow(S) ) } if ( DS=="statistics.bigmemory.status" ) { # find locations for statistic computation and trim area based on availability of data # stats: p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) S = bigmemory::attach.big.matrix(p$descriptorfile.S , path=p$tmp.datadir ) bnds = spacetime.db( p, DS="boundary" ) # problematic and/or no data (e.g., land, etc.) and skipped to.ignore = which( bnds$inside.polygon == 0 ) # outside boundary i = which( is.nan( S[,1] ) & bnds$inside.polygon != 0 ) # not yet completed j = which( is.na( S[,1] ) & bnds$inside.polygon != 0 ) # completed k = which( is.finite (S[,1]) & bnds$inside.polygon != 0 ) # not yet done return( list(problematic=i, incomplete=j, completed=k, n.total=nrow(S[]), n.incomplete=length(j), n.problematic=length(i), n.complete=length(k), to.ignore=to.ignore ) ) } if ( DS =="statistics.redo" ) { #\\ spacetime.db( "statsitics.redo") .. 
statistics are stored at a different resolution than the final grid #\\ this fast interpolates the solutions to the final grid p = spacetime.db( p=p, DS="bigmemory.inla.filenames" ) S = bigmemory::attach.big.matrix(p$descriptorfile.S, path=p$tmp.datadir) # statistical outputs ss = as.data.frame( S[] ) statnames0 = c( "range", "range.sd", "spatial.var", "observation.var" ) statnames = c( "range", "range.sd", "spatial.sd", "observation.sd" ) datalink = c( "log", "log", "log", "log" ) # a log-link seems appropriate for these data names(ss) = statnames0 ssl = bigmemory::attach.big.matrix(p$descriptorfile.Sloc, path=p$tmp.datadir) # statistical output locations sslocs = as.data.frame(ssl[]) # copy names(sslocs) = p$variables$LOCS ss = cbind( sslocs, ss ) rm (S) ss$spatial.sd = sqrt( ss$spatial.var ) ss$observation.sd = sqrt( ss$observation.var ) ss$spatial.var = NULL ss$observation.var = NULL # trim quaniles in case of extreme values for ( v in statnames ) { vq = quantile( ss[,v], probs= c(0.025, 0.975), na.rm=TRUE ) ii = which( ss[,v] < vq[1] ) if ( length(ii)>0) ss[ii,v] = vq[1] jj = which( ss[,v] > vq[2] ) if ( length(jj)>0) ss[jj,v] = vq[2] } locsout = expand.grid( p$plons, p$plats ) # final output grid attr( locsout , "out.attrs") = NULL names( locsout ) = p$variables$LOCS stats = matrix( NA, ncol=length(statnames), nrow=nrow( locsout) ) # output data colnames(stats)=statnames for ( iv in 1:length(statnames) ) { vn = statnames[iv] # create a "surface" and interpolate to larger grid using # (gaussian) kernel-based smooth on the log-scale z = log( matrix( ss[,vn], nrow=length(p$sbbox$plons), ncol=length( p$sbbox$plats) ) ) RES = NULL RES = spacetime.interpolate.kernel.density( x=p$sbbox$plons, y=p$sbbox$plats, z=z, locsout=locsout, nxout=length(p$plons), nyout=length( p$plats), theta=p$dist.mwin, xwidth=p$dist.mwin*10, ywidth=p$dist.mwin*10 ) # 10 SD of the normal kernel # 10 SD of the normal kernel if ( !is.null( RES )) stats[,iv] = exp( RES$z ) # return to correct scale method = FALSE if (method=="inla.fast") { # fast, but not fast enough for prime time yet # interpolation using inla is also an option # but will require a little more tweaking as it was a bit slow range0 = median( ss$range, na.rm=TRUE ) oo = which( is.finite( ss[,vn] ) ) if ( length(oo) < 30 ) next() RES = spacetime.interpolate.inla.singlepass ( ss[oo,vn], ss[oo, p$variables$LOCS], locsout, lengthscale=range0, method="fast", link=datalink[iv] ) if ( !is.null( RES )) stats[,iv] = RES$xmean rm (RES); gc() } } save( stats, file=fn.S, compress=TRUE ) return( fn.S) plotdata=FALSE ## to debug if (plotdata) { p$spatial.domain="canada.east" # force isobaths to work in levelplot datarange = log( c( 5, 1200 )) dr = seq( datarange[1], datarange[2], length.out=150) oc = landmask( db="worldHires", regions=c("Canada", "US"), return.value="not.land", tag="predictions" ) ## resolution of "predictions" which is the final grid size toplot = cbind( locsout, z=(stats[,"range"]) )[oc,] resol = c(p$dist.mwin,p$dist.mwin) levelplot( log(z) ~ plon + plat, toplot, aspect="iso", at=dr, col.regions=color.code( "seis", dr) , contour=FALSE, labels=FALSE, pretty=TRUE, xlab=NULL,ylab=NULL,scales=list(draw=FALSE), cex=2, resol=resol, panel = function(x, y, subscripts, ...) { panel.levelplot (x, y, subscripts, aspect="iso", rez=resol, ...) 
cl = landmask( return.value="coast.lonlat", ylim=c(36,53), xlim=c(-72,-45) ) cl = lonlat2planar( data.frame( cbind(lon=cl$x, lat=cl$y)), proj.type=p$internal.crs ) panel.xyplot( cl$plon, cl$plat, col = "black", type="l", lwd=0.8 ) } ) p$spatial.domain="canada.east.highres" } } } }
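The spacetime.db() function above is organized entirely around file-backed bigmemory matrices that are created once and then re-attached by worker processes through their descriptor files. The stand-alone sketch below shows that create/attach round trip on its own; the matrix size, file names, and temporary directory are placeholders, not the ones the function defines.

library(bigmemory)

tmp.dir = tempdir()  # placeholder for p$tmp.datadir

# create a file-backed matrix, as spacetime.db() does for Y, X, LOCS, P, S, ...
Y = filebacked.big.matrix( nrow=100, ncol=1, type="double",
  backingpath=tmp.dir,
  backingfile="example.Y.bigmatrix.tmp",
  descriptorfile="example.Y.bigmatrix.desc" )
Y[] = rnorm(100)

# another process (or a later call) re-attaches the same data via the descriptor file
Y2 = bigmemory::attach.big.matrix( "example.Y.bigmatrix.desc", path=tmp.dir )
summary( Y2[] )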
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-coastlines.R \docType{data} \name{coastlines} \alias{coastlines} \alias{Melbourne_coastline} \alias{Sydney_coastline} \alias{Brisbane_coastline} \title{Coastline coordinates} \format{\describe{ \item{\code{long}}{Longitude of coastline.} \item{\code{lat}}{Latitude of coastline.} \item{\code{order}}{Order of polygon.} \item{\code{hole,piece,group,id,rowname,N}}{(Brisbane only) data to distinguish between mainland and island coastlines.} }} \usage{ Melbourne_coastline Sydney_coastline Brisbane_coastline } \description{ Used to annotate maps to provide the coastline around cities. } \keyword{datasets}
/man/coastlines.Rd
no_license
HughParsonage/grattanCharts
R
false
true
691
rd
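A quick, hedged usage sketch for the coastline datasets documented above, assuming the grattanCharts package from this repository is installed; only the documented long/lat columns are used, and the ggplot2 line is indicative.

library(grattanCharts)

# base-graphics sanity plot of one coastline
plot(Melbourne_coastline$long, Melbourne_coastline$lat,
     type = "l", asp = 1, xlab = "Longitude", ylab = "Latitude")

# Brisbane_coastline also carries group/id columns to separate mainland and islands,
# so it is better drawn per group, e.g.:
# ggplot2::ggplot(Brisbane_coastline, aes(long, lat, group = group)) + geom_path()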
# split the data into training and test sets
library(caTools)
split <- sample.split(all_data$January, SplitRatio = 0.7)
train1 <- subset(all_data, split == TRUE)
test1 <- subset(all_data, split == FALSE)

# multiple linear regression
model2 <- lm(January ~ ., data = train1)
summary(model2)
pred2 <- predict(model2, test1)

# error metrics, computed on the held-out test set
library(MLmetrics)
MAE(pred2, test1$January)
RMSE(pred2, test1$January)
R2_Score(pred2, test1$January)

# support vector regression
library(e1071)
model3 <- svm(January ~ ., data = train1, kernel = "polynomial", type = "eps-regression")
summary(model3)
pred3 <- predict(model3, test1)
MAE(pred3, test1$January)
RMSE(pred3, test1$January)
R2_Score(pred3, test1$January)

# decision tree regression (rpart), not a second SVM
library(rpart)
model4 <- rpart(January ~ ., data = train1, method = "anova")
summary(model4)
pred4 <- predict(model4, test1)
MAE(pred4, test1$January)
RMSE(pred4, test1$January)
R2_Score(pred4, test1$January)
/Regression_Analysis.R
no_license
Gunjal15/Spatio-Temporal-Analysis-Of-Agro-Climatic-Parameters-Of-India-Using-Machine-Learning-Algorithms
R
false
false
995
r
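The MLmetrics calls above boil down to three textbook formulas; a hand-rolled version is a useful cross-check and makes the argument order explicit (these helper names are illustrative, not part of MLmetrics).

mae  <- function(y_true, y_pred) mean(abs(y_true - y_pred))
rmse <- function(y_true, y_pred) sqrt(mean((y_true - y_pred)^2))
r2   <- function(y_true, y_pred) 1 - sum((y_true - y_pred)^2) / sum((y_true - mean(y_true))^2)

# cross-check against the linear-model predictions, e.g.:
# mae(test1$January, pred2)   vs.   MAE(pred2, test1$January)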
#' Dismounts a VeraCrypt container directly from R
#'
#' This function dismounts a VeraCrypt container directly from R. Also check the sister function \code{\link{vc_mount}}.
#' @param drive character: name of the drive from which to dismount the volume. Example - "Z://"
#' @param vcexecutable character: name of the VeraCrypt executable file. If it doesn't contain an absolute path, the file name is relative to the current working directory, getwd(). Default is "C:/Program Files/VeraCrypt/VeraCrypt.exe"
#' @return Running the function will dismount the VeraCrypt volume as specified. Returns a NULL object in R.
#' @examples
#' vc_dismount("k://")
#'
#' vc_dismount("k://", vcexecutable="C:/Program Files/Veracrypt/Veracrypt.exe")
#'
#' vc_dismount("k://", vcexecutable="C:/Program Files (x86)/Veracrypt/Veracrypt.exe")
#' @export
vc_dismount <- function(drive, vcexecutable="C:\\Program Files\\VeraCrypt\\VeraCrypt.exe"){
  # call the VeraCrypt command line to dismount the volume; wait = FALSE returns control to R immediately
  system(paste("\"",vcexecutable,"\"", ' /d ', drive,' /b',sep = ""), wait = FALSE)
  # wait until the drive letter disappears, sleeping briefly instead of spinning the CPU
  while(dir.exists(drive)){
    Sys.sleep(0.1)
  }
}
/R/vc_dismount.R
no_license
Anoopsinghrawat/snxt
R
false
false
1,039
r
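vc_dismount() waits until the drive letter disappears, so a dismount that fails (say, because a file on the volume is still open) leaves it waiting forever. Below is a hedged sketch of a bounded variant; vc_dismount_timeout and its timeout argument are illustrative names, not part of the package.

vc_dismount_timeout <- function(drive,
                                vcexecutable="C:\\Program Files\\VeraCrypt\\VeraCrypt.exe",
                                timeout=30){
  # same dismount call as vc_dismount(); wait = FALSE returns control to R immediately
  system(paste("\"",vcexecutable,"\"", ' /d ', drive,' /b',sep = ""), wait = FALSE)
  deadline <- Sys.time() + timeout
  while(dir.exists(drive) && Sys.time() < deadline){
    Sys.sleep(0.2)
  }
  invisible(!dir.exists(drive))  # TRUE if the volume is gone, FALSE if we timed out
}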
## R programming assignment Deadline : 9-27-2015 makeCacheMatrix <- function(x = matrix()) { ## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse. m <- NULL ## m is set to null set <- function(y) { x <<- y m <<- NULL } get <- function() x setInverse <- function(inverse) m <<- inverse ## solve function solves the equation a %*% x = b for x, where b can be either a vector or a matrix. getInverse <- function() m list(set = set, ## to make a List of function. get = get, setInverse = setInverse, getInverse = getInverse) } cacheSolve <- function(x, ...) { ## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix m <- x$getInverse() if (!is.null(m)) { message("getting cached data") return(m) } mat <- x$get() m <- solve(mat, ...) x$setInverse(m) m }
/cachematrix.R
no_license
tuliphs1113/ProgrammingAssignment2
R
false
false
941
r
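A short usage sketch for the pair of functions above; the 2 x 2 matrix is arbitrary. The first cacheSolve() call computes and stores the inverse, the second returns the cached copy.

m <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2, ncol = 2))
inv1 <- cacheSolve(m)   # computes the inverse and caches it
inv2 <- cacheSolve(m)   # prints "getting cached data" and reuses the cache
identical(inv1, inv2)   # TRUE
m$get() %*% inv1        # numerically the 2 x 2 identity matrix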
testlist <- list(genotype = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)) result <- do.call(detectRUNS:::genoConvertCpp,testlist) str(result)
/detectRUNS/inst/testfiles/genoConvertCpp/libFuzzer_genoConvertCpp/genoConvertCpp_valgrind_files/1609875257-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
203
r
context("API methods") exercism_path <- "~/temp" exercism_key <- "test123" dir.create(exercism_path) Sys.setenv(EXERCISM_KEY = exercism_key) Sys.setenv(EXERCISM_PATH = path.expand(exercism_path)) on.exit({ unlink(exercism_path, recursive = TRUE, force = TRUE) Sys.unsetenv("EXERCISM_KEY") Sys.unsetenv("EXERCISM_PATH") }) test_that("check_next_problem", { expect_message(check_next_problem("python")) expect_error(check_next_problem("unknown")) }) test_that("fetch_next", { expect_error(fetch_next(track_id = "unknown", open = FALSE)) expect_message(fetch_next(track_id = "haskell", force = TRUE, open = FALSE)) expect_warning(fetch_next(track_id = "haskell", force = TRUE, open = FALSE), "Problem folder already exists") expect_warning(fetch_next(track_id = "haskell", open = FALSE), "Not submitted") }) test_that("fetch_problem", { expect_error(fetch_problem( track_id = "unknown", slug = "hello-world", open = FALSE )) expect_message(fetch_problem( track_id = "haskell", slug = "anagram", force = TRUE, open = FALSE )) expect_warning(fetch_problem( track_id = "haskell", slug = "anagram", force = TRUE, open = FALSE ), "Problem folder already exists") expect_error(fetch_problem( track_id = "haskell", slug = "hello-world", open = FALSE ), "Problem folder already exists") })
/tests/testthat/test-api.R
permissive
jonmcalder/exercism
R
false
false
1,414
r
############################################################################################################################ # checkModelList() # This checks user model list passed to MARSS(). # The main purpose is to make sure that MARSS.form functions will work not to make sure model is valid # Dim checks on matrices are carried out in the MARSS.form functions that translate a model list to marssMODEL object # No error checking is done on controls and inits besides checking that it is present (NULL is ok); # is.marssMLE() will error-check controls and inits ########################################################################################################################## checkModelList <- function(model, defaults, this.form.allows) { ## First deal with case where model is not passed in all if (is.null(model)) model <- defaults if (!is.list(model)) { msg <- " model must be passed in as a list.\n" cat("\n", "Errors were caught in checkModelList \n", msg, sep = "") stop("Stopped in checkModelList() due to specification problem(s).\n", call. = FALSE) } model.elem <- names(defaults) ### If some elements are missing from the model list use the defaults passed.in <- model.elem %in% names(model) for (el in model.elem[!passed.in]) { model[[el]] <- defaults[[el]] } for (el in model.elem[passed.in]) { if (is.null(model[[el]])) model[[el]] <- defaults[[el]] } # Check model structures (b497) if (!all(names(model) %in% model.elem)) { bad.name <- names(model)[!(names(model) %in% model.elem)] msg <- paste(" Elements ", bad.name, " not allowed in model list for this form.\n", sep = "") cat("\n", "Errors were caught in checkModelList \n", msg, sep = "") stop("Stopped in checkModelList() due to specification problem(s).\n", call. = FALSE) } if (!all(model.elem %in% names(model))) { bad.name <- model.elem[!(model.elem %in% names(model))] msg <- paste(" Element ", bad.name, " is missing in the model list passed into MARSS().\n", sep = "") cat("\n", "Errors were caught in checkModelList \n", msg, sep = "") stop("Stopped in checkModelList() due to specification problem(s).\n", call. = FALSE) } # check that model list doesn't have any duplicate names if (any(duplicated(names(model)))) { bad.name <- names(model)[duplicated(names(model))] msg <- paste(" The elements ", bad.name, " are duplicated in the model list passed into MARSS().\n", sep = "") cat("\n", "Errors were caught in checkModelList \n", msg, sep = "") stop("Stopped in checkModelList() due to specification problem(s).\n", call. = FALSE) } # Series of checks on the model specification problem <- FALSE msg <- NULL # check model structures only have allowed cases for (el in model.elem) { bad.str <- FALSE # if length=1, then it must be a character or numeric string and that string must be in allowed. vectors length>1 are not allowed if (!is.factor(model[[el]]) && !is.array(model[[el]])) { if (length(model[[el]]) != 1) bad.str <- TRUE if (!bad.str) { testit <- try(model[[el]] %in% this.form.allows[[el]]) if (inherits(testit, "try-error")) { bad.str <- TRUE } else { if (!testit) bad.str <- TRUE } } } if (bad.str) { problem <- TRUE msg <- c(msg, paste(" The model value for ", el, " is not allowed. 
Check ?MARSS.form \n", sep = "")) } # if factor, must be allowed to be factor if (is.factor(model[[el]]) && !(el %in% this.form.allows$factors)) { problem <- TRUE msg <- c(msg, paste(" model$", el, " is not allowed to be a factor.\n", sep = "")) } # if matrix must be allowed to be matrix if (is.array(model[[el]]) && !(el %in% this.form.allows$matrices)) { problem <- TRUE msg <- c(msg, paste(" model$", el, " is not allowed to be a matrix.\n", sep = "")) } # if matrix then no NAs if character or list; this would be caught in is.marssMODEL but would be hard for user to understand problem if (is.array(model[[el]]) && (el %in% this.form.allows$matrices)) { if (any(is.na(model[[el]])) || any(unlist(lapply(model[[el]], is.infinite)))) { problem <- TRUE msg <- c(msg, paste(" model$", el, " is a matrix. No NAs or Infs allowed in this case.\n", sep = "")) } if (is.list(model[[el]]) && any(sapply(model[[el]], length) != 1)) { problem <- TRUE msg <- c(msg, paste(" model$", el, " is a list matrix. Each element must be length 1.\n", sep = "")) } } # is matrix } # end for (el in model.elem) # If there are errors, then don't proceed with the rest of the checks if (problem) { cat("\n", "Errors were caught in checkModelList \n", msg, sep = "") stop("Stopped in checkModelList() due to specification problem(s).\n", call. = FALSE) } # return the updated model list with missing elements filled in with defaults model }
/R/checkModelList.r
permissive
nwfsc-timeseries/MARSS
R
false
false
4,990
r
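The opening block of checkModelList() is the usual "fill missing list elements from a template of defaults" idiom, shown in isolation below with made-up element names. Base R's modifyList() is close but not equivalent: it deletes elements whose user value is NULL, whereas checkModelList() falls back to the default in that case, which is why the explicit loops are used.

defaults <- list(B = "identity", U = "unequal", Q = "diagonal and unequal")
model    <- list(Q = "unconstrained", U = NULL)   # U = NULL should fall back to the default

filled <- defaults
for (el in intersect(names(model), names(defaults))) {
  if (!is.null(model[[el]])) filled[[el]] <- model[[el]]
}
str(filled)   # B and U come from defaults, Q from the user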
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/edge_info.R \name{edge_info} \alias{edge_info} \title{Get detailed information on edges} \usage{ edge_info(graph) } \arguments{ \item{graph}{a graph object of class 'gv_graph'.} } \value{ a data frame containing information specific to each edge within the graph. } \description{ Obtain a data frame with detailed information on edges and their interrelationships within a graph. }
/man/edge_info.Rd
no_license
taylorruss/DiagrammeR
R
false
false
469
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/operations.R \name{log,ExpressionSet-method} \alias{log,ExpressionSet-method} \alias{expb} \alias{expb,numeric-method} \alias{expb,matrix-method} \alias{expb,ExpressionSet-method} \alias{exp,ExpressionSet-method} \alias{range,ExpressionSet-method} \alias{quantile.ExpressionSet} \title{Numeric Computations on ExpressionSet objects} \usage{ \S4method{log}{ExpressionSet}(x, ...) expb(x, ...) \S4method{expb}{numeric}(x, base = exp(1)) \S4method{expb}{matrix}(x, base = exp(1)) \S4method{expb}{ExpressionSet}(x, ...) \S4method{exp}{ExpressionSet}(x) \S4method{range}{ExpressionSet}(x, ..., na.rm = FALSE) \method{quantile}{ExpressionSet}(x, ...) } \arguments{ \item{x}{an \code{ExpressionSet} object.} \item{...}{extra arguments passed to subsequent calls, usually of the corresponding method in the \pkg{stats} package.} \item{base}{log base to use.} \item{na.rm}{logical that indicates if missing values should be omitted from the computation.} } \description{ The \pkg{xbioc} package defines some generics and methods to apply numeric transformations to \code{ExpressionSet} objects, which is convenient when working on gene expression deconvolution algorithms, where scale (log/linear) may matter. \code{log} log-transforms the expression matrix of \code{\link{ExpressionSet}} objects. }
/man/ExpressionSet-compute.Rd
no_license
linuxpham/xbioc
R
false
true
1,380
rd
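A minimal sketch of the scale-switching use case this help page describes, assuming Biobase and xbioc are installed; the toy expression matrix is invented and the base argument is passed through the dots as the usage section suggests.

library(Biobase)
library(xbioc)

# toy expression data on the linear scale
m <- matrix(2^rnorm(20, mean = 5), nrow = 4,
            dimnames = list(paste0("gene", 1:4), paste0("sample", 1:5)))
eset <- ExpressionSet(assayData = m)

eset.log2 <- log(eset, base = 2)        # log2-transform the expression matrix
eset.back <- expb(eset.log2, base = 2)  # back to the linear scale
range(eset)                             # range of the expression values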
\name{pcor.shrink}
\alias{pcor.shrink}
\alias{pvar.shrink}
\title{Shrinkage Estimates of Partial Correlation and Partial Variance}
\description{
  The functions \code{pcor.shrink} and \code{pvar.shrink} compute shrinkage estimates of partial correlation and partial variance, respectively.
}
\usage{
pcor.shrink(x, lambda, w, verbose=TRUE)
pvar.shrink(x, lambda, lambda.var, w, verbose=TRUE)
}
\arguments{
  \item{x}{a data matrix}
  \item{lambda}{the correlation shrinkage intensity (range 0-1). If \code{lambda} is not specified (the default) it is estimated using an analytic formula from Sch\"afer and Strimmer (2005) - see \code{\link{cor.shrink}}. For \code{lambda=0} the empirical correlations are recovered.}
  \item{lambda.var}{the variance shrinkage intensity (range 0-1). If \code{lambda.var} is not specified (the default) it is estimated using an analytic formula from Opgen-Rhein and Strimmer (2007) - see details below. For \code{lambda.var=0} the empirical variances are recovered.}
  \item{w}{optional: weights for each data point - if not specified uniform weights are assumed (\code{w = rep(1/n, n)} with \code{n = nrow(x)}).}
  \item{verbose}{report progress while computing (default: TRUE)}
}
\details{
  The partial variance \eqn{var(X_k | rest)} is the variance of \eqn{X_k} conditioned on the remaining variables. It equals the inverse of the corresponding diagonal entry of the precision matrix (see Whittaker 1990).

  The partial correlation \eqn{corr(X_k, X_l | rest)} is the correlation between \eqn{X_k} and \eqn{X_l} conditioned on the remaining variables. It equals the sign-reversed off-diagonal entries of the precision matrix, standardized by the square root of the associated inverse partial variances.

  Note that using \code{pcor.shrink(x)} is \emph{much} faster than \code{cor2pcor(cor.shrink(x))}.

  For details about the shrinkage procedure consult Sch\"afer and Strimmer (2005), Opgen-Rhein and Strimmer (2007), and the help page of \code{\link{cov.shrink}}.
}
\value{
  \code{pcor.shrink} returns the partial correlation matrix. Attached to this matrix are the standardized partial variances (i.e. PVAR/VAR) that can be retrieved using \code{\link{attr}} under the attribute "spv".

  \code{pvar.shrink} returns the partial variances.
}
\author{
  Juliane Sch\"afer and Korbinian Strimmer (\url{https://strimmerlab.github.io}).
}
\references{
  Opgen-Rhein, R., and K. Strimmer. 2007. Accurate ranking of differentially expressed genes by a distribution-free shrinkage approach. Statist. Appl. Genet. Mol. Biol. \bold{6}:9. <DOI:10.2202/1544-6115.1252>

  Sch\"afer, J., and K. Strimmer. 2005. A shrinkage approach to large-scale covariance estimation and implications for functional genomics. Statist. Appl. Genet. Mol. Biol. \bold{4}:32. <DOI:10.2202/1544-6115.1175>

  Whittaker J. 1990. Graphical Models in Applied Multivariate Statistics. John Wiley, Chichester.
}
\seealso{\code{\link{invcov.shrink}}, \code{\link{cov.shrink}}, \code{\link{cor2pcor}}}
\examples{
# load corpcor library
library("corpcor")

# generate data matrix
p = 50
n = 10
X = matrix(rnorm(n*p), nrow = n, ncol = p)

# partial variance
pv = pvar.shrink(X)
pv

# partial correlations (fast and recommended way)
pcr1 = pcor.shrink(X)

# other possibilities to estimate partial correlations
pcr2 = cor2pcor( cor.shrink(X) )

# all the same
sum((pcr1 - pcr2)^2)
}
\keyword{multivariate}
/man/pcor.shrink.Rd
no_license
cran/corpcor
R
false
false
3,611
rd
#' @title Duplicate Genotypes #' @description Identify duplicate or very similar genotypes. #' #' @param g a \linkS4class{gtypes} object. #' @param num.shared either number of loci or percentage of loci two #' individuals must share to be considered duplicate individuals. #' @param num.cores number of CPU cores to use. #' #' @return if no duplicates are present, the result is \code{NULL}, otherwise #' a data.frame with the following columns is returned: #' \tabular{ll}{ #' \code{ids.1, ids.2} \tab sample ids.\cr #' \code{strata.1, strata.2} \tab sample stratification.\cr #' \code{num.loci.genotyped} \tab number of loci genotyped for both #' samples.\cr #' \code{num.loci.shared} \tab number of loci shared (all alleles the same) between both samples.\cr #' \code{prop.loci.shared} \tab proportion of loci genotyped for both samples #' that are shared.\cr #' \code{mismatch.loci} \tab loci where the two samples do not match.\cr #' } #' #' @author Eric Archer \email{eric.archer@@noaa.gov} #' #' @examples #' data(msats.g) #' #' # identify potential duplicates in Coastal strata #' dupes <- dupGenotypes(msats.g[, , "Coastal"]) #' dupes #' #' @export #' dupGenotypes <- function(g, num.shared = 0.8, num.cores = 1) { #if not already, convert num.shared to % if(num.shared > 1) num.shared <- num.shared / nLoc(g) shared.locs <- propSharedLoci(g, type = "ids", num.cores = num.cores) dup.df <- shared.locs[shared.locs[, "prop.same"] >= num.shared, ] if(nrow(dup.df) > 0) { dup.df$strata.1 <- as.character(strata(g)[dup.df$ids.1]) dup.df$strata.2 <- as.character(strata(g)[dup.df$ids.2]) dup.df$mismatch.loci <- sapply(1:nrow(dup.df), function(i) { shared.prop <- as.matrix(dup.df[i, locNames(g)]) loc.diff <- locNames(g)[which(shared.prop < 1)] paste(loc.diff, collapse = ", ") }) colnames(dup.df)[c(3:5)] <- c( "num.loci.shared", "num.loci.genotyped", "prop.loci.shared" ) dup.df <- dup.df[, c("ids.1", "ids.2", "strata.1", "strata.2", "num.loci.genotyped", "num.loci.shared", "prop.loci.shared", "mismatch.loci")] } if(nrow(dup.df) > 0) { sort.order <- order(dup.df$prop.loci.shared, dup.df$num.loci.shared, rev(dup.df$ids.1), rev(dup.df$ids.2), decreasing = TRUE ) dup.df <- dup.df[sort.order, ] rownames(dup.df) <- NULL } else dup.df <- NULL if(is.null(dup.df)) cat("No duplicates found. NULL returned.\n") dup.df }
/R/dupGenotypes.R
no_license
PAMorin/strataG
R
false
false
2,537
r
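One detail of dupGenotypes() worth spelling out: num.shared may be given either as a count of loci or as a proportion, and any value greater than 1 is divided by the number of loci. A two-line illustration (the locus count is invented):

num.shared <- 12   # intended as "12 loci"
n.loc <- 15        # stand-in for nLoc(g)
if (num.shared > 1) num.shared <- num.shared / n.loc
num.shared         # 0.8: pairs sharing >= 80% of their co-genotyped loci are flagged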
#R_Workshop_June2019 # what we did in the workshop in June 2019 # Tomo Eguchi # June 2019 # clean the workspace before do anything rm(list=ls()) # read the cleaned up version of our data file growth_data_1 <- read.csv(file = 'data/Growth data Nov 2008 cleaned.csv', na.strings = "") # remove the couple weight entries - assign NAs growth_data_2 <- read.csv(file = 'data/Growth data Nov 2008.csv', na.strings = c('172.4*', '200+')) # remove the rows with NAs in SCL and weight: # extract the relevant columns from the original "dirty" # data frame growth_data_2a <- growth_data_2[, c('SCL', 'Weight')] # find where NAs are: growth_data_2NA <- is.na(growth_data_2a) # sum each row: growth_data_2sum <- rowSums(growth_data_2NA) # extract those rows that didn't have 2 in sums: growth_data_2b <- growth_data_2[growth_data_2sum != 2, ] # alternatively, use !is.na? -> much simpler!!! growth_data_2c <- growth_data_2[!is.na(growth_data_2$SCL) | !is.na(growth_data_2$Weight),] # see if they are the same: summary(growth_data_2b) summary(growth_data_2c) dim(growth_data_2b) dim(growth_data_2c) # seems like they are the same! # examples of subsetting: big.turtles <- subset(growth_data_2b, SCL > 90.0) middle.turtle <- subset(growth_data_2b, SCL > 60.0 & SCL < 100) little.big.turtles <- subset(growth_data_2b, SCL < 60.0 | SCL > 100) # create a real data columns growth_data_2b$Date <- as.Date(growth_data_2b$Date.Caught, format = '%m/%d/%Y') # extract one turtle turtle.2116 <- subset(growth_data_2b, Turtle.ID.Tag == '2116') plot(turtle.2116$Date, turtle.2116$SCL, type = 'p') plot(turtle.2116$Date, turtle.2116$SCL, type = 'l') plot(turtle.2116$Date, turtle.2116$SCL, type = 'b') # creating size class factor: # an awkward way of doing it... growth_data_2b$Size.Class <- 1 growth_data_2b$SCL[is.na(growth_data_2b$SCL)] <- -1 growth_data_2b[growth_data_2b$SCL>60.0 & growth_data_2b$SCL <90.0, 'Size.Class'] <- 2 growth_data_2b[growth_data_2b$SCL > 90.0, 'Size.Class'] <- 3 growth_data_2b[growth_data_2b$SCL < 0, 'SCL'] <- NA growth_data_2b[is.na(growth_data_2b$SCL), 'Size.Class'] <- NA growth_data_2b$f.size.class <- as.factor(growth_data_2b$Size.Class) # or use subset data frames to make it more elegant: # create another dataframe for little ones: little.turtles <- subset(growth_data_2c, SCL < 60.0) # also re-subset big and medium ones with factor size classes: big.turtles <- subset(growth_data_2c, SCL > 90.0) middle.turtles <- subset(growth_data_2c, SCL > 60.0 & SCL < 100) # create size class column: little.turtles$Size.Class <- 1 middle.turtle$Size.Class <- 2 big.turtles$Size.Class <- 3 # then rbind all: growth_data_2d <- rbind(little.turtles, middle.turtle, big.turtles) growth_data_2d$Date <- as.Date(growth_data_2d$Date.Caught, format = '%m/%d/%Y') growth_data_2d$f.size.class <- as.factor(growth_data_2d$Size.Class) # use the factor variable to make pretty plots: library(ggplot2) p1 <- ggplot(data = growth_data_2d) + geom_point(aes(x = Date, y = SCL)) p2 <- ggplot(data = growth_data_2d) + geom_point(aes(x = Date, y = SCL, color = Turtle.ID.Tag)) p3 <- p2 + geom_line(aes(x = Date, y = SCL, color =Turtle.ID.Tag)) p4 <- p3 + theme(legend.position = 'none') p5 <- p4 + theme(panel.background = element_blank()) p6 <- p5 + theme(axis.line = element_line(color = 'red')) p7 <- p6 + theme(axis.line = element_line(size = 2)) p8 <- p7 + ylab("SCL (cm)") + xlab("Date") p9 <- p8 + ggtitle('Change in SCL') p10 <- p9 +theme(plot.title = element_text(hjust = 0.5)) p11 <- p10 + theme(axis.text.x = element_text(size = 12, angle = 90), axis.text.y 
= element_text(size = 12)) p12 <- p11 + theme(axis.title.x = element_text(size = 15)) # Let's run a linear regression analysis on SCL and mass # if possible, always look at relationships so that # models are appropriate. p13 <- ggplot() + geom_point(data = growth_data_2d, aes(x = SCL, y = Weight, color = Turtle.ID.Tag)) + theme(legend.position = 'none') # ggplot doesn't like NAs growth_data_2d <- na.omit(growth_data_2d) p13 <- ggplot() + geom_point(data = growth_data_2d, aes(x = SCL, y = Weight, color = Turtle.ID.Tag)) + theme(legend.position = 'none') # not really linear - as expected and seems like there # are some outliers!! Let's find them and remove: # Learn dplyr library(dplyr) # first one: filter(growth_data_2d, SCL < 50 & Weight > 50) # second and third: filter(growth_data_2d, SCL > 65 & SCL < 75 & Weight > 75) # can go through by eye and remove one at a time... growth_data_2d[growth_data_2d$Turtle.ID.Tag == 'X-129/X-130',] # looks like just one so remove growth_data_2d <- growth_data_2d[growth_data_2d$Turtle.ID.Tag != 'X-129/X-130',] # not very smooth/elegant # or create another column with index, True and False growth_data_2d$include <- TRUE growth_data_2d$include[growth_data_2d$SCL < 50 & growth_data_2d$Weight > 50] <- FALSE growth_data_2d$include[growth_data_2d$SCL > 65 & growth_data_2d$SCL < 75 & growth_data_2d$Weight > 75] <- FALSE growth_data_2e <- growth_data_2d[growth_data_2d$include,] p14 <- ggplot() + geom_point(data = growth_data_2e, aes(x = SCL, y = Weight, color = Turtle.ID.Tag)) + theme(legend.position = 'none') # take care of the non-linear part: p15 <- ggplot() + geom_point(data = growth_data_2e, aes(x = SCL, y = log(Weight), color = Turtle.ID.Tag)) + theme(legend.position = 'none') # about right or ... p16 <- ggplot() + geom_point(data = growth_data_2e, aes(x = SCL^3, y = Weight, color = Turtle.ID.Tag)) + theme(legend.position = 'none') # another approach is to just embrace the non-linearity # and use GAM... a bit too much stats for this workshop # another issue we see is the chagne in variance (residuals) # with increasing SCL... this needs to be dealt with with # different models... more stats! # ignoring some offensive issues... run a basic model: lm_lin <- lm(Weight ~ SCL, data = growth_data_2e) summary(lm_lin) growth_data_2e$SCL3 <- growth_data_2e$SCL^3 lm_cubed <- lm(Weight ~ SCL3, data = growth_data_2e) summary(lm_cubed) # but wait... repeated measures? # mixed effects model: library(lme4) # individual as a random effect: lmm_cubed <- lmer(Weight ~ SCL3 + (1|Turtle.ID.Tag), data = growth_data_2e) # centering and scaling: growth_data_2e$scaled_SCL3 <- scale(growth_data_2e$SCL3) growth_data_2e$scaled_Weight <- scale(growth_data_2e$Weight) lmm_scaled_cubed <- lmer(scaled_Weight ~ scaled_SCL3 + (1|Turtle.ID.Tag), data = growth_data_2e) summary(lmm_scaled_cubed) # enough stats for now... 
###################################################3 # another dataset and practice creating functions: rm(list=ls()) library(ggplot2) save.fig <- F # read the data file and clean up some ridiculous data points data.0 <- read.csv('data/IntakeAndDischargeTemp.csv', header = T) head(data.0) summary(data.0) # NA needs to be removed before making the comparison below: data.0 <- na.omit(data.0) plot(data.0$Intake, data.0$Discharge) data.0[data.0$Discharge < 50 | data.0$Discharge > 104, 'Discharge'] <- NA data.0 <- na.omit(data.0) data.0$Date <- as.Date(data.0$Date, format = '%d-%b-%y %H:%M:%S') data.0$Month <- as.numeric(format(as.Date(data.0$Date), '%m')) data.0$fMonth <- as.factor(data.0$Month) # we use Celsius not Fahrenheit # create a function that converts from F to C F2C <- function(F){ C <- (F - 32)*5/9 return(C) } p1 <- ggplot() + geom_boxplot(data = data.0, aes(x = fMonth, y = F2C(Intake)), color = 'blue', size = 1.5, alpha = 0.6) + geom_boxplot(data = data.0, aes(x = fMonth, y = F2C(Discharge)), color = 'red', size = 1.1, alpha = 0.4) + #geom_hline(yintercept = 15) + ylab("") + xlab("") + #ggtitle() + theme(plot.title = element_text(hjust = 0.5), legend.title = element_text(size = 10, hjust = 0.5), legend.text = element_text(size = 8, vjust = 0), axis.text.x = element_text(size = 12), axis.text.y = element_text(size = 12)) if (save.fig){ ggsave(plot = p1, dpi = 1200, device = 'png', filename = 'images/intake_discharge.png') } dif.temp <- F2C(data.0$Discharge) - F2C(data.0$Intake) mean.dif.temp <- mean(dif.temp) #se.dif.temp <- SE(dif.temp) ##################################################### # Another example: rm(list = ls()) library(ggplot2) #library(dplyr) #library(viridis) library(reshape) #library(tidyr) save.fig <- T dat.raw <- read.delim('data/PDO_Feb2017_data.txt', sep = "", header = T) PDO.values <- melt(dat.raw, id.vars = 'YEAR') colnames(PDO.values) <- c('Year', 'Month', 'PDO') dt <- seq(from = 0, to = 1.0 - 1/12, by = 1/12) uniq.period <- c('JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC') PDO.values$dt <- NA for (k in 1:length(uniq.period)){ PDO.values[PDO.values$Month == uniq.period[k], 'dt'] <- dt[k] } PDO.values$time <- PDO.values$Year + PDO.values$dt PDO.values$Pos <- ifelse(PDO.values$PDO > 0, 'TRUE', 'FALSE') min.yr <- 1990 max.yr <- max(PDO.values$Year) PDO.values.1990 <- subset(PDO.values, Year < max.yr & Year > min.yr) DGN_bycatch_year <- c(2001, 1998, 1997, 1993, 1992) p1 <- ggplot(data = PDO.values.1990) + geom_bar(stat = 'identity', aes(x = time, y = PDO, fill = Pos)) + scale_x_continuous(name = '', breaks = seq(from = min.yr, to = max.yr, by = 1)) + scale_fill_manual(values = c('blue', 'red'), guide = FALSE) + ggtitle('PDO index') + annotate('rect', xmin = 2001, xmax = 2002, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 1997, xmax = 1999, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 1992, xmax = 1994, ymin = -Inf, ymax = Inf, alpha = 0.3) + theme(plot.title = element_text(hjust = 0.5), axis.text.x = element_text(size = 12, angle = 90), axis.text.y = element_text(size = 12)) min.yr <- 1980 max.yr <- max(PDO.values$Year) PDO.values.1980 <- subset(PDO.values, Year <= max.yr & Year >= min.yr) PDO.values.1980$Year <- as.factor(PDO.values.1980$Year) p2 <- ggplot(data = PDO.values.1980) + geom_bar(stat = 'identity', aes(x = time, y = PDO, fill = Pos)) + scale_x_continuous(name = '', breaks = seq(from = min.yr, to = max.yr, by = 1)) + scale_fill_manual(values = c('blue', 'red'), guide = FALSE) + 
ggtitle('Pacific Decadal Oscillation Index') + annotate('rect', xmin = 2001, xmax = 2002, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 1997, xmax = 1999, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 1992, xmax = 1994, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 2006, xmax = 2007, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 2014, xmax = 2016, ymin = -Inf, ymax = Inf, alpha = 0.3) + theme(plot.title = element_text(hjust = 0.5), axis.text.x = element_text(size = 11, angle = 90), axis.text.y = element_text(size = 12)) min.yr <- min(PDO.values$Year) max.yr <- max(PDO.values$Year) PDO.values$Year <- as.factor(PDO.values$Year) p3 <- ggplot(data = PDO.values) + geom_bar(stat = 'identity', aes(x = time, y = PDO, fill = Pos)) + scale_x_continuous(name = '', breaks = seq(from = min.yr, to = max.yr, by = 1)) + scale_fill_manual(values = c('blue', 'red'), guide = FALSE) + ggtitle('Pacific Decadal Oscillation Index') + annotate('rect', xmin = 2001, xmax = 2002, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 1997, xmax = 1999, ymin = -Inf, ymax = Inf, alpha = 0.3) + annotate('rect', xmin = 1992, xmax = 1994, ymin = -Inf, ymax = Inf, alpha = 0.3) + theme(plot.title = element_text(hjust = 0.5), axis.text.x = element_text(size = 11, angle = 90), axis.text.y = element_text(size = 12)) if (save.fig){ ggsave(plot = p1, dpi = 1200, file = paste0('images/PDO1990_', Sys.Date(), '.png')) ggsave(plot = p3, dpi = 1200, file = paste0('images/PDOall_', Sys.Date(), '.png')) ggsave(plot = p2, dpi = 1200, file = paste0('images/PDO1980_', Sys.Date(), '.png')) } ###################################################### rm(list=ls()) library(ggplot2) library(ggmap) library(viridis) #library(cowplot) save.fig <- T internet <- T #infile <- 'data/Stranding_Query_Loggerheads_March2017.txt' infile <- 'data/CcStrandingQuery_16March2017.csv' dat0 <- read.table(infile, sep = ",", header = TRUE) # look at the data: head(dat0) str(dat0) dat0.state <- dat0[dat0$State != '', ] dat0.state$Year <- as.factor(dat0.state$Year_Initially_Observed) p1 <- ggplot(data = dat0.state) + geom_bar(aes(x = Year, fill = State)) + #qplot(yr.fac, data = dat1.fishery, geom = "bar", fill = STATE) + scale_y_continuous(breaks = seq(0, 17, 1)) + ylab('Counts') + xlab('Year') + ggtitle('Stranded loggerhead turtles') + theme(axis.text.x = element_text(angle = 90, size = 15, vjust = 0.5)) dat1 <- subset(dat0, Alive_Released == 'FALSE' & !is.na(Latitude)) dat1$yr.fac <- as.factor(dat1$Year_Initially_Observed) dat1.size <- dat1[, c('State', 'yr.fac', 'Species_Code', 'Latitude', 'Longitude', 'Weight', 'Curved_Carapace_Length', 'Straight_Carapace_Length')] colnames(dat1.size) <- c('State', 'Year', 'Species_Code', 'Latitude', 'Longitude', 'Weight', 'Curved_Carapace_Length', 'Straight_Carapace_Length') # if (internet){ # West.coast <- get_map(location = c(lon = -138.0, # lat = 43.0), # zoom = 4, # maptype = "satellite", # color = 'bw', # source = 'google') # saveRDS(West.coast, # file = 'RData/CC_stranding_westcoast.rds') # # So.Cal <- get_map(location = c(lon = -119.0, # lat = 33), # zoom = 7, # maptype = "satellite", # color = 'bw', # source = 'google') # saveRDS(So.Cal, # file = 'RData/CC_stranding_SoCal.rds') # } else { # West.coast <- readRDS(file = 'RData/CC_stranding_westcoast.rds') # SoCal <- readRDS(file = 'RData/CC_stranding_SoCal.rds') # print('read from rds files') # } # map.west.coast <- ggmap(West.coast) # map.So.Cal <- ggmap(So.Cal) # # p2 <-map.west.coast + # 
geom_point(data = dat1.size, # aes(x = Longitude, y = Latitude, # color = Year), # size = 4) + # scale_color_viridis(discrete = TRUE, # begin = 0.5, end = 1.0) + # xlab("Longitude") + # ylab("Latitude") + # theme(plot.title = element_text(hjust = 0.5), # legend.title = element_text(size = 10, hjust = 0.5), # legend.text = element_text(size = 8, vjust = 0), # legend.position = c(0.15, 0.4)) # # dat.locs.So.Cal <- subset(dat1.size, # Latitude < 34.45 & Longitude > -122) # p3 <- map.So.Cal + # geom_point(data = dat.locs.So.Cal, # aes(x = Longitude, # y = Latitude, # color = Year), # size = 3) + # scale_color_viridis(discrete = TRUE, # begin = 0.5, # end = 1.0) + # xlab("Longitude") + # ylab("Latitude") + # #ggtitle("Loggerhead turtles") + # theme(plot.title = element_text(hjust = 0.5), # legend.title = element_text(size = 10, # hjust = 0.5), # legend.text = element_text(size = 8, # vjust = 0), # legend.position = c(0.90, 0.6)) # # dat.size <- na.omit(data.frame(Year = dat1.size$Year, # CCL = dat1.size$Curved_Carapace_Length, # state = dat1.size$State)) # p4 <- ggplot() + # geom_histogram(data = dat.size, # aes(x = CCL), # binwidth = 5, # color = 'black', # fill = 'white') + # xlab(expression(CCL[cm])) + # ylab('Frequency') + # ggtitle('USA') + # xlim(10, 100) + # # scale_x_discrete() # theme(axis.title.y = element_text(size = 12), # axis.text.y = element_text(size = 12)) # # if (save.fig){ # ggsave(filename = paste0('images/Cc_strandings_', # Sys.Date(), '.png'), # plot = p1, # width = 8, # height = 7, # dpi = 1200) # # ggsave(filename = paste0('images/Cc_strandings_westcoast_', # Sys.Date(), '.png'), # plot = p2, # width = 9.4, # height = 8.4, # dpi = 1200) # # ggsave(filename = paste0('images/Cc_strandings_SCB_', # Sys.Date(), '.png'), # plot = p3, # width = 9.4, # height = 8.4, # dpi = 1200) # # }
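
# A small, hedged sketch related to the temperature section earlier in this
# script: the line 'se.dif.temp <- SE(dif.temp)' was left commented out there
# because base R has no SE() function. One possible helper is sketched below;
# the function name and the sd(x)/sqrt(n) formula are assumptions rather than
# original workshop code, and the helper would need to be defined in that
# section, before the workspace is cleared by rm(list = ls()).
SE <- function(x, na.rm = TRUE){
  if (na.rm) x <- x[!is.na(x)]                # drop missing values if requested
  sd(x) / sqrt(length(x))                     # standard error of the mean
}
# example use (in the temperature section):
# se.dif.temp <- SE(dif.temp)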
/R_workshop_June2019.R
no_license
mteguchi/R_mini_course_2019
R
false
false
18,549
r
# --------
# Kossova E.V., Potanin B.S.
# Microeconometrics of Qualitative Data
# Topic 2. Alternative specifications of the error distribution
#          in binary choice models
# --------

# Turn off scientific notation
options(scipen = 999)

#---------------------------------------------------
# Data simulation
#---------------------------------------------------

# Let's reproduce the data generating process assumed by
# classical binary choice models with a linear index.
# For convenience, imagine that we are simulating the process
# that determines default on a loan.

# Simulate the data
set.seed(123)                                        # for reproducibility
n <- 10000                                           # number of individuals in the sample
h <- data.frame(income = exp(rnorm(n, 10, 0.7)))     # income
h$age = round(runif(n, 20, 100))                     # age
educ = t(rmultinom(n, 1, c(0.5, 0.3, 0.2)))          # education level
h$educ_1 = as.numeric(educ[, 1] == 1)                # secondary education
h$educ_2 = as.numeric(educ[, 2] == 1)                # specialized secondary (vocational) education
h$educ_3 <- as.numeric(educ[, 3] == 1)               # higher education
h$credit <- runif(n, 10000, 1000000)                 # loan amount
h$stable <- rbinom(n, 1, 0.6)                        # stable job

# Simulate the random error
# in several different ways

# Case 1
eps <- rnorm(n)                                      # random error from the standard
                                                     # normal distribution
plot(density(eps))

# Case 2
ber <- rbinom(n, size = 1, prob = 0.7)               # random error from a mixture
xi1 <- rt(n, 5)                                      # of Student's t
xi2 <- rt(n, 5)                                      # distributions
eps2 <- ber * (xi1 - 3) + (1 - ber) * (xi2 + 3)
plot(density(eps2))

# Case 3
tau <- c(0.01, 0.05)
eps3_var <- exp(tau[1] * h$age +                     # the error variance depends
             tau[2] * h$stable) ^ 2                  # on some of the regressors
eps3 <- rnorm(n, 0, sqrt(eps3_var))                  # heteroskedastic random error
plot(density(eps3))

# Create the linear index
beta <- c(8, -0.7, -0.02,                            # regression coefficients
          0.0001, -0.1, -0.3,                        # to be estimated
          -0.5, 0.001, -0.1)
default_li <- beta[1] + beta[2] * log(h$income) +    # linear index reflecting the
              beta[3] * h$age +                      # contribution of the observed
              beta[4] * h$age ^ 2 +                  # factors to the default probability
              beta[5] * h$educ_1 + beta[6] * h$educ_2 +
              beta[7] * h$educ_3 +
              beta[8] * sqrt(h$credit) +
              beta[9] * h$stable * log(h$income)
default_star <- default_li + eps                     # latent variable reflecting the
                                                     # propensity to default
default_star2 <- default_li + eps2                   # latent variable with a
                                                     # non-normal random error
default_star3 <- default_li + eps3                   # latent variable with a
                                                     # heteroskedastic random error

# Create the observed dependent variable
# indicating the fact of default
h$default <- as.numeric(default_star >= 0)           # observed value of the variable
mean(h$default)                                      # share of defaults
h$default2 <- as.numeric(default_star2 >= 0)
mean(h$default2)
h$default3 <- as.numeric(default_star3 >= 0)
mean(h$default3)

# Final data
head(h, 10)

# --------------------------------------------
# Variable description:
# income   - income
# age      - age
# educ_1   - secondary education
# educ_2   - specialized secondary (vocational) education
# educ_3   - higher education
# credit   - loan amount
# default  - default indicator
# default2 - default indicator with non-normal
#            random errors
# default3 - default indicator with heteroskedastic
#            random errors
# stable   - stable job
# --------------------------------------------

#---------------------------------------------------
# Part 1. Accounting for heteroskedasticity in the probit model
#---------------------------------------------------

# -----
# We learn how to:
# 1. Estimate a probit model with a heteroskedastic
#    random error
# 2. Compute probabilities and marginal effects
#    for this model
# 3. Test the hypothesis of homoskedasticity
# -----

library("glmx")                                      # package that allows estimating a probit
                                                     # model with a heteroskedastic random error
library("lmtest")                                    # additional tests
library("numDeriv")                                  # numerical differentiation
library("margins")                                   # marginal effects
library("hpa")                                       # Gallant and Nychka distribution

# Estimate the probit model
model_probit <- glm(formula = default3 ~ log(income) +       # formula without a constant, since
                                         age + educ_3,       # it is included automatically
                    data = h,                                # data frame providing the dependent
                                                             # and independent variables
                    family = binomial(link = "probit"))      # type of binary regression:
                                                             # probit in this case
model_hetprobit <- hetglm(formula = default3 ~ log(income) + # linear index of
                                               age + educ_3 |# the main equation
                                               age + stable, # linear index of
                                                             # the variance equation
                          data = h,
                          family = binomial(link = "probit"))
summary(model_hetprobit)
# In hetglm() the link.scale argument specifies
# the functional form of the error scale
# The available options are:
# 1. identity --- sigma_i = w_i * tau
# 2. log      --- log(sigma_i) = w_i * tau  => sigma_i = exp(w_i * tau_i)
# 3. sqrt     --- sqrt(sigma_i) = w_i * tau => sigma_i = (w_i * tau_i) ^ 2

# Extract the obtained estimates
beta_est <- model_hetprobit$coefficients$mean        # coefficient estimates of the
                                                     # main equation
tau_est <- model_hetprobit$coefficients$scale        # coefficient estimates of the
                                                     # variance equation

# Extract the estimated standard deviations
# of the random errors
sigma_est <- predict(model_hetprobit, type = "scale")
head(sigma_est, 10)

# Test for homoskedasticity:
# H0: tau = 0
lrtest(model_hetprobit, model_probit)

# Predict
prob_est <- predict(model_hetprobit, type = "response")      # probabilities
head(prob_est, 10)
y_li_est <- predict(model_hetprobit, type = "link")          # linear index
head(y_li_est, 10)

# Compute the marginal effect
# for an individual
Boris <- data.frame(income = 55000,                  # Boris's characteristics
                    age = 35,                        # in a data frame
                    educ_3 = 1,
                    stable = 1)
# Preliminary calculations
prob_Boris <- predict(model_hetprobit, newdata = Boris,      # estimated probability of
                      type = "response")                     # default for Boris
li_Boris_adj <- predict(model_hetprobit, newdata = Boris,    # estimated ratio of Boris's linear
                        type = "link")                       # index to the standard deviation
                                                             # of the random error
sigma_Boris <- predict(model_hetprobit, newdata = Boris,     # estimated standard deviation
                       type = "scale")                       # of Boris's random error
li_Boris <- li_Boris_adj * sigma_Boris                       # estimated linear index for Boris

# Use the built-in function
ME_Boris <- margins(model_hetprobit, data = Boris)
summary(ME_Boris)

# Compute the marginal effect analytically
ME_age_1 <- dnorm(li_Boris, sd = sigma_Boris) *
            (beta_est["age"] - li_Boris * tau_est["age"])

# Compute the marginal effect by
# numerical differentiation
delta <- 1e-6                                        # increment
Boris_delta <- Boris
Boris_delta$age <- Boris$age + delta                 # increment the age
prob_Boris_delta <- predict(model_hetprobit,
                            newdata = Boris_delta,
                            type = "response")
ME_age_2 <- (prob_Boris_delta - prob_Boris) / delta

# EXERCISES (* - moderate, ** - hard, *** - very hard)
# 1.1. Using the built-in Mroz87 data from the sampleSelection
#      package and a probit model with a heteroskedastic random
#      error, determine how the probability of labor force
#      participation (lfp) is affected by age (age), education (educ),
#      living in a city (city) and the number of children
#      (kids5 and kids618). Heteroskedasticity is assumed to be
#      possibly driven by age and the level of education.
#      Then, for a 28-year-old individual without higher education
#      and with an income of 20000, estimate:
#      1) the probability of employment
#      2) the marginal effect of age on the probability of employment
#      3) the marginal effect of living in a city on the probability of employment
#      4*) the marginal effect of age on the probability of employment,
#          if age enters the linear index quadratically
#      5) repeat the previous points using different specifications
#         of the variance equation: see the link.scale argument
#      6**) the standard error of the estimated probability of employment

#---------------------------------------------------
# Additional materials
#---------------------------------------------------

# Test the homoskedasticity hypothesis
# using an LM test
HetprobitLnL <- function(x,                          # coefficients
                         y,                          # dependent variable
                         X,                          # regressors of the main equation
                         W,                          # regressors of the variance equation
                         scale_fn = exp,             # function of the variance equation
                         is_aggregate = TRUE)        # return the log-likelihood (TRUE)
                                                     # or individual contributions (FALSE)
{
  m_X <- ncol(X)
  m_W <- ncol(W)
  beta <- matrix(x[1:m_X], ncol = 1)                 # vector of beta coefficients and
  tau <- matrix(x[(m_X + 1):(m_X + m_W)], ncol = 1)  # vector of additional parameters,
                                                     # converted to one-column matrices
  y_li_mean <- X %*% beta                            # estimate of the linear index
  y_li_scale <- W %*% tau                            # of the latent variable
  y_li_scale_fn <- scale_fn(y_li_scale)

  n_obs <- nrow(X)                                   # number of observations
  L_vec <- matrix(NA, nrow = n_obs,                  # column vector of the observations'
                  ncol = 1)                          # contributions to the likelihood
  is_y_0 <- (y == 0)                                 # vector of conditions y = 0
  is_y_1 <- (y == 1)                                 # vector of conditions y = 1
  L_vec[is_y_1] <- pnorm(y_li_mean[is_y_1],
                         sd = y_li_scale_fn[is_y_1])     # contribution of observations with yi = 1
  L_vec[is_y_0] <- 1 - pnorm(y_li_mean[is_y_0],
                             sd = y_li_scale_fn[is_y_0]) # contribution of observations with yi = 0
  lnL_vec <- log(L_vec)                              # log contributions

  if(!is_aggregate)                                  # return the individual contributions
  {                                                  # if requested
    return(lnL_vec)
  }

  lnL <- sum(lnL_vec)                                # log-likelihood
  return(lnL)
}

# Extract the data
df_hetprobit <- model.frame(model_hetprobit)         # all regressors
X_mat <- cbind(1, as.matrix(df_hetprobit[            # regressors of the
  names(df_hetprobit) %in% names(beta_est)]))        # main equation
W_mat <- as.matrix(df_hetprobit[                     # regressors of the
  names(df_hetprobit) %in% names(tau_est)])          # variance equation

# Extract the estimates of the restricted model
x_est_R <- c(model_probit$coefficients,
             rep(0, ncol(W_mat)))
n_R <- length(x_est_R)
# add names
names(x_est_R)[(n_R - 1):n_R] <- paste(colnames(W_mat),  # for readability
                                       "sigma")
print(x_est_R)

# Evaluate the likelihood of the full model at the point
# given by the estimates of the restricted model
lnL_R <- HetprobitLnL(x_est_R, df_hetprobit[, 1],
                      X_mat, W_mat, exp)
lnL_R_grad <- grad(func = HetprobitLnL,              # compute the gradient of this
                   x = x_est_R,                      # function numerically
                   y = df_hetprobit[, 1],
                   X = X_mat, W = W_mat,
                   scale_fn = exp)                   # replace exp with function(x)
                                                     # {
                                                     #   return(abs(x + 1))
                                                     # }
                                                     # and check that the result does not change
lnL_R_grad <- matrix(lnL_R_grad, ncol = 1)           # gradient as a one-column matrix
lnL_R_Jac <- jacobian(func = HetprobitLnL,           # estimate the asymptotic covariance
                      x = x_est_R,                   # matrix via the numerically computed
                      y = df_hetprobit[, 1],         # Jacobian, since the Hessian cannot
                      X = X_mat, W = W_mat,          # be computed numerically with
                      scale_fn = exp,                # sufficient accuracy
                      is_aggregate = FALSE,
                      method.args = list(r = 8))
as_cov_est <- solve(t(lnL_R_Jac) %*% lnL_R_Jac)      # estimate of the asymptotic covariance
                                                     # matrix built from the Jacobian, since
                                                     # the numerical Hessian is very inaccurate

# Perform the test
LM_value <- t(lnL_R_grad) %*%                        # Lagrange multiplier
            as_cov_est %*%                           # test statistic
            lnL_R_grad
p_value <- 1 - pchisq(LM_value, df = 2)              # p-value of the test

#---------------------------------------------------
# Part 2. Testing the hypothesis of normally
#         distributed random errors
#---------------------------------------------------

# -----
# We learn how to:
# 1. Test the hypothesis that the random error
#    is normally distributed
# 2. Use flexible distributions of the random errors
# -----

# Estimate the probit model
model_probit <- glm(formula = default2 ~ log(income) + age + I(age ^ 2) +
                              educ_3 + educ_2 + stable +
                              I(stable * log(income)) + sqrt(credit),
                    data = h,
                    family = binomial(link = "probit"))
summary(model_probit)

# Write the likelihood function for the model
# with a random error from the Pearson family
ProbitLnLExtended <- function(x,                     # vector of parameter values
                              y,                     # dependent variable
                              X,                     # matrix of independent variables
                              is_aggregate = TRUE)   # if TRUE return the log-likelihood,
                                                     # if FALSE return the vector of contributions
{
  beta <- matrix(x[-c(1, 2)], ncol = 1)              # vector of beta coefficients and
  t <- matrix(x[c(1, 2)], ncol = 1)                  # vector of additional parameters,
                                                     # converted to one-column matrices
  y_li <- X %*% beta                                 # estimate of the linear index
  y_est <- y_li + t[1] * y_li ^ 2 +                  # estimate of the expectation
           t[2] * y_li ^ 3                           # of the latent variable

  n_obs <- nrow(X)                                   # number of observations
  L_vec <- matrix(NA, nrow = n_obs,                  # column vector of the observations'
                  ncol = 1)                          # contributions to the likelihood
  is_y_0 <- (y == 0)                                 # vector of conditions (y = 0)
  is_y_1 <- (y == 1)                                 # vector of conditions (y = 1)
  L_vec[is_y_1] <- pnorm(y_est[is_y_1])              # contribution of observations with yi = 1
  L_vec[is_y_0] <- 1 - pnorm(y_est[is_y_0])          # contribution of observations with yi = 0
  lnL_vec <- log(L_vec)                              # log contributions

  if(!is_aggregate)                                  # return the individual contributions
  {                                                  # if requested
    return(lnL_vec)
  }

  lnL <- sum(lnL_vec)                                # log-likelihood
  return(lnL)
}

# Use the function we created
# Model estimates under the restriction
# imposed by the null hypothesis
beta_est <- coef(model_probit)                       # take the estimates from the ordinary probit
beta_R <- c(0, 0, beta_est)                          # model and set the additional parameters
names(beta_R)[c(1, 2)] <- c("t1", "t2")              # to the values implied by
                                                     # the null hypothesis

# Create the regressor matrix
X_mat <- as.matrix(model.frame(model_probit))        # take the data frame of regressors and
X_mat[, 1] <- 1                                      # turn it into a matrix, replacing the
colnames(X_mat)[1] <- "Intercept"                    # dependent variable with a constant
head(X_mat, 5)

# Apply the function
lnL_R <- ProbitLnLExtended(beta_R, h$default2, X_mat)    # restricted log-likelihood, which
                                                         # coincides with the log-likelihood
                                                         # of the ordinary probit model
lnL_R_grad <- grad(func = ProbitLnLExtended,         # compute the gradient of this
                   x = beta_R,                       # function numerically
                   y = h$default2,
                   X = X_mat)
lnL_R_grad <- matrix(lnL_R_grad, ncol = 1)           # gradient as a one-column matrix
lnL_R_Jac <- jacobian(func = ProbitLnLExtended,      # compute the Jacobian of this
                      x = beta_R,                    # function numerically
                      y = h$default2,
                      X = X_mat,
                      is_aggregate = FALSE)
as_cov_est <- solve(t(lnL_R_Jac) %*% lnL_R_Jac)      # estimate of the asymptotic covariance
                                                     # matrix built from the Jacobian, since
                                                     # in this case the numerical Hessian
                                                     # is very inaccurate

# Perform the test
LM_value_1 <- t(lnL_R_grad) %*%                      # Lagrange multiplier
              as_cov_est %*%                         # test statistic
              lnL_R_grad
p_value_1 <- 1 - pchisq(LM_value_1, df = 2)          # p-value of the LM test

# Using the regression on a vector of ones

# Extract the data frame containing
# the model variables
d <- model.frame(model_probit)                       # all variables

# Compute preliminary quantities
y_li_est <- predict(model_probit)
F_est <- pnorm(y_li_est)
f_est <- dnorm(y_li_est)

# Compute the generalized residuals
gr <- ((d[, 1] - F_est) /                            # generalized residual
       (F_est * (1 - F_est))) * f_est

# Derivatives with respect to the coefficients
d_beta <- apply(X_mat, 2, function(x)                # derivatives with respect to
{                                                    # the regression coefficients
  x * gr
})
d_t1 <- (gr * y_li_est ^ 2)                          # derivative with respect to t1
d_t2 <- (gr * y_li_est ^ 3)                          # derivative with respect to t2

# Compare the analytical and numerical derivatives
grad_df <- data.frame("Numeric" = lnL_R_grad,
                      "Analytical" = colSums(cbind(d_t1, d_t2, d_beta)))
rownames(grad_df) <- c("t1", "t2", colnames(X_mat))
print(grad_df)

# Run the LM test
n <- nrow(d)                                         # number of observations
LM_df <- data.frame("my_ones" = rep(1, n),           # vector of ones
                    "d_" = d_beta,
                    d_t1, d_t2)
head(LM_df, 5)
ones_regression <- summary(lm(my_ones~. + 0,         # regression on the vector of ones
                              data = LM_df))         # without a constant
R2 <- ones_regression$r.squared                      # R-squared of the regression
LM_value_2 <- R2 * n                                 # LM statistic
p_value_2 <- 1 - pchisq(q = LM_value_2, df = 2)

# Compare the results and make sure
# that they coincide exactly
c(LM_value_1, LM_value_2)                            # compare the statistics
c(p_value_1, p_value_2)                              # compare the p-values

# To allow for possible deviations of the distribution
# from normality, one can use the Gallant and Nychka model
model_hpa <- hpaBinary(formula = default2 ~ I(-log(income)) + age + I(age ^ 2) +
                                 educ_3 + educ_2 + stable +
                                 I(stable * log(income)) + sqrt(credit),
                       data = h, K = 3)
# K is the number of parameters of the Gallant and Nychka distribution
# The coefficient on the first regressor is fixed at one, so one should
# either use a regressor whose coefficient is expected to be positive,
# or flip the sign of that regressor
summary(model_hpa)
plot(model_hpa)                                      # estimated density of the
                                                     # random errors
plot(density(eps2))

# EXERCISES (* - moderate, ** - hard, *** - very hard)
# 2.1. Test whether the assumption of a normally distributed
#      random error holds, using only log income and age
#      as regressors, with:
#      1) the first approach (the likelihood function)
#      2) the second approach (the regression on a vector of ones)
# 2.2. Using the built-in Mroz87 data from the sampleSelection
#      package, determine how the probability of labor force
#      participation (lfp) is affected by age (age), education (educ),
#      living in a city (city) and the number of children
#      (kids5 and kids618). Test the normality hypothesis using:
#      1) the first approach (the likelihood function)
#      2) the second approach (the regression on a vector of ones)
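
# A small, hedged sketch (not part of the original course script): the marginal
# effects above were computed for the heteroskedastic model, and the same
# numerical-differentiation idea can be applied to the ordinary probit model
# estimated in Part 2. The object names below (d_age, h_delta, p_0, p_1,
# ME_age_probit) and the step size are chosen here purely for illustration.
d_age <- 1e-6                                        # small increment for age
h_delta <- h
h_delta$age <- h_delta$age + d_age
p_0 <- predict(model_probit, newdata = h, type = "response")
p_1 <- predict(model_probit, newdata = h_delta, type = "response")
ME_age_probit <- (p_1 - p_0) / d_age                 # individual marginal effects of age
mean(ME_age_probit)                                  # average marginal effect of age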
/2. Тестирование гипотез о спецификации в моделях бинарного выбора.r
no_license
bogdanpotanin/Microeconometrics2
R
false
false
33,761
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/playmoviespartner_functions.R \docType{package} \name{playmoviespartner_googleAuthR} \alias{playmoviespartner_googleAuthR} \alias{playmoviespartner_googleAuthR-package} \title{Google Play Movies Partner API Gets the delivery status of titles for Google Play Movies Partners.} \description{ Auto-generated code by googleAuthR::gar_create_api_skeleton at 2017-03-05 19:57:46 filename: /Users/mark/dev/R/autoGoogleAPI/googleplaymoviespartnerv1.auto/R/playmoviespartner_functions.R api_json: api_json } \details{ Authentication scopes used are: \itemize{ \item https://www.googleapis.com/auth/playmovies_partner.readonly } }
/googleplaymoviespartnerv1.auto/man/playmoviespartner_googleAuthR.Rd
permissive
GVersteeg/autoGoogleAPI
R
false
true
702
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcode.R
\name{mcode}
\alias{mcode}
\title{Multivariate recode}
\usage{
mcode(..., recodes, .fill = NA, .result, .factors = c("character", "numeric"))
}
\arguments{
\item{\dots}{One or more vectors of equal length.}

\item{recodes}{A \sQuote{car}-like set of recode commands.}

\item{.fill}{A single value to use to fill in missing values in the resulting
branched variables.}

\item{.result}{A character vector specifying the class of the resulting vector.}

\item{.factors}{A character string indicating whether to treat factors in
\code{\dots} as character (the default) or numeric.}
}
\value{
A vector of length equal to the input vector(s) in \code{\dots}.
}
\description{
Recode one or more vectors to a single vector
}
\details{
Recoding is a basic step in any analysis. It is fairly easy to recode a
single variable (e.g. by replacing values in the vector or using the
\code{recode} function in \bold{car} or \code{mapvalues} in \bold{plyr}),
but it can be cumbersome to recode multiple variables into a single vector.
This is useful when, for example, a factorial experiment has the group for
each factor stored as separate variables, but analysis will be performed
across the entire design (rather than factor-by-factor), or when it is
necessary to create a factor representing multivariate combinations of
demographic groups (e.g., an age-sex-race stratification) from a set of
separate input vectors representing each demographic variable. That would
normally require a series of \code{\link[base]{ifelse}} statements or
complex use of boolean arguments. This function aims to make it simple to
create a single vector from multiple input vectors in a manner more useful
than \code{\link[base]{interaction}}.

The syntax borrows from the \code{recode} function in the \bold{car}
package. This really only works for categorical variables, but a
continuous variable could be collapsed with a standard recode() command
before being used with this.
}
\examples{
# RECODE A SINGLE VARIABLE BASED ON A `car::recode`-STYLE SCHEME
r <- mcode(c(1,3,5,4,2), recodes = "5=1;4=2;3=3;2=4;1=5")
stopifnot(identical(r, c(5,3,1,2,4)))

# WORK WITH MISSING VALUES:
mcode(c(1,1,1,1,1,NA), c(1,1,2,2,NA,1),
      recodes = "c(1,1)=1;c(1,2)=2;c(1,NA)=3")

# COMPARE `mcode` TO VARIOUS ALTERNATIVES
a <- c(1,2,1,2,1,NA,2,NA)
b <- c(1,1,2,2,NA,1,NA,2)

# recode using `mcode`
m1 <- mcode(a, b, recodes = "c(1,1)=1;c(1,2)=2;c(2,1)=3;c(2,2)=4")

# compare to `ifelse`:
m2 <- ifelse(a == 1 & b == 1, 1,
             ifelse(a == 1 & b == 2, 2,
                    ifelse(a == 2 & b == 1, 3,
                           ifelse(a == 2 & b == 2, 4, NA))))
identical(m1, m2)

# compare to a sequence of extraction statements
m3 <- rep(NA, length(a))
m3[a == 1 & b == 1] <- 1
m3[a == 1 & b == 2] <- 2
m3[a == 2 & b == 1] <- 3
m3[a == 2 & b == 2] <- 4
identical(m1, m3)

# compare to interaction
m4 <- interaction(a, b)
levels(m4) <- c("1.1" = 1, "1.2" = 2, "2.1" = 3, "2.2" = 4)[levels(m4)]
m4 <- as.numeric(as.character(m4))
identical(m1, m4)

r <- "c(1,1,1,1)=1;c(1,1,1,0)=2;c(1,1,0,1)=3;c(1,0,1,1)=4;c(0,1,1,1)=5"
mcode(c(rep(1,9),0),
      c(rep(0,5),rep(1,5)),
      c(rep(1,8),0,1),
      c(rep(1,5),rep(0,2),rep(1,3)),
      recodes = r)
}
\seealso{
\code{\link{mergeNA}}
}
/man/mcode.Rd
no_license
leeper/mcode
R
false
true
3,345
rd
## File Name: invariance_alignment_summary_optimization.R ## File Version: 0.07 invariance_alignment_summary_optimization <- function(object, digits) { align.pow <- object$align.pow align.scale <- object$align.scale cat("Optimization Function Value", "=", round(object$fopt[1],digits), "\n" ) cat("Optimizer", "=", object$res_optim$optimizer, "\n" ) cat("Fixed", "=", object$fixed, "\n" ) # cat("Number of iterations", "=", object$res_optim$iter, "\n" ) cat("Converged", "=", object$res_optim$converged, "\n" ) cat("Alignment Power Values","=", round(align.pow[1], digits), round(align.pow[2], digits), "\n") cat("Alignment Scale Values","=", round(align.scale[1], digits), round(align.scale[2], digits), "\n") cat("Epsilon Value", "=", object$eps, "\n" ) }
/R/invariance_alignment_summary_optimization.R
no_license
hyunsooseol/sirt
R
false
false
855
r
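# A small hypothetical usage sketch, not from the sirt package, showing the
# fields the printer above expects on its `object` argument; all values are
# made up purely for illustration.
obj <- list(fopt = 0.1234,
            res_optim = list(optimizer = "optim", converged = TRUE),
            fixed = TRUE,
            align.pow = c(0.25, 0.25),
            align.scale = c(1, 1),
            eps = 1e-3)
invariance_alignment_summary_optimization(object = obj, digits = 3)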
#'@export get_perm_peaks <- function(file_name, chunk, perms, nchunks, chunksize, margin, smooth_ix_range, peak_pos_range, stat_fun, resid_fun, smooth_func, cores, libs, bandwidth, X, trait, covariates, s0, z0, zmin, pheno_transformation=NULL){ #Check chunk argument stopifnot(class(chunk)=="numeric" | class(chunk)=="integer") #Open pheno file dm <- detect_dm_csv(filename=file_name, header=TRUE, sep=" ") dm$columns$type <- rep(c("integer", "double"), c(1, nrow(dm$columns)-1)) df_laf <- laf_open(dm) #Make sure the trait data is sorted correctly if(!all(dm$columns$name[-1] %in% X[[1]])){ stop("Not all of the samples in ", file_name, " are in the trait file.\n") } X <- X[match(dm$columns$name[-1], X[[1]]),] if(chunk==1) lmargin <- 0 else lmargin <- margin if(chunk==nchunks) rmargin <- 0 else rmargin <- margin goto(df_laf, max(1, (chunk-1)*chunksize-lmargin + 1)) dat <- next_block(df_laf, nrows=chunksize + lmargin + rmargin) close(df_laf) #If necessary, transform phenotype if(!is.null(pheno_transformation)){ cat("Adjusting phenotype.\n") dat[,-1] <- apply(dat[,-1], MARGIN=2, FUN=function(y){ pheno_transformation(y) }) if(any(is.na(dat[,-1])) | any(!is.finite(dat[,-1]))) stop("ERROR: Phenotype adjustment gives infinite or missing value.\n") } if(length(covariates) > 0){ cat("Regressing covariates on phenotype.\n") dat[,-1] <- apply(dat[,-1], MARGIN=1, FUN=function(y){ resid_fun(y, X, covariates) }) } pos_out <- dat$pos[smooth_ix_range[1]:smooth_ix_range[2]] cat("Calculating test statistics for ", ncol(perms), " permutations.\n") perm_traits <- apply(perms, 2, function(ix){X[[trait]][ix]}) perm_stats <- stats_many(Y=dat[,-1], X = perm_traits, s0, cores, stat_fun, libs) perm_stats_sm <- apply(perm_stats, 2, function(sts){ smooth_func(x=dat$pos, y=sts, xout=pos_out, bandwidth=bandwidth) }) if(!sum(is.na(perm_stats_sm))==0){ cols <- unique(which(is.na(perm_stats_sm), arr.ind = TRUE)[,2]) cat("NAs found: ", chunk, cols, "\n") } cat("Calculating variance of smoothed permutation statistics.\n") perm_var <- apply(perm_stats_sm[pos_out >= peak_pos_range[1] & pos_out <= peak_pos_range[2], ], 1, var, na.rm=TRUE) cat("Finding peaks in smoothed permutation statistics.\n") perm_peaks <- apply(perm_stats_sm, 2, function(sm_sts){ mxlist(sm_sts, z0, zmin, pos=pos_out) }) perm_peaks <- as.data.frame(do.call(rbind, perm_peaks)) names(perm_peaks) <- c("mx", "pos", "start", "stop", "ix1", "ix2") close(df_laf) return(list("perm_var"=perm_var, "perm_peaks"=perm_peaks)) }
/R/get_perm_peaks.R
no_license
jean997/fret
R
false
false
2,744
r
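# A hedged sketch, not part of the fret package, of the LaF chunked-read
# pattern that get_perm_peaks() relies on, shown in isolation. "pheno.txt" is a
# hypothetical whitespace-separated phenotype file and the chunk size is arbitrary.
library(LaF)
dm <- detect_dm_csv(filename = "pheno.txt", header = TRUE, sep = " ")
laf <- laf_open(dm)
goto(laf, 1)                            # jump to the first data row
block <- next_block(laf, nrows = 1000)  # read one chunk as a data.frame
close(laf)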
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heat_tree--legend.R
\name{inverse}
\alias{inverse}
\title{Generate the inverse of a function}
\usage{
inverse(f, interval)
}
\arguments{
\item{f}{(\code{function} with one argument) A function to derive an inverse from}

\item{interval}{(\code{character} of length 2) The range of values the inverse function can return.}
}
\value{
(\code{function}) Return the inverse of the given function
}
\description{
http://stackoverflow.com/questions/10081479/solving-for-the-inverse-of-a-function-in-r
}
\keyword{internal}
/man/inverse.Rd
permissive
seninp/metacoder
R
false
true
598
rd
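# A minimal sketch of the documented signature above, assuming a uniroot-based
# implementation in the spirit of the Stack Overflow answer it links to; this is
# not the package's actual source.
inverse <- function(f, interval) {
    function(y) {
        stats::uniroot(function(x) f(x) - y, interval = interval)$root
    }
}
# e.g. invert the square function on [0, 10]
sqrt_approx <- inverse(function(x) x ^ 2, c(0, 10))
sqrt_approx(4)  # approximately 2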
## makeCacheMatrix creates a special "matrix" object that can cache its inverse;
## cacheSolve computes the inverse of that object, returning the cached value
## when it has already been calculated.

## makeCacheMatrix returns a list of functions to set/get the matrix
## and to set/get its cached inverse
makeCacheMatrix <- function(x = matrix()) {
  ## set the cached inverse to NULL
  inverse <- NULL
  set <- function(y) {
    x <<- y          ## copy the matrix to the variable x
    inverse <<- NULL ## reset the cached inverse
  }
  get <- function() x ## return the matrix
  setinverse <- function(value) inverse <<- value ## cache the inverse value
  getinverse <- function() inverse ## return the cached inverse value
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, using the cached value when it is available
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getinverse() ## get the currently cached inverse
  if(!is.null(inverse)) {   ## if not NULL, return the cached data
    message("getting cached data")
    return(inverse)
  }
  data <- x$get()             ## get the matrix data
  inverse <- solve(data, ...) ## calculate the inverse of the matrix
  x$setinverse(inverse)       ## store the inverse in the cache
  inverse
}
/cachematrix.R
no_license
GalPeled/ProgrammingAssignment2
R
false
false
1,169
r
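# A short usage sketch for the two functions above, not part of the original
# assignment file; any invertible matrix works here.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)  # computes the inverse and caches it
cacheSolve(m)  # prints "getting cached data" and returns the cached inverse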
##==================================================
# Dimensions for viewer
##==================================================

niceDimensions <- function(n, nrow = NULL, ncol = NULL){
    sqn <- sqrt(n)
    chooseDims <- c(!is.null(nrow) & !is.null(ncol),
                    !is.null(nrow) & is.null(ncol),
                    is.null(nrow) & !is.null(ncol),
                    is.null(nrow) & is.null(ncol))
    Dims <- switch(
        which(chooseDims),
        "1" = c(nrow, ncol),
        "2" = c(nrow, ceiling(n / nrow)),
        "3" = c(ceiling(n / ncol), ncol),
        "4" = c(ceiling(n / ceiling(sqn)), ceiling(sqn))
    )
    return(Dims)
}

##================================================
# Multiplot
##================================================
# ... : ggplots
#       (plots can be named, except with the name of an existing parameter)
# plotList : list of ggplots
#       (plots can be named)
# ncol : number of columns
# byrow : TRUE / FALSE
#       whether the list of graphics is arranged by row (TRUE) or by column (FALSE)
# withPlotsTitle
# mainTitle

multiplot <- function(...,
                      plotList = NULL,
                      nrow = NULL,
                      ncol = NULL,
                      byrow = TRUE,
                      row.heights = NULL,
                      col.widths = NULL,
                      withPlotsTitle = TRUE,
                      mainTitle = NULL
){
    require(grid)
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Make a list from the ... arguments and plotList
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Lplots <- c(list(...), plotList)
    nb.plots <- length(Lplots)
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # DIMENSION OF THE LAYOUT
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Dims <- niceDimensions(nb.plots, nrow, ncol)
    NR <- Dims[1]
    NC <- Dims[2]
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # MAKE THE LAYOUT PANEL
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    mat_layout <- matrix(
        seq(1, NC * NR),
        nrow = NR,
        ncol = NC,
        byrow = byrow
    )
    # Parameters not yet available for users to choose
    # Display graphs
    if(nb.plots== 1){
        print(Lplots[[1]])
    }else{
        okMainTitle <- !is.null(mainTitle)
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        ## DIMENSION LAYOUT
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # row heights
        nrowLayout <- 2 * NR - 1
        nbHei <- length(row.heights)
        row.heights <- c(
            row.heights[1:min(nbHei, NR)],
            if(nbHei < NR){rep(1, NR - nbHei)}
        )
        layout_rows <- c(3, rep(c(0.3, 3), times = NR-1))
        seqrow <- seq(1, nrowLayout, 2)
        layout_rows[seqrow] <- layout_rows[seqrow] * row.heights
        if(okMainTitle){
            mt <- 1
            margeTitle <- 1 * length(gregexpr("\n", mainTitle)[[1]])
            layout_rows <- c(margeTitle, layout_rows)
            nrowLayout <- nrowLayout + 1
        }else{
            mt <- 0
        }
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        ## column widths
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        okMainLegend <- FALSE
        ncolLayout <- NC + ifelse(okMainLegend, 1, 0)
        nbWid <- length(col.widths)
        col.widths <- c(col.widths[1:min(nbWid, NC)],
                        if(nbWid < NC){rep(1, NC - nbWid)})
        layout_cols <- c(5 * col.widths,
                         if(okMainLegend){ 2 }
        )
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Set up the page
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        grid.newpage()
        pushViewport(
            viewport(
                layout = grid.layout(
                    nrow = nrowLayout,
                    ncol = ncolLayout,
                    widths = grid::unit(layout_cols, units = "null"),
                    heights = grid::unit(layout_rows, units = "null")
                )
            ))
        mainTitleSets <- bquote(bold(.(mainTitle)))
        ## Title of the Page
        if(okMainTitle){
            grid.text(
                mainTitleSets,
                vp = viewport(
                    layout.pos.row = 1,
                    layout.pos.col = 1:ncolLayout
                )
            )
        }
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Make each plot, in the correct location
        # Get the i,j matrix positions of the regions that contain this subplot
        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        for(i in 1:nb.plots){
            # layout coordinates of the subplot
            matchidx <- as.data.frame(
                which(mat_layout == i, arr.ind = TRUE)
            )
            iPlot <- Lplots[[i]]
            if( length(iPlot)!=0 &
                !(class(iPlot)[1] %in% c("logical", "NULL")) ){
                # title of the subplot
                if(is.na(withPlotsTitle)){
                    iTitle <- names(Lplots)[i]
                    iPlot <- iPlot + ggtitle(iTitle)
                }else{
                    if(withPlotsTitle){
                        iPlot <- iPlot
                    }else{
                        iPlot <- iPlot + ggtitle(NULL)
                    }
                }
                ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                # print the plot
                ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                iRow <- 2 * matchidx$row - 1 + mt
                iCol <- matchidx$col
                print(
                    iPlot,
                    vp = viewport(
                        layout.pos.row = iRow,
                        layout.pos.col = iCol
                    )
                )
            } # end if
            ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # the ith graph is printed
            ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        } # end for
    } # end if
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # END
    ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    invisible()
}
/code/multiplot.R
no_license
CedricBezy/stat_sante_git
R
false
false
6,064
r
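# A minimal usage sketch for multiplot(), not part of the original file; it
# assumes ggplot2 is installed and uses two simple plots of the built-in mtcars data.
library(ggplot2)
p1 <- ggplot(mtcars, aes(wt, mpg)) + geom_point() + ggtitle("mpg vs wt")
p2 <- ggplot(mtcars, aes(factor(cyl))) + geom_bar() + ggtitle("count by cyl")
multiplot(p1, p2, ncol = 2, mainTitle = "Two panels side by side")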
### 2nd AP 2021.1
### Transportation Data Analysis Techniques (Técnica de Análise de Dados de Transportes)
### Prof. Dr. Manoel Mendonça de Castro Neto
### Student: Nelson de O. Quesado Filho
### Student ID: 504117

# Libraries used
library(tidyverse)
library(wesanderson)
library(lmtest)

# Question 1:
# (...) assess whether the aggregate (crushed stone) from the 1st AP has a mean
# roughness below 440, since that would imply the need to look for another quarry.
# The sample alongside, sorted in ascending order, is the same one from the 1st AP.
# Answer the items below:

# a) (2.0) To meet the goal of the analysis, carry out a hypothesis test, stating
# the null and alternative hypotheses, the significance level, the p-value, the
# test result (whether or not H0 is rejected) and the conclusion of the analysis.
# State the assumptions of your analysis.

# ANSWER a)
# Before moving on to the hypothesis test, a descriptive analysis of the sample
# is carried out.

rugosidade <- c(57.723 ,76.145 ,101.936 ,110.534 ,131.412 ,147.378 ,159.66 ,162.116
                ,168.257 ,169.485 ,182.994 ,195.276 ,197.732 ,198.96 ,205.101 ,205.101
                ,219.839 ,222.295 ,240.717 ,241.946 ,260.368 ,267.737 ,278.79 ,283.703
                ,283.703 ,292.3 ,299.669 ,310.722 ,310.722 ,325.46 ,336.513 ,342.654
                ,354.935 ,359.848 ,375.814 ,393.008 ,401.605 ,426.168 ,434.765 ,438.45
                ,458.1 ,459.328 ,463.013 ,475.294 ,478.978 ,498.629 ,507.226 ,515.823
                ,533.017 ,546.527 ,550.211 ,555.124 ,555.124 ,573.546 ,587.056 ,591.968
                ,601.794 ,605.478 ,623.9 ,648.463 ,663.201 ,688.992 ,713.555 ,766.366
                ,794.613 ,800.754 ,837.598 ,918.656 ,937.078 ,945.675 ,949.36)

data.frame(rugosidade) %>%
    summarise(avg = mean(rugosidade),
              s = sd(rugosidade),
              coef_var = avg/s,
              var = s^2,
              n = n())

# The sample has a mean roughness of 422.76, i.e. below the value desired for
# the application. This statistic alone - the mean - is known to be insufficient
# for the intended analysis.

# There is also a standard deviation of 230.74, giving a coefficient of
# variation of 1.83. This value is interpreted as high and is likely to
# contribute to an excessively wide confidence interval.

# The sample also has a reasonable number of observations (71) for the
# intended type of analysis.

data.frame(rugosidade) %>%
    ggplot() +
    geom_point(aes(x = c(1:71), y = sort(rugosidade), color = "A"), alpha = .8) +
    geom_boxplot(aes(x = c(1:71), y = rugosidade, color = "B"), alpha = 0) +
    scale_color_manual(values = wes_palette(n = 2, name = "Darjeeling1"),
                       name = "", breaks = c("A"), label = c("Rugosidade"),
                       guide = "legend") +
    labs(title = "Dispersão e Diagrama de Caixa",
         subtitle = paste("n =", length(rugosidade)),
         x = "Observação", y = "Rugosidade") +
    theme_minimal() +
    theme(plot.title = element_text(size = 14, face = "bold"))

data.frame(rugosidade) %>%
    ggplot() +
    geom_histogram(aes(x = rugosidade, y = ..density.., fill = "A"),
                   color = "white", alpha = .8, bins = 9) +
    geom_density(aes(x = rugosidade, fill = "B"), alpha = .7) +
    scale_fill_manual(values = wes_palette(n = 2, name = "Darjeeling1"),
                      name = "", breaks = c("A"), label = c("Rugosidade"),
                      guide = "legend") +
    labs(title = "Histograma de Frequência e Densidade de Probabilidade",
         subtitle = paste("n =", length(rugosidade)),
         x = "Rugosidade", y = "Probabilidade") +
    theme_minimal() +
    theme(plot.title = element_text(size = 14, face = "bold"))

# The plots show some right skew. Despite this, we decide to visually check
# how close the sample is to a normal distribution.

data.frame(rugosidade) %>%
    ggplot() +
    geom_density(aes(x = rugosidade, fill = "A"), alpha = .5) +
    geom_density(aes(x = qnorm(seq(0.003, 0.997, length = length(rugosidade)),
                               mean = mean(rugosidade),
                               sd = sd(rugosidade)), fill = "B"), alpha = .5) +
    scale_fill_manual(values = wes_palette(n = 2, name = "Darjeeling1"),
                      name = "Curva", breaks = c("A", "B"),
                      label = c("Amostra", "Normal"), guide = "legend") +
    labs(title = "Densidade de Probabilidade Amostral vs. Normal",
         subtitle = paste("n =", length(rugosidade)),
         x = "Rugosidade", y = "Probabilidade") +
    theme_minimal() +
    theme(plot.title = element_text(size = 14, face = "bold"))

data.frame(rugosidade) %>%
    ggplot() +
    stat_ecdf(aes(x = rugosidade, color = "A"), alpha = .8) +
    stat_ecdf(aes(x = qnorm(seq(0.003, 0.997, length = length(rugosidade)),
                            mean = mean(rugosidade),
                            sd = sd(rugosidade)), color = "B"), alpha = .8) +
    scale_color_manual(values = wes_palette(n = 2, name = "Darjeeling1"),
                       name = "Curva", breaks = c("A", "B"),
                       label = c("Amostra", "Distribuição Normal"), guide = "legend") +
    labs(title = "Distribuição de Probabilidade Acumulada",
         subtitle = paste("n =", length(rugosidade)),
         x = "Rugosidade", y = "Probabilidade") +
    theme_minimal() +
    theme(plot.title = element_text(size = 14, face = "bold"))

data.frame(rugosidade) %>%
    ggplot() +
    geom_qq(aes(sample = scale(rugosidade), color = "A"), alpha = .8) +
    geom_abline(aes(intercept = 0, slope = 1, color = "B")) +
    scale_color_manual(values = wes_palette(n = 2, name = "Darjeeling1"),
                       name = "", breaks = c("A"), label = c("rugosidade"),
                       guide = "legend") +
    labs(title = "Gráfico Quantil-Quantil",
         subtitle = paste("n =", length(rugosidade)),
         x = "Quantis Teóricos", y = "Quantis Observados") +
    theme_minimal() +
    theme(plot.title = element_text(size = 14, face = "bold"))

# The density, cumulative probability and quantile-quantile plots point to
# some similarity between the sample and the normal distribution.

# The Shapiro-Wilk test (whose null hypothesis is that the sample comes from a
# normally distributed population) is applied to check normality.
shapiro.test(rugosidade)

# The p-value of 0.0086 indicates that it would be rare for this sample to have
# come from a normal distribution, allowing that hypothesis to be rejected
# statistically.

# However, relying on the Central Limit Theorem, an interval inference for the
# mean can still be made by applying a hypothesis test with a large sample
# (a sample size of 30 is usually cited as reasonable for this kind of approach).

# CONSTRUCTION OF THE HYPOTHESIS TEST
# Ho: µ = 440  This hypothesis means using the population that produced the
#              sample, i.e. "doing nothing" (the hypothesis under test)
# Ha: µ < 440  This hypothesis means discarding the population that produced the
#              sample, i.e. looking for a new quarry.

# As is usual in research in this field, a 5% significance level is adopted as
# the evaluation criterion for the test. This level is interpreted as the
# probability of a type I error, i.e. rejecting the quarry when it is actually
# suitable for use.

# Since the population standard deviation is estimated from the sample standard
# deviation, the test must be a Student's t test with n-1 degrees of freedom.

# Visual representation of the t test:
df <- length(rugosidade) - 1
med <- mean(rugosidade)
se <- sd(rugosidade)/sqrt(length(rugosidade))
t_rug <- (med-440)/se # t value of the sample

# Shaded area
eixo_x <- 440 + qt(seq(.05, .9999, .001), df) * se
eixo_y <- dt((eixo_x-440)/se, df)

# Full t curve
curva <- data.frame(x = 440 + qt(seq(.0001, .9999, length = length(eixo_x)), df) * se,
                    y = dt(qt(seq(.0001, .9999, length = length(eixo_x)), df), df))

# Plot the curve
ggplot() +
    geom_line(aes(x = curva$x, y = curva$y)) +
    geom_area(aes(eixo_x, eixo_y, fill = "A"), alpha = .5) +
    geom_vline(aes(xintercept = 440 + qt(0.05, df) * se, color = "A"), linetype = 2) +
    geom_vline(aes(xintercept = med, color = "B"), size = 1) +
    labs(title = "Representação Gráfica do Teste T",
         subtitle = paste("n =", length(rugosidade), "| df =", df, "| alfa = 5%"),
         x = "Escore t", y = "Probabilidade") +
    scale_fill_manual(values = wes_palette(n = 1, name = "Darjeeling1"),
                      name = "", breaks = c("A"),
                      labels = c("Área Não Rejei. Ho"), guide ="legend") +
    scale_color_manual(values = wes_palette(n = 3, name = "Darjeeling1"),
                       name = "", breaks = c("A", "B"),
                       labels = c("Limite IC Média Pop.", "Valor-T Amostral"),
                       guide ="legend") +
    geom_text(aes(x = 375, y = .425,
                  label = paste("t-crítico =", round(qt(0.05, df), 1))),
              size = 4, color = wes_palette(n = 1, name = "Darjeeling1")) +
    geom_text(aes(x = 450, y = .475,
                  label = paste("valor-t =", round(t_rug, 1))),
              color = "#067D99", size = 4) +
    theme_minimal()

# Based on the hypothesis test, there is not enough statistical evidence to
# reject the null hypothesis, since the t value of the sample mean falls in the
# non-rejection region. In other words, the sample does not justify switching
# to a more suitable quarry.

# It is important to note that the test is only valid for random, independent
# samples, which was not verified in this analysis and is taken as an assumption.
# It is also assumed that the (unknown) population standard deviation is well
# represented by the sample standard deviation. Under this assumption, using the
# Student's t distribution instead of the normal distribution is considered
# appropriate.

# END OF ANSWER a)

# b) (1.0) What is the type II error of your analysis?

# ANSWER b)
# In this case, the type II error is the one in which the null hypothesis is not
# rejected even though it is false, i.e. the error that could have been made
# given the decision taken in item a) of this question.

# To compute the type II error, the population is assumed to have a mean below
# 440. As a premise of this evaluation, the population mean is taken to be equal
# to the sample mean. The population standard deviation is again estimated by
# the sample standard deviation.

med # sample mean = population mean

# Shaded area
eixo_x1 <- med + ( qt( seq( dt(((med + qt(0.05, df) * se)-med)/se, df),
                            .9999, length = length(eixo_x)), df)) * se
eixo_y1 <- dt((eixo_x1 - med)/se, df)

# Full t curve
curva1 <- data.frame(x = med + qt(seq(.0001, .9999, length = length(eixo_x1)), df) * se,
                     y = dt(qt(seq(.0001, .9999, length = length(eixo_x1)), df), df))

# Plot the curve
ggplot() +
    geom_line(aes(x = curva$x, y = curva$y)) +   # Ho
    geom_line(aes(x = curva1$x, y = curva1$y)) + # Ha
    geom_area(aes(eixo_x1, eixo_y1, fill = "A"), alpha = .5) + # ha
    geom_vline(aes(xintercept = c(440, mean(rugosidade)), color = c("B", "C")),
               linetype = 2) +
    labs(title = "Representação Gráfica do Erro Tipo II",
         subtitle = paste("n =", length(rugosidade), "| df =", df,
                          "| beta =", 1 - pt(qt(0.05, df), df)),
         x = "Escore t", y = "Probabilidade") +
    scale_fill_manual(values = wes_palette(n = 1, name = "Darjeeling1"),
                      name = "", breaks = c("A"),
                      labels = c("Erro Tipo II"), guide ="legend") +
    scale_color_manual(values = wes_palette(n = 3, name = "Darjeeling1"),
                       name = "", breaks = c("B", "C"),
                       labels = c("µ = 440 (Ho)",
                                  paste("µ =", round(mean(rugosidade),1))),
                       guide ="legend") +
    theme_minimal()

# END OF ANSWER b)

# c) (1.0) Estimate the mean roughness of the aggregate using a 95% confidence
# interval. To meet the goal of the analysis stated in the problem, is it more
# appropriate to use the hypothesis test from item (a) or this confidence
# interval from item (c)? Justify.

# For this analysis, a confidence interval for the population mean is estimated
# from the sample.

data.frame(inf = mean(rugosidade) + qt(.025, df)*(sd(rugosidade)/sqrt(length(rugosidade))),
           med = mean(rugosidade),
           sup = mean(rugosidade) + qt(.975, df)*(sd(rugosidade)/sqrt(length(rugosidade))))

# I find the hypothesis test more appropriate because it addresses the decision
# hypothesis with a known degree of certainty. The confidence interval, in turn,
# can support the decision and the interpretation of the context.

# END OF ANSWER c) AND OF QUESTION 1

# 02) (3.0) The aggregate in question 1 was in its natural state. In this
# question 2, you took another sample of the same aggregate and subjected it to
# a polishing treatment in order to make it less rough. The "Polido" column
# shows the resulting new sample, also sorted in ascending order. The goal of
# your analysis is to assess whether the treatment can reduce the mean roughness
# of the aggregate.

natural <- rugosidade

polida <- c(27.019 ,73.689 ,87.199 ,99.48 ,112.99 ,116.674 ,119.131 ,121.587
            ,126.499 ,144.922 ,152.291 ,153.519 ,153.519 ,160.888 ,162.116 ,163.344
            ,163.344 ,174.397 ,180.538 ,181.766 ,184.223 ,185.451 ,195.276 ,198.96
            ,201.417 ,210.014 ,211.242 ,251.771 ,251.771 ,257.911 ,266.509 ,277.562
            ,282.474 ,289.843 ,289.843 ,299.669 ,311.95 ,315.635 ,318.091 ,319.319
            ,325.46 ,346.338 ,351.251 ,361.076 ,365.989 ,370.901 ,377.042 ,385.639
            ,385.639 ,386.867 ,405.289 ,411.43 ,412.658 ,420.027 ,424.94 ,428.624
            ,437.221 ,483.891 ,491.26 ,499.857 ,515.823 ,535.473 ,578.459 ,593.196
            ,594.425 ,601.794 ,604.25 ,676.711 ,697.589 ,714.783 ,760.225 ,770.05
            ,783.56 ,846.195 ,962.87)

# a) (0.5) Present your analysis method.

# ANSWER a)
# This item assesses whether there was a significant change in mean roughness
# between the two samples. To that end, a t test will be performed (since the
# population standard deviation is unknown), one-sided (since the natural
# sample is presumed to have the higher mean roughness) and unpaired (since the
# samples are independent).
# END OF ANSWER a)

# b) (1.5) Present the results of your analysis.

# ANSWER b)
t.test(natural, polida, paired = FALSE, alternative = "greater")
# END OF ANSWER b)

# c) (1.0) Present the conclusion and the assumptions of your analysis.

# ANSWER c)
# Given that the null hypothesis of this test assumes that the difference
# between the means is not greater than 0, the p-value gives the probability of
# the samples arising under that condition. Judging this event to be rare
# (2.074%), the null hypothesis is rejected, which supports the claim that the
# applied process can reduce roughness.
# END OF ANSWER c)

# 3) (3.0) One of the most common indicators used to assess traffic quality is
# delay, which, simply put, can be understood as the time drivers lose stuck in
# congestion.
# Since delay is not an easy variable to collect, the city of Los Angeles hired
# you to assess whether the total annual delay in the city (in hours) could be
# estimated from its population. Based on a correlation and regression analysis,
# present your analysis method, results, conclusion and assumptions.

atrasos <- data.frame(populacao = c(9900,9900,9900,10500,10710,10920,11140,11305,11420,11760,11845,11950,12000,12090,12220,12280,12310,12330,12350,12350,12400,12500),
                      atraso = c(185569,202365,212548,250504,308512,428605,502848,585693,609125,604755,603530,614903,549822,589488,659825,640790,695408,681315,649165,632562,641847,623796))

# ANSWER TO QUESTION 3

# For the proposed assessment - correlation and regression - the sampling
# assumptions are that the observations must be independent and random.

# Analysis Method
# The proposed analysis method consists of the following steps:
# (i) assessment of the correlation between the variables using a scatter plot
#     and Pearson's correlation coefficient (R).
# (ii) assessment of the simple linear regression model and of the significance
#     of the estimated parameters.
# (iii) assessment of the regression model assumptions regarding normality,
#     homoscedasticity and independence of the residuals.

# Step (i)
atrasos %>%
    ggplot() +
    geom_point(aes(populacao, atraso, color = "A"), size = 2, alpha = .8) +
    labs(title = "Dispersão",
         subtitle = paste("n =", nrow(atrasos)),
         x = "População", y = "Atraso Médio") +
    scale_color_manual(values = wes_palette(n = 1, name = "Darjeeling1"),
                       name = "", breaks = c("A"), label = c("Amostra"),
                       guide = "legend") +
    theme_minimal()

# There appears to be a positive linear correlation between the variables.

cor(atrasos$populacao, atrasos$atraso, method = "pearson") # high correlation

# Pearson's correlation coefficient is 0.95, which may indicate a strong
# correlation between the variables.

# Step (ii)
reg <- lm(atrasos$atraso ~ atrasos$populacao)
summary(reg)

# The linear regression model has significant B0 and B1 parameters, both with
# p-values below 0.1%. That is, these values are estimated to be different from
# 0 and to influence the model. The adjusted R2 is very satisfactory, at 0.905.

atrasos %>%
    ggplot(aes(populacao, atraso)) +
    geom_point(aes(color = "A"), size = 2, alpha = .8) +
    stat_smooth(aes(color = "B"), method = lm) +
    labs(title = "Dispersão e Regressão Linear Simples",
         subtitle = paste("n =", nrow(atrasos)),
         x = "População", y = "Atraso Médio") +
    scale_color_manual(values = wes_palette(n = 2, name = "Darjeeling1"),
                       name = "", breaks = c("A", "B"),
                       label = c("Amostra", "Regressão"), guide = "legend") +
    theme_minimal()

# Step (iii)
# The plot and tests below assess the residual assumptions.

data.frame(reg$residuals) %>%
    ggplot(aes(c(1:22), reg.residuals)) +
    geom_point(aes(color = "A"), size = 2, alpha = .8) +
    labs(title = "Dispersão dos Resíduos",
         subtitle = paste("n =", nrow(atrasos)),
         x = "Observação", y = "Atraso Médio") +
    scale_color_manual(values = wes_palette(n = 1, name = "Darjeeling1"),
                       name = "", breaks = c("A"), label = c("Resíduos"),
                       guide = "legend") +
    theme_minimal()

# Looking at the plot, some heteroscedasticity and perhaps a dependence pattern
# can be seen. The tests below provide objective criteria to guide this
# assessment.

# Normality - Shapiro-Wilk test
shapiro.test(reg$residuals)
# The Shapiro-Wilk test supports normality of the residuals, since the null
# hypothesis of normally distributed residuals is not rejected.

# Homoscedasticity - Breusch-Pagan test
bptest(reg)
# The Breusch-Pagan test supports homoscedasticity of the residuals, since the
# null hypothesis of homoscedastic residuals is not rejected.

# Independence - autocorrelation test
dwtest(reg)
# The autocorrelation (Durbin-Watson) test points to dependence among the
# residuals, since the hypothesis of independent residuals is rejected. This
# assumption weakens the model; corrective measures are suggested to adjust it.

# END OF ANSWER TO QUESTION 3
/Nelson_Quesado_2AP_21_v5.R
no_license
nelsonquesado/tad-2021.1
R
false
false
18,623
r
#' createExperimentSample - Create a new experiment sample from a sample lot.
#'
#' \code{createExperimentSample} Creates a new instance of an entity.
#' @param coreApi coreApi object with valid jsessionid
#' @param experimentType experiment type to get as character string
#' @param experimentBarcode experiment barcode
#' @param sampleLotBarcode barcode of sample to add to experiment
#' @param body values for sample attributes as a list of key-value pairs
#' @param useVerbose Use verbose communication for debugging
#' @export
#' @return returns a list: $entity contains entity information, $response contains the entire http response
#' @examples
#'\dontrun{
#' api<-CoreAPIV2::coreApi("PATH TO JSON FILE")
#' login<- CoreAPIV2::authBasic(api)
#' item<-CoreAPIV2::createExperimentSample(login$coreApi,"Experiment_Type",
#'      "Assaybarcode","ProtocolBarcode")
#' CoreAPIV2::logOut(login$coreApi )
#' }
#' @author Craig Parman
#' @description \code{createExperimentSample} Creates a new experiment sample from a sample lot.

createExperimentSample<-function (coreApi,experimentType,experimentBarcode,
                                  sampleLotBarcode,body=NULL,useVerbose=FALSE)
{
  #clean the names for ODATA
  experimentType<- CoreAPIV2::ODATAcleanName(experimentType)
  experimentSampleType <- paste0(experimentType,"_SAMPLE")

  exptRef<- list('EXPERIMENT@odata.bind' = paste0("/",experimentType,"('",experimentBarcode,"')"))
  entityRef<- list('ENTITY@odata.bind' = paste0("/ENTITY('",sampleLotBarcode,"')"))

  fullBody<- jsonlite::toJSON(c(body,exptRef,entityRef),auto_unbox = TRUE)

  headers <- c('Content-Type' = "application/json;odata.metadata=full")

  response<-CoreAPIV2::apiPOST(coreApi,resource=experimentSampleType,body=fullBody,encode = "json",
                               headers=headers,special=NULL,useVerbose=useVerbose)

  list(entity=httr::content(response),response=response)
}
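# --- Illustrative note (not part of the original package source) -------------
# A minimal sketch of the JSON request body assembled above, using hypothetical
# barcodes and an invented NAME attribute purely for illustration; only the list
# structure and the jsonlite::toJSON() call mirror the function itself.
# illustrative_body <- jsonlite::toJSON(
#   c(list(NAME = "my sample"),                                     # optional extra attributes ("body")
#     list('EXPERIMENT@odata.bind' = "/MY_EXPERIMENT('EXP-0001')"), # experiment reference
#     list('ENTITY@odata.bind'     = "/ENTITY('LOT-0042')")),       # sample lot reference
#   auto_unbox = TRUE)
# # illustrative_body is the string POSTed to the <experimentType>_SAMPLE resource.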
/R/createExperimentSample.R
no_license
ceparman/CoreAPIV2_beta
R
false
false
1,950
r
# plotting GPP vs. R and color coding by season; JAZ & JK; 2018-03-09

#first part is creating dataframes with data we want to plot.
#starts with reading in individual lake files and binding them into one data frame
#then merges dataframes together (e.g., metabolism dataframe with loading data frame)
#there is code in the dropbox for creating a dataframe for the light climate data but I've left this
#out for now because the light climate data are not on Github

#code for plotting starts~line 100

library(xlsx)
library(dplyr)

dir<-'results/metab/20161107/' # directory of metabolism data
folders<-list.files(dir) # folders in this dir
folders<-folders[-grep('.doc',folders)] # get rid of README doc
all_metab<-data.frame() # data frame to store all metab data
for(i in 1:length(folders)){ # loops over all folders in metab directory
  cur<-read.table(file.path(dir,folders[i],paste(folders[i],'_metabEst.txt',sep='')),header=T,sep='\t',
                  stringsAsFactors = F) # read in lake specific metab data
  cur<-cur[,1:12] # getting rid of any unnecessary columns
  cur$lake<-folders[i] #creating a column of lake names from the names of the folders
  all_metab<-rbind(all_metab,cur) #stacking all the lakes in one dataframe one by one
}
all_metab$date<-as.Date(paste(all_metab$year,all_metab$doy),format='%Y %j') # making date

season_cutoff <- readRDS('results/z_scored_schmidt.rds') %>% select(-doy) # seasonal cutoff based on z-scored schmidt stability

all_metab <- left_join(all_metab, season_cutoff, by = c('lake' = 'lake', 'date' = 'date'))

# creating season column. season cutoffs used by Jim ; can change if needed
# Spring = (doy < 180)
# Summer = (doy >= 180 & doy <=240)
# Fall = (doy > 240)
#all_metab$season <- rep(NA,nrow(all_metab))
#all_metab$season <- ifelse(all_metab$doy>120&all_metab$doy<180,'spring',all_metab$season) # if doy within criteria, spring, else leave alone
#all_metab$season <- ifelse(all_metab$doy>=180&all_metab$doy<=240,'summer',all_metab$season)
#all_metab$season <- ifelse(all_metab$doy>240&all_metab$doy<305,'fall',all_metab$season)

#loop below creates one dataframe with all loading data in it - JK modifying the loop JZ created above for metabolism
dir<-'results/nutrient load/' # directory of loading data
folders<-list.files(dir) # folders in this dir (in this case there are only files no folders)
folders<-folders[-grep('Readme',folders)] # get rid of README doc
all_load<-data.frame() # data frame to store all loading data + discharge
for(i in 1:length(folders)){ # loops over all folders in loads directory, needed to change syntax on file path because there are no lake folders in main load folder
  cur<-read.table(file.path(dir,folders[i]),header=T,sep='\t',
                  stringsAsFactors = F) # read in lake specific load data
  cur$lake<-strsplit(folders[i],split = '_')[[1]][1] # this line creates the lake column, splits the string (the type of variable that is folders[i]) by a character that you choose, then chooses the 1st split
  all_load<-rbind(all_load,cur) #stacking all the lakes in one dataframe
}

#creating logged inflow column
all_load$loginflow<- log(all_load$inflow)

#merging loads and metabolism data into one dataframe
#does not include lakes without inflows or Mendota (no load yet as of 10/28/18)
all_metabload<-merge(all_metab, all_load, by=c('lake','doy')) #does not include lakes without inflows or Mendota (no load)
all_metabload$DOC_TP<-all_metabload$DOC_load/all_metabload$TP_load #creating DOC:TP column
all_metabload$TN_TP<-all_metabload$TN_load/all_metabload$TP_load #creating TP:TN column

#loop below creates one dataframe with all schmidt stability data in it - JK modifying JZ allmetab loop
dir<-'data/schmidt stability/' # directory of stability data
files<-list.files(dir) # files in this dir
all_st<-data.frame() # data frame to store all stability data, these data are not summarized by day
for(i in 1:length(files)){ # loops over all files in stability directory
  cur<-read.table(file.path(dir,files[i]),header=T,sep='\t',
                  stringsAsFactors = F) # read in lake specific buoyancy data
  cur$lake<-strsplit(files[i],split = '_')[[1]][1] # this line creates the lake column, splits the string (the type of variable that is files[i]) by a character that you choose, then chooses the 1st split
  all_st<-rbind(all_st,cur)
}

#adding doy column to all_st
all_st$doy <- strftime(all_st$datetime, format = "%j")

#calculating average daily stability using doy and lake as by variable
ave_st<-aggregate(all_st, by=list(all_st$doy,all_st$lake), FUN=mean, na.rm=TRUE)
#dropping variables that couldn't calculate mean (doy, lake) and renaming group.1 and group.2
ave_st<-ave_st[c(-3,-5,-6)]
names(ave_st)[1] <- "doy"
names(ave_st)[2]<-"lake"

#merging stability and metabolism data into one dataframe
all_metabst<-merge(all_metab, ave_st, by=c('lake', 'doy'))

# plotting GPP vs. R by season
cv_thres = 4 #cutoff for keeping metabolism data

windows()
par(mfrow=c(4,4)) # how many panels on graph (alternatively we could call a new window in every iteration of for loop)
par(mar=c(3,2,2,2), oma=c(3,3,1,1))
all_metab = all_metab %>%
  dplyr::mutate(color = case_when(season == 'spring' ~ 'green',
                                  season == 'summer' ~ 'blue',
                                  season == 'fall' ~ 'orange')) # creates a new column in the all_metab data frame for season color
for(i in 1:length(unique(all_metab$lake))){ # looping through each lake
  cur<-all_metab[all_metab$lake==unique(all_metab$lake)[i],]
  cur <- cur[!is.na(cur$season),] # only keeping dates that fall within our pre-defined seasons
  cur <- cur[cur$GPP_SD/cur$GPP<cv_thres&cur$R_SD/abs(cur$R)<cv_thres,]#keeping only days that meet CV threshold
  xlim=c(0,max(abs(c(cur$R,cur$GPP)),na.rm = T))
  ylim=c(-1*xlim[2],xlim[1])
  plot(cur$R~cur$GPP,pch=16,ylim=ylim,xlim=xlim,ylab='',xlab='',main=unique(all_metab$lake)[i],col=cur$color)
  abline(0,-1,lty=2,lwd=2)
}
mtext(expression(R~(mg~O[2]~L^-1~day^-1)), side=2, outer=TRUE)
mtext(expression(GPP~(mg~O[2]~L^-1~day^-1)), side=1, outer=TRUE)

# plotting GPP vs. R colored by doy in grey scale
cv_thres = 4 #cutoff for keeping metabolism data

windows()
par(mfrow=c(4,4)) # how many panels on graph (alternatively we could call a new window in every iteration of for loop)
for(i in 1:length(unique(all_metab$lake))){ # looping through each lake
  cur<-all_metab[all_metab$lake==unique(all_metab$lake)[i],]
  cur <- cur[!is.na(cur$season),] # only keeping dates that fall within our pre-defined seasons
  cur <- cur[cur$GPP_SD/cur$GPP<cv_thres&cur$R_SD/abs(cur$R)<cv_thres,]#keeping only days that meet CV threshold
  col <- rev(grey.colors(n = nrow(cur))) # colors based on DOY
  ylim=c(min(cur$R,na.rm = T),max(cur$R,na.rm = T))
  xlim=c(min(cur$GPP,na.rm = T),max(cur$GPP,na.rm = T))
  plot(cur$R~cur$GPP,pch=21,ylim=ylim,xlim=xlim,ylab='R',xlab='GPP',main=unique(all_metab$lake)[i],col='black',bg=col[as.factor(cur$doy)]) # darker colors are later in year
  abline(0,-1,lty=2,lwd=2)
}

# plotting GPP vs. R colored by 3rd variable (using either all_metabload or all_metabst)
#current code codes by TP_load
cv_thres = 4 #cutoff for keeping metabolism data

windows() #don't use when plotting each lake separately
par(mfrow=c(4,4)) # how many panels on graph (alternatively we could call a new window in every iteration of for loop)
for(i in 1:length(unique(all_metabload$lake))){ # looping through each lake
  #windows() #use if want each lake in separate plot
  cur<-all_metabload[all_metabload$lake==unique(all_metabload$lake)[i],]
  cur <- cur[!is.na(cur$season),] # only keeping dates that fall within our pre-defined seasons
  cur <- cur[cur$GPP_SD/cur$GPP<cv_thres&cur$R_SD/abs(cur$R)<cv_thres,]#keeping only days that meet CV threshold
  col <- rev(grey.colors(n = nrow(cur), start=0, end=1)) #number of colors based on number of rows
  ylim=c(min(cur$R,na.rm = T),max(cur$R,na.rm = T))
  xlim=c(min(cur$GPP,na.rm = T),max(cur$GPP,na.rm = T))
  #cur <- subset(cur, cur$doy>=180&cur$doy<=240) #using this line plots only summer data
  #cur <- subset(cur, cur$doy>=121&cur$doy<=179) #using this line plots only spring data
  #cur <- subset(cur, cur$doy>=241&cur$doy<=304) #using this line plots only fall data
  plot(cur$R~cur$GPP,pch=21,ylim=ylim,xlim=xlim,ylab='R',xlab='GPP',main=unique(all_metabload$lake)[i],col='black',bg=col[as.factor(cur$TP_load)]) # darker colors are higher values
  abline(0,-1,lty=2,lwd=2)
}

#looking across lakes at effect of third variable
#this code plots all lakes on the same graph coded by a third variable in greyscale
windows()
cur<-all_metabload
cur <- cur[!is.na(cur$season),] # only keeping dates that fall within our pre-defined seasons
col <- rev(grey.colors(n = nrow(cur), start=0, end=1)) #number of colors based on number of rows
ylim=c(min(cur$R,na.rm = T),max(cur$R,na.rm = T))
xlim=c(min(cur$GPP,na.rm = T),max(cur$GPP,na.rm = T))
#cur <- subset(cur, cur$doy>=180&cur$doy<=240) #using this line plots only summer data
#cur <- subset(cur, cur$doy>=121&cur$doy<=179) #using this line plots only spring data
#cur <- subset(cur, cur$doy>=241&cur$doy<=304) #using this line plots only fall data
plot(cur$R~cur$GPP,pch=21,ylim=ylim,xlim=xlim,ylab='R',xlab='GPP',col='black',bg=col[as.factor(cur$DOC_TP)]) # darker colors are higher values
abline(0,-1,lty=2,lwd=2)

#testing use of quartiles for categories of driver variables
#should we calculate quartiles before or after dropping poorly fitting days? I think before

# plotting GPP vs. R colored by 3rd variable (using either all_metabload or all_metabst)
cv_thres = 4 #cutoff for keeping metabolism data

windows() #don't use when plotting each lake separately
par(mfrow=c(4,4)) # how many panels on graph (alternatively we could call a new window in every iteration of for loop)
col = c('orange','green','blue', 'black')
for(i in 1:length(unique(all_metabst$lake))){ # looping through each lake
  #windows() #use if want each lake in separate plot
  cur<-all_metabst[all_metabst$lake==unique(all_metabst$lake)[i],]
  cur <- cur[!is.na(cur$season),] # only keeping dates that fall within our pre-defined seasons
  cur<- cur[!is.na(cur$schmidt.stability),]#keeps only days with data
  #cur <- subset(cur, cur$doy>=180&cur$doy<=240) #using this line uses only summer data
  cur$cat <- rep(NA,nrow(cur)) #creating a new column to populate with 1,2,3,4 quartile
  y= quantile(cur$schmidt.stability, c(.25)) #finds the cutoff value for 25th percentile
  y1=quantile(cur$schmidt.stability, c(.5)) #finds the cutoff value for 50th percentile
  y2=quantile(cur$schmidt.stability, c(0.75)) #finds the cutoff values for 75th percentile
  cur$cat<- ifelse(cur$schmidt.stability<=y,'1',cur$cat) #next 4 lines code the quartile based on value of each day
  cur$cat<- ifelse(cur$schmidt.stability>y & cur$schmidt.stability<=y1,'2',cur$cat)
  cur$cat<- ifelse(cur$schmidt.stability>y1 & cur$schmidt.stability<=y2,'3',cur$cat)
  cur$cat<- ifelse(cur$schmidt.stability>y2,'4',cur$cat)
  cur <- cur[cur$GPP_SD/cur$GPP<cv_thres&cur$R_SD/abs(cur$R)<cv_thres,]#keeping only days that meet CV threshold
  ylim=c(min(cur$R,na.rm = T),max(cur$R,na.rm = T))
  xlim=c(min(cur$GPP,na.rm = T),max(cur$GPP,na.rm = T))
  #cur <- subset(cur, cur$doy>=180&cur$doy<=240) #using this line plots only summer data, need to think about whether you've calculated quartiles within or across seasons if plotting a subset
  #cur <- subset(cur, cur$doy>=121&cur$doy<=179) #using this line plots only spring data
  #cur <- subset(cur, cur$doy>=241&cur$doy<=304) #using this line plots only fall data
  plot(cur$R~cur$GPP,pch=16, cex=.9,ylim=ylim,xlim=xlim,ylab='R',xlab='GPP',main=unique(all_metabst$lake)[i],col=col[as.factor(cur$cat)])
  abline(0,-1,lty=2,lwd=2)
}
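# --- Illustrative alternative (sketch only, not part of the original script) ---
# The quartile categories built with quantile() + the ifelse() chain above could be
# written more compactly with dplyr, which is already loaded at the top of this
# script; ties at the cutoffs may be binned slightly differently because ntile()
# forms four equally sized groups rather than using the quantile values directly.
# cur$cat <- as.character(dplyr::ntile(cur$schmidt.stability, 4))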
/R_code/fingerprintplots.r
no_license
Atefeh786/catchment_metab_wg
R
false
false
11,807
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tests.R
\name{summary.SpdModelTest}
\alias{summary.SpdModelTest}
\title{Summarise a \code{SpdModelTest} class object}
\usage{
summary.SpdModelTest(object, ...)
}
\arguments{
\item{object}{A \code{SpdModelTest} class object produced using the \code{\link{modelTest}} function.}

\item{...}{Ignored}
}
\description{
\code{summary} method for class "\code{SpdModelTest}"
}
\details{
The summary function returns metadata (number of radiocarbon dates, bins, and simulations), the p-value of the global significance test, and the chronological interval of local positive and negative deviations from the simulation envelope.
}
\seealso{
\code{\link{modelTest}}.
}
/man/summary.SpdModelTest.Rd
no_license
f-silva-archaeo/rcarbon
R
false
true
735
rd
## Read the data and format Date to type Date
epc <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
epc$Date <- as.Date(epc$Date, "%d/%m/%Y")

## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
epct <- subset(epc, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))

## Remove incomplete observations
epct <- epct[complete.cases(epct),]

## Combine Date and Time columns
dateTime <- paste(epct$Date, epct$Time)

## Name the vector
dateTime <- setNames(dateTime, "DateTime")

## Remove Date and Time columns
epct <- epct[ ,!(names(epct) %in% c("Date","Time"))]

## Add DateTime column
epct <- cbind(dateTime, epct)

## Format dateTime column
epct$dateTime <- as.POSIXct(dateTime)

plot(epct$Global_active_power~epct$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png,"plot2.png", width=480, height=480)
dev.off()
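## --- Illustrative note (sketch, not part of the original script) ---
## The as.POSIXct() call above relies on the default parsing of the pasted
## "YYYY-MM-DD HH:MM:SS" strings; an explicit-format version giving the same
## result would be:
## epct$dateTime <- as.POSIXct(dateTime, format = "%Y-%m-%d %H:%M:%S")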
/plot2.R
no_license
xela005/ExData_Plotting1
R
false
false
902
r
library(plyr) # Step 1 # Merge the training and test sets to create one data set ############################################################################### x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt") y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt") subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt") x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt") y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt") subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt") # create 'x' data set x_data <- rbind(x_train, x_test) # create 'y' data set y_data <- rbind(y_train, y_test) # create 'subject' data set subject_data <- rbind(subject_train, subject_test) # Step 2 # Extract only the measurements on the mean and standard deviation for each measurement ############################################################################### features <- read.table("./data/UCI HAR Dataset/features.txt") # get only columns with mean() or std() in their names mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2]) # subset the desired columns x_data <- x_data[, mean_and_std_features] # correct the column names names(x_data) <- features[mean_and_std_features, 2] # Step 3 # Use descriptive activity names to name the activities in the data set ############################################################################### activities <- read.table("./data/UCI HAR Dataset/activity_labels.txt") # update values with correct activity names y_data[, 1] <- activities[y_data[, 1], 2] # correct column name names(y_data) <- "activity" # Step 4 # Appropriately label the data set with descriptive variable names ############################################################################### # correct column name names(subject_data) <- "subject" # bind all the data in a single data set all_data <- cbind(x_data, y_data, subject_data) # Step 5 # Create a second, independent tidy data set with the average of each variable # for each activity and each subject ############################################################################### # 66 <- 68 columns but last two (activity & subject) averages_data <- ddply(all_data, .(subject, activity), function(x) colMeans(x[, 1:66])) write.table(averages_data, "averages_data.txt", row.name=FALSE)
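# --- Illustrative check (sketch, not part of the original script) ---
# A quick way to confirm the exported tidy set, assuming the file was just written
# to the working directory by the write.table() call above:
# check <- read.table("averages_data.txt", header = TRUE)
# dim(check)   # expect 180 rows (30 subjects x 6 activities) and 68 columns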
/run_analysis.R
no_license
brettearnest/GetAndCleanDataProject
R
false
false
2,371
r
library(dplyr) #check if the dataset exists and if not download and unzip the data: if (!file.exists("UCI HAR Dataset")){ Url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(Url, destfile = "./data.zip", method = "curl") #unzip files unzip("data.zip") } #read files to R X_test <- read.table("./UCI HAR Dataset/test/X_test.txt") y_test <- read.table("./UCI HAR Dataset/test/y_test.txt") subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt") X_train <- read.table("./UCI HAR Dataset/train/X_train.txt") y_train <- read.table("./UCI HAR Dataset/train/y_train.txt") subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt") features <- read.table("./UCI HAR Dataset/features.txt") #label the data x_labels <- make.names(as.character(features[[2]]), unique=TRUE) names(X_test) <- x_labels names(X_train) <- x_labels names(y_test) <- "activity_num" names(y_train) <- "activity_num" names(subject_test) <- "subject" names(subject_train) <- "subject" #combine data and add column with activity labels test_set <- cbind(subject_test, y_test, X_test) train_set <- cbind(subject_train, y_train, X_train) full_data <- rbind(test_set, train_set) full_data <- mutate(full_data, activity=factor(activity_num, labels=c("WALKING", "WALKING_upstairs", "WALKING_downstairs", "SITTING", "STANDING","LAYING"))) #extract mean and standard deviation data_mean_SD <- select(full_data, subject:activity_num, activity, tBodyAcc.mean...X:tBodyAcc.std...Z, tGravityAcc.mean...X:tGravityAcc.std...Z, tBodyAccJerk.mean...X:tBodyAccJerk.std...Z, tBodyGyro.mean...X:tBodyGyro.std...Z, tBodyGyroJerk.mean...X:tBodyGyroJerk.std...Z, tBodyAccMag.mean..:tBodyAccMag.std.., tGravityAccMag.mean..:tGravityAccMag.std.., tBodyAccJerkMag.mean..:tBodyAccJerkMag.std.., tBodyGyroMag.mean..:tBodyGyroMag.std.., tBodyGyroJerkMag.mean..:tBodyGyroJerkMag.std.., fBodyAcc.mean...X:fBodyAcc.std...Z, fBodyAccJerk.mean...X:fBodyAccJerk.std...Z, fBodyGyro.mean...X:fBodyGyro.std...Z, fBodyAccMag.mean..:fBodyAccMag.std.., fBodyBodyAccJerkMag.mean..: fBodyBodyAccJerkMag.std.., fBodyBodyGyroMag.mean..:fBodyBodyGyroMag.std.., fBodyBodyGyroJerkMag.mean..:fBodyBodyGyroJerkMag.std..) #rename variables to remove extra dots and "Body"s data_mean_SD <- rename(data_mean_SD, tBodyAccMag.mean=tBodyAccMag.mean.., tBodyAccMag.std=tBodyAccMag.std.., tGravityAccMag.mean=tGravityAccMag.mean.., tGravityAccMag.std=tGravityAccMag.std.., tBodyAccJerkMag.mean=tBodyAccJerkMag.mean.., tBodyAccJerkMag.std=tBodyAccJerkMag.std.., tBodyGyroMag.mean=tBodyGyroMag.mean.., tBodyGyroMag.std=tBodyGyroMag.std.., tBodyGyroJerkMag.mean=tBodyGyroJerkMag.mean.., tBodyGyroJerkMag.std=tBodyGyroJerkMag.std.., fBodyAccMag.mean=fBodyAccMag.mean.., fBodyAccMag.std=fBodyAccMag.std.., fBodyAccJerkMag.mean=fBodyBodyAccJerkMag.mean.., fBodyAccJerkMag.std=fBodyBodyAccJerkMag.std.., fBodyGyroMag.mean=fBodyBodyGyroMag.mean.., fBodyGyroMag.std=fBodyBodyGyroMag.std.., fBodyGyroJerkMag.mean=fBodyBodyGyroJerkMag.mean.., fBodyGyroJerkMag.std=fBodyBodyGyroJerkMag.std..) #create summary data data_summary <- data_mean_SD %>% group_by(activity, subject) %>% summarize_each(funs(mean), -activity_num) #create text file write.table(data_summary, file = "averages.txt", row.names = FALSE)
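# --- Illustrative note (sketch, not part of the original script) ---
# summarize_each()/funs() still work but are deprecated in newer dplyr releases;
# a roughly equivalent modern spelling (assuming dplyr >= 1.0) would be:
# data_summary <- data_mean_SD %>%
#   group_by(activity, subject) %>%
#   summarise(across(-activity_num, mean), .groups = "drop")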
/run_analysis.R
no_license
ReettaH/Course-project_Getting-and-Cleaning-data
R
false
false
4,235
r
\name{geom_step}
\alias{geom_step}
\title{Connect observations by stairs.}
\usage{
geom_step(mapping = NULL, data = NULL, stat = "identity",
  position = "identity", direction = "hv", ...)
}
\arguments{
\item{direction}{direction of stairs: 'vh' for vertical then horizontal, or 'hv' for horizontal then vertical}

\item{mapping}{The aesthetic mapping, usually constructed with \code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set at the layer level if you are overriding the plot defaults.}

\item{data}{A layer specific dataset - only needed if you want to override the plot defaults.}

\item{stat}{The statistical transformation to use on the data for this layer.}

\item{position}{The position adjustment to use for overlapping points on this layer}

\item{...}{other arguments passed on to \code{\link{layer}}. This can include aesthetics whose values you want to set, not map. See \code{\link{layer}} for more details.}
}
\description{
Connect observations by stairs.
}
\examples{
# Simple quantiles/ECDF from examples(plot)
x <- sort(rnorm(47))
qplot(seq_along(x), x, geom="step")

# Steps go horizontally, then vertically (default)
qplot(seq_along(x), x, geom="step", direction = "hv")
plot(x, type = "s")
# Steps go vertically, then horizontally
qplot(seq_along(x), x, geom="step", direction = "vh")
plot(x, type = "S")

# Also works with other aesthetics
df <- data.frame(
  x = sort(rnorm(50)),
  trt = sample(c("a", "b"), 50, rep = T)
)
qplot(seq_along(x), x, data = df, geom="step", colour = trt)
}
/man/geom_step.Rd
no_license
djmurphy420/ggplot2
R
false
false
1,567
rd
######### Binomal Power Analysis of AUV Data ####### library(stringr) library(dplyr) library(tidyr) # Clear memory ---- rm(list=ls()) # Set working directory #### #w.dir<-dirname(rstudioapi::getActiveDocumentContext()$path) w.dir <- "~/PA_Binomial" w.dir # Set data directory - to read the data from #setwd(paste(w.dir, "outputs/", sep='/')) setwd("~/PA_Binomial/outputs") dir("~/PA_Binomial/data/") ## Set file name -- filen <- "DTV-NPZ-seag-epower-22092020.csv" # Load data df <- read.csv(paste(w.dir, "data", filen, sep='/')) str(df) # 65420 obs names(df) head(df) summary(df) library(epower) #packageVersion("epower") #install.packages("INLA", repos=c(getOption("repos"), INLA="https://inla.r-inla-download.org/R/stable"), dep=TRUE) library(INLA, verbose=TRUE) ### Set design names(df) str(df) #df$Period <- as.factor(df$Period) #df$CvI <- as.factor(df$CvI) #df$Time <- as.factor(df$Time) summary(df) #dat<-read.csv(paste(working.dir, "Data/raw", "BRUV-sg-test1.csv", sep='/')) dat <- as.data.frame(df) summary(dat) str(dat) # Set design dataComponents<-supplyData( dat=dat, variableType="binomial", design.matrix=list( Response="Seagrass", Trials="no.scored", Location="ZoneName", sublocation="Cluster", Time="Time", "subtime"=NA, BvA="Period", CvI="CvI"), levels.dat=list( Before="Before", Control="Control", After="After", Impact="Impact"), scenario.data=list( Number.of.iterations=400, filename="DTV-NPZ-400it-binomal-24092020", Number.of.Impact.Locations=1, Number.of.Control.Locations=2, Number.of.sublocations.within.Location="1;2;3", Number.of.sample.times.Before=3, # this number needs to be higher than 2 (replicates) Number.of.sample.times.After="1;2;3", #Number.of.sample.times.After=1, #Number.of.sample.times.After=2, #Number.of.sample.times.After=3, Number.of.subtimes.within.Time=NA, Number.of.trials="500; 1500; 2500", Number.of.replicate.measurements=1), effect.info=list( Multiplicative=1, Fixed.change=0, Effect.values="-0.2;-0.4;-0.6;-0.8"), ncores = 30) ### The supply data function is not reading the ncores dataComponents$ncores <- 30 require(INLA,quietly=TRUE) scenarioParams<<-do.call(powerScenario,list(inputData=dataComponents)) assessPower() ## Check memory ---- memfree <- as.numeric(system("awk '/MemFree/ {print $2}' /proc/meminfo", intern=TRUE)) memfree # 5392800 kb - at the start of sesson : 64877516, after 150 its: 64082116; after 300 its: 63007928 # To obtain information on the total and/or on the available memory in linux, you can try system('free -m') # 61535 300 its - 500 its :7751 system('grep MemTotal /proc/meminfo') # 65965264 kB 200 its system('lshw -class memory') # 62GiB (after 200 its) x <- system('grep MemTotal /proc/meminfo', intern = TRUE) x # 65965264 kB # memory usage library(pryr) mem_used() # after 150 its 301 MB -- after 300 its: 555 MB -- for 500 its: 1.14 GB
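# --- Illustrative helper (sketch, not part of the original script) ---
# The ad-hoc memory checks above could be wrapped in one small function; this
# assumes a Linux host with /proc/meminfo and the pryr package already loaded above.
# report_memory <- function() {
#   free_kb <- as.numeric(system("awk '/MemFree/ {print $2}' /proc/meminfo", intern = TRUE))
#   used_b  <- as.numeric(pryr::mem_used())
#   cat("System MemFree (kB):", free_kb, "| R session memory (bytes):", used_b, "\n")
#   invisible(c(free_kb = free_kb, used_bytes = used_b))
# }
# report_memory()  # e.g. call before and after assessPower()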
/DTV-NPZ-22092020.R
no_license
anitas-giraldo/PA_Binomial
R
false
false
2,982
r
# Routine to expand a file with data stored as frequency counts
datos <- data.frame(fumador=c(1,1,2,2,1,1,2,2),
                    enfcard=c(rep(c(1,0),4)),
                    sexo=c(rep(1,4), rep(2,4)),
                    frecuencia=c(150,900,75,950,100,850,50,925))
head(datos)
datos.expanded <- datos[rep(row.names(datos), datos$frecuencia), 1:3]
filas <- sample(nrow(datos.expanded), replace = F)
chicuadrado <- datos.expanded[filas,]
row.names(chicuadrado) <- 1:nrow(datos.expanded)
haven::write_sav(chicuadrado, "chicuadrado.sav")
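# --- Illustrative alternative (sketch, not part of the original script) ---
# The same expansion can be done with tidyr (assuming tidyr >= 0.8 is installed):
# uncount() repeats each row 'frecuencia' times, matching the rep(row.names(...))
# trick above, and drops the frequency column by default.
# datos.expanded2 <- tidyr::uncount(datos, frecuencia)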
/expandirdataframe.R
no_license
jrlacalle/material_docente
R
false
false
555
r
library(magrittr) # for the %in% operator
library(stringr) # for str_sub

### First we strip the private info out of the data
datos <- read.csv("VoluntariosEstados19s.csv")

### We write a function that keeps the area code but drops the rest of the phone number
sacalada <- function(telnumer){
  result <- str_sub(telnumer, 1,3)
  return(result)
}

### apply the function to every element of the column and make it a new column
datos["Lada"] <- apply(datos["Teléfono"], 1, sacalada)

quitar <- c("Apellido.Paterno", "Apellido.Materno", "Correo", "Teléfono", "Nombre.Completo")

datos <- datos[ ,!( names(datos) %in% quitar ) ] # Note how we negate the columns with !

write.csv(datos, "VoluntariosSanitizados.csv") # And we write a more private csv

#datos <- read.csv("VoluntariosSanitizados01.csv") # and load it back in

str_sub("tuabuela", -1,-1) # This gives us the last character of "tuabuela"

posiblegenero <- function(nombre) {
  ## this function assigns "Fem" or "Mas" depending on the last letter of the name
  if (str_sub(nombre, -1,-1) =="a"){
    resultado <- "Fem"}
  else{
    resultado <- "Mas"}
  return(resultado)
}

posiblegenero2 <- function(nombre) {
  ## this function assigns 2 or 1 depending on the last letter of the name
  if (str_sub(nombre, -1,-1) =="a"){
    resultado <- 2}
  else{
    resultado <- 1}
  return(resultado)
}

### apply the functions to every element of the column and make them new columns
datos["PosGen"] <- apply( datos["Nombre"], 1,posiblegenero)
datos["PosGen2"] <- apply( datos["Nombre"], 1,posiblegenero2)

# How many rows satisfy the condition inside the which
nfems <- length(which(datos["PosGen"]=="Fem"))

# Store the row number (its "name") as a numeric value
datos$Secuencia <- as.numeric(rownames(datos))

# Draw the points.
plot(x=datos$Secuencia, y=datos$PosGen2)

# Put everything contained in datos on the search path
attach(datos)
# ta-da, fewer keystrokes
plot(x=Secuencia, y=PosGen2)

# Tell R that PosGen is a categorical variable
datos$PosGen <- as.factor(datos$PosGen)

### A t-test to see whether there is any "significance" to the two groups
## being different or not
tururu<-t.test(Secuencia~PosGen, data=datos)

# A plot
plot(datos$Secuencia~datos$PosGen, data=datos)

# A prettier plot
library(ggplot2)
library(wesanderson)

### or this other version
figura <- ggplot(datos, aes(x=PosGen, y=Secuencia, fill=PosGen))+
  geom_boxplot()+
  geom_jitter(width=0.04)+
  ggtitle("Dispersión de Voluntariado por Género")+
  theme_linedraw() +
  theme(legend.position="none")+
  scale_fill_manual(values=wes_palette(name="Darjeeling"))+
  scale_x_discrete(name="Posible Género")+
  scale_y_continuous(name="Secuencia de Aparición")+
  theme(text=element_text(size=14, family="Times", hjust=0.5),
        axis.text.x=element_text(size=12, family="Times"),
        axis.text.y=element_text(size=12, family="Times")
  )+coord_flip()+annotate("text", x=100, y=10, label="140")

figura

ggsave("DispersionPorGenero.png", plot=figura, dpi=92)

## Now let's do the statistics
randomRows = function(df,n){
  return(df[sample(nrow(df),n),])
}

## How many groups like this could we form
choose(20,11)

muchos<-5000
promedios<-c()

for (i in 1:muchos){
  promedios[i]=mean(randomRows(datos,nfems)$Secuencia)
}

promfem <- 101.1522

tantos<-length(promedios[promedios<promfem])
nuestrap=1.0-tantos/muchos
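# --- Illustrative note (sketch, not part of the original script) ---
# The permutation p-value computed above via tantos and nuestrap can be spelled
# more directly as the proportion of resampled means at or above the observed
# female mean, which gives the same number:
# mean(promedios >= promfem)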
/R/R para emergencias/PruebaT01.r
no_license
kzapfe/AprendiendoComputoLibre
R
false
false
3,471
r
# Applied Linear Regression, Third edition # Chapter 12 Logistic Regression # October 14, 2004; revised January 2011 for alr3 Version 2.0, R only require(alr3) ################################################################ # Balsam fir blowdown # blowBF # page 255 m1 <- glm(y ~ logb(D, 2), family=binomial(), data=blowBF) summary(m1) # Table 12.2 m2 <- glm(y ~ logb(D, 2) + S, family=binomial(), data=blowBF) m3 <- update(m2, ~ . + logb(D, 2):S) summary(m2) summary(m3) # Fig. 12.1 "../EPS-figs/blow1.eps" op<- if(is.null(version$language) == FALSE) par(mfrow=c(2, 1), mai=c(.6, .6, .1, .1), mgp=c(2, 1, 0), cex.lab=1.0, cex=0.7) else par(mfrow=c(2, 1)) with(blowBF, plot(jitter(logb(D, 2), amount=.05), jitter(blowBF$y, amount=0.02), xlab=expression(paste("(a) ", Log[2](Diameter))), ylab="Blowdown indicator")) #lines(smooth.spline(logb(D, 2), y), lty=3) abline(lm(y ~ logb(D, 2), data=blowBF), lty=1) xx <- with(blowBF, seq(min(D), max(D), length=100)) lo.fit <- loess(y ~ logb(D, 2), data=blowBF, degree=1) lines(logb(xx, 2), predict(lo.fit, data.frame(D=xx)), lty=3) lines(logb(xx, 2), predict(m1, data.frame(D=xx), type="response"), lty=2) library(sm) with(blowBF, sm.density.compare(logb(D, 2), y, lty=c(1, 2), xlab=expression(paste("(b) ", log[2](D))))) legend("topright", inset=.02, legend=c("Y=0", "Y=1"), lty=c(1, 2)) par(op) with(blowBF,{ plot(logb(D, 2), S, col=c("red", "black")[y + 1], pch=y + 1) points(logb(D[y==1], 2), S[y==1], col=2, pch=2) } ) logistic <- function(x) { 1/(1 + exp(-x)) } xx <- seq(-6, 6, length=100) # Fig. 12.2 "../EPS-figs/logistic.eps" plot(xx, logistic(xx), type="l", xlab=expression(paste(beta, "'", bold(x))), ylab="Probability") s1 <-summary(m1) s1$coef print(paste("Deviance:", round(s1$deviance, 2))) print(paste("Pearson's X^2:", round(sum(residuals(m1, type="pearson")^2), 2))) # Fig. 12.3 "../EPS-figs/blow1a.eps" op <- par(mfrow=c(2, 1), mai=c(.6, .6, .1, .1), mgp=c(2, 1, 0), cex.lab=1.0, cex=0.95) with(blowBF, sm.density.compare(S, y, lty=c(1, 2), xlab=expression(paste("(a) ", S)))) legend("topright", inset=.02, legend=c("Y=0", "Y=1"), lty=c(1, 2), pch=c(1, 2)) with(blowBF, { plot(jitter(logb(D, 2), amount=.04), S, pch=y + 1, cex=0.5, xlab=expression(paste("(b) ", log[2](D)))) points(jitter(logb(D[y==0], 2), factor=.5), S[y==0], pch=2) }) legend("topright", legend=c("Y=0", "Y=1"), pch=c(1, 2), cex=0.8) par(op) m2 <- update(m1, ~ . + S) m3 <- update(m2, ~ . + S:logb(D, 2)) # Fig. 
12.4 "../EPS-figs/blowc.eps" op <- par(mfrow=c(2, 1), mai=c(.6, .6, .1, .1), mgp=c(2, 1, 0), cex.lab=1.0, cex=0.95) with(blowBF, { xa <- seq(min(D), max(D), len=99) ya <- seq(.01, .99, len=99) za <- matrix(nrow=99, ncol=99) for (i in 1:99) { za[, i] <- predict(m2, data.frame(D=rep(xa[i], 99), S=ya), type="response")} if(is.null(version$language) == FALSE){ contour(logb(xa, 2), ya, za, xlab=expression(paste("(b) ", log[2](D))), ylab="S") points(jitter(logb(D, 2), amount=.04), S, pch=y + 1, cex=.5)} else { contour(logb(xa, 2), ya, za, xlab="(b) log[2](D)", ylab="S") points(jitter(logb(D[y==0], 2), factor=.4), S[y==0], pch=1, cex=.5) points(jitter(logb(D[y==1], 2), factor=.4), S[y==1], pch=2, cex=.5)} # second model with interaction za <- matrix(nrow=99, ncol=99) for (i in 1:99) { za[, i] <- predict(m3, data.frame(D=rep(xa[i], 99), S=ya), type="response")} if(is.null(version$language) == FALSE){ contour(logb(xa, 2), ya, za, xlab=expression(paste("(b) ", log[2](D))), ylab="S") points(jitter(logb(D, 2), amount=.04), S, pch=y + 1, cex=.5) } else { contour(logb(xa, 2), ya, za, xlab="(b)log[2](D)", ylab="S") points(jitter(logb(D[y==0], 2), factor=.4), S[y==0], pch=1, cex=.5) points(jitter(logb(D[y==1], 2), factor=.4), S[y==1], pch=2, cex=.5)} }) par(op) # Fig. 12.5 xx <- seq(0, 1, length=100) op <- par(mfrow=c(1, 2), mar=c(4, 3, 0, .5) + .1, mgp=c(2, 1, 0), cex=0.6) plot(xx, exp(.4009 + 4.9098*xx), type="l", xlab="(a) S", ylab="Odds multiplier") with(blowBF, xx <- seq(min(logb(D, 2)), max(logb(D, 2)), length=100)) plot(2^(xx), exp(coef(m3)[3]/10 + coef(m3)[4]*xx/10), type="l", xlab="(b) D", ylab="Odds multiplier") par(op) summary(m2) summary(m3) print(paste("Pearson's X^2:", round(sum(residuals(m3, type="pearson")^2), 2))) anova(m1, m2, m3, test="Chisq") anova(m1, m3, test="Chisq") m0 <- update(m1, ~ . -logb(D, 2)) anova(m0, m1, m2, m3, test="Chisq") ############################################################################# # Titanic data dt <- titanic head(titanic) mysummary <- function(m){c(df=m$df.residual, G2=m$deviance, X2=sum(residuals(m, type="pearson")^2) )} m1 <- glm(cbind(Surv, N-Surv) ~ Class + Age + Sex, data=titanic, family=binomial()) m2 <- update(m1, ~ . + Class:Sex) m3 <- update(m2, ~ . + Class:Age) m4 <- update(m3, ~ . + Age:Sex) m5 <- update(m4, ~ Class:Age:Sex) ans <- mysummary(m1) ans <- rbind(ans, mysummary(m2)) ans <- rbind(ans, mysummary(m3)) ans <- rbind(ans, mysummary(m4)) ans <- rbind(ans, mysummary(m5)) row.names(ans) <- c( m1$formula, m2$formula, m3$formula, m4$formula, m5$formula) ans
/ALR3_Book/Chapter12_Logistic Regression.R
no_license
PyRPy/ML_Py_Templates
R
false
false
5,381
r
# Applied Linear Regression, Third edition # Chapter 12 Logistic Regression # October 14, 2004; revised January 2011 for alr3 Version 2.0, R only require(alr3) ################################################################ # Balsam fir blowdown # blowBF # page 255 m1 <- glm(y ~ logb(D, 2), family=binomial(), data=blowBF) summary(m1) # Table 12.2 m2 <- glm(y ~ logb(D, 2) + S, family=binomial(), data=blowBF) m3 <- update(m2, ~ . + logb(D, 2):S) summary(m2) summary(m3) # Fig. 12.1 "../EPS-figs/blow1.eps" op<- if(is.null(version$language) == FALSE) par(mfrow=c(2, 1), mai=c(.6, .6, .1, .1), mgp=c(2, 1, 0), cex.lab=1.0, cex=0.7) else par(mfrow=c(2, 1)) with(blowBF, plot(jitter(logb(D, 2), amount=.05), jitter(blowBF$y, amount=0.02), xlab=expression(paste("(a) ", Log[2](Diameter))), ylab="Blowdown indicator")) #lines(smooth.spline(logb(D, 2), y), lty=3) abline(lm(y ~ logb(D, 2), data=blowBF), lty=1) xx <- with(blowBF, seq(min(D), max(D), length=100)) lo.fit <- loess(y ~ logb(D, 2), data=blowBF, degree=1) lines(logb(xx, 2), predict(lo.fit, data.frame(D=xx)), lty=3) lines(logb(xx, 2), predict(m1, data.frame(D=xx), type="response"), lty=2) library(sm) with(blowBF, sm.density.compare(logb(D, 2), y, lty=c(1, 2), xlab=expression(paste("(b) ", log[2](D))))) legend("topright", inset=.02, legend=c("Y=0", "Y=1"), lty=c(1, 2)) par(op) with(blowBF,{ plot(logb(D, 2), S, col=c("red", "black")[y + 1], pch=y + 1) points(logb(D[y==1], 2), S[y==1], col=2, pch=2) } ) logistic <- function(x) { 1/(1 + exp(-x)) } xx <- seq(-6, 6, length=100) # Fig. 12.2 "../EPS-figs/logistic.eps" plot(xx, logistic(xx), type="l", xlab=expression(paste(beta, "'", bold(x))), ylab="Probability") s1 <-summary(m1) s1$coef print(paste("Deviance:", round(s1$deviance, 2))) print(paste("Pearson's X^2:", round(sum(residuals(m1, type="pearson")^2), 2))) # Fig. 12.3 "../EPS-figs/blow1a.eps" op <- par(mfrow=c(2, 1), mai=c(.6, .6, .1, .1), mgp=c(2, 1, 0), cex.lab=1.0, cex=0.95) with(blowBF, sm.density.compare(S, y, lty=c(1, 2), xlab=expression(paste("(a) ", S)))) legend("topright", inset=.02, legend=c("Y=0", "Y=1"), lty=c(1, 2), pch=c(1, 2)) with(blowBF, { plot(jitter(logb(D, 2), amount=.04), S, pch=y + 1, cex=0.5, xlab=expression(paste("(b) ", log[2](D)))) points(jitter(logb(D[y==0], 2), factor=.5), S[y==0], pch=2) }) legend("topright", legend=c("Y=0", "Y=1"), pch=c(1, 2), cex=0.8) par(op) m2 <- update(m1, ~ . + S) m3 <- update(m2, ~ . + S:logb(D, 2)) # Fig. 
12.4 "../EPS-figs/blowc.eps" op <- par(mfrow=c(2, 1), mai=c(.6, .6, .1, .1), mgp=c(2, 1, 0), cex.lab=1.0, cex=0.95) with(blowBF, { xa <- seq(min(D), max(D), len=99) ya <- seq(.01, .99, len=99) za <- matrix(nrow=99, ncol=99) for (i in 1:99) { za[, i] <- predict(m2, data.frame(D=rep(xa[i], 99), S=ya), type="response")} if(is.null(version$language) == FALSE){ contour(logb(xa, 2), ya, za, xlab=expression(paste("(b) ", log[2](D))), ylab="S") points(jitter(logb(D, 2), amount=.04), S, pch=y + 1, cex=.5)} else { contour(logb(xa, 2), ya, za, xlab="(b) log[2](D)", ylab="S") points(jitter(logb(D[y==0], 2), factor=.4), S[y==0], pch=1, cex=.5) points(jitter(logb(D[y==1], 2), factor=.4), S[y==1], pch=2, cex=.5)} # second model with interaction za <- matrix(nrow=99, ncol=99) for (i in 1:99) { za[, i] <- predict(m3, data.frame(D=rep(xa[i], 99), S=ya), type="response")} if(is.null(version$language) == FALSE){ contour(logb(xa, 2), ya, za, xlab=expression(paste("(b) ", log[2](D))), ylab="S") points(jitter(logb(D, 2), amount=.04), S, pch=y + 1, cex=.5) } else { contour(logb(xa, 2), ya, za, xlab="(b)log[2](D)", ylab="S") points(jitter(logb(D[y==0], 2), factor=.4), S[y==0], pch=1, cex=.5) points(jitter(logb(D[y==1], 2), factor=.4), S[y==1], pch=2, cex=.5)} }) par(op) # Fig. 12.5 xx <- seq(0, 1, length=100) op <- par(mfrow=c(1, 2), mar=c(4, 3, 0, .5) + .1, mgp=c(2, 1, 0), cex=0.6) plot(xx, exp(.4009 + 4.9098*xx), type="l", xlab="(a) S", ylab="Odds multiplier") with(blowBF, xx <- seq(min(logb(D, 2)), max(logb(D, 2)), length=100)) plot(2^(xx), exp(coef(m3)[3]/10 + coef(m3)[4]*xx/10), type="l", xlab="(b) D", ylab="Odds multiplier") par(op) summary(m2) summary(m3) print(paste("Pearson's X^2:", round(sum(residuals(m3, type="pearson")^2), 2))) anova(m1, m2, m3, test="Chisq") anova(m1, m3, test="Chisq") m0 <- update(m1, ~ . -logb(D, 2)) anova(m0, m1, m2, m3, test="Chisq") ############################################################################# # Titanic data dt <- titanic head(titanic) mysummary <- function(m){c(df=m$df.residual, G2=m$deviance, X2=sum(residuals(m, type="pearson")^2) )} m1 <- glm(cbind(Surv, N-Surv) ~ Class + Age + Sex, data=titanic, family=binomial()) m2 <- update(m1, ~ . + Class:Sex) m3 <- update(m2, ~ . + Class:Age) m4 <- update(m3, ~ . + Age:Sex) m5 <- update(m4, ~ Class:Age:Sex) ans <- mysummary(m1) ans <- rbind(ans, mysummary(m2)) ans <- rbind(ans, mysummary(m3)) ans <- rbind(ans, mysummary(m4)) ans <- rbind(ans, mysummary(m5)) row.names(ans) <- c( m1$formula, m2$formula, m3$formula, m4$formula, m5$formula) ans
power <- read.table("household_power_consumption.txt", sep=";", header = TRUE, fill = FALSE, strip.white=TRUE, stringsAsFactors=FALSE) power$DateTime <- strptime( paste( power$Date, power$Time ), format="%d/%m/%Y %H:%M:%S" ) power$Date <- as.Date( power$Date, format="%d/%m/%Y" ) power <- subset( power, power$Date >= "2007-02-01" & power$Date <= "2007-02-02" ) power$Global_active_power <- as.numeric( power$Global_active_power ) power$Global_reactive_power <- as.numeric( power$Global_reactive_power ) power$Voltage <- as.numeric( power$Voltage ) power$Global_intensity <- as.numeric( power$Global_intensity ) power$Sub_metering_1 <- as.numeric( power$Sub_metering_1 ) power$Sub_metering_2 <- as.numeric( power$Sub_metering_2 ) png( file="plot3.png" ) plot( power$DateTime, power$Sub_metering_1, xlab="", ylab="Energy sub metering", type="n" ) lines( power$DateTime, power$Sub_metering_1, col="black" ) lines( power$DateTime, power$Sub_metering_2, col="red" ) lines( power$DateTime, power$Sub_metering_3, col="blue" ) legend("topright", lwd=c(1,1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.off()
/plot3.R
no_license
xaibeing/ExData_Plotting1
R
false
false
1,166
r
power <- read.table("household_power_consumption.txt", sep=";", header = TRUE, fill = FALSE, strip.white=TRUE, stringsAsFactors=FALSE) power$DateTime <- strptime( paste( power$Date, power$Time ), format="%d/%m/%Y %H:%M:%S" ) power$Date <- as.Date( power$Date, format="%d/%m/%Y" ) power <- subset( power, power$Date >= "2007-02-01" & power$Date <= "2007-02-02" ) power$Global_active_power <- as.numeric( power$Global_active_power ) power$Global_reactive_power <- as.numeric( power$Global_reactive_power ) power$Voltage <- as.numeric( power$Voltage ) power$Global_intensity <- as.numeric( power$Global_intensity ) power$Sub_metering_1 <- as.numeric( power$Sub_metering_1 ) power$Sub_metering_2 <- as.numeric( power$Sub_metering_2 ) png( file="plot3.png" ) plot( power$DateTime, power$Sub_metering_1, xlab="", ylab="Energy sub metering", type="n" ) lines( power$DateTime, power$Sub_metering_1, col="black" ) lines( power$DateTime, power$Sub_metering_2, col="red" ) lines( power$DateTime, power$Sub_metering_3, col="blue" ) legend("topright", lwd=c(1,1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.off()
cutltraj <- function(ltraj, criterion, value.NA = FALSE, nextr = TRUE, ...) { if (!inherits(ltraj, "ltraj")) stop("ltraj should be of class \"ltraj\"") infol <- infolocs(ltraj) res <- list() k <- 1 for (i in 1:length(ltraj)) { if (!is.null(infol)) { att <- attributes(ltraj[[i]]) x <- cbind(ltraj[[i]], infol[[i]]) attr(x, "id") <- att$id attr(x, "burst") <- att$burst } else { x <- ltraj[[i]] } ex <- parse(text = criterion) coin <- eval(ex, envir = x) coin[is.na(coin)] <- value.NA if (nextr) { kwa <- c(1, cumsum(as.numeric(coin))+1) kwa <- kwa[1:(length(kwa)-1)] } else { kwa <- cumsum(as.numeric(coin)) x <- x[!coin,] kwa <- kwa[!coin] } mkk <- nchar(max(kwa)) kwa <- sapply(kwa, function(hh) { nc <- nchar(hh) if (mkk-nc>0) { return(paste(c(rep("0",mkk-nc),hh), collapse="")) } else { return(as.character(hh)) }}) bu <- factor(paste(attr(x,"burst"),kwa,sep=".")) id <- factor(rep(attr(x,"id"), nrow(x))) if (is.null(infol)) { res[[i]] <- as.ltraj(id=id, xy=x[,c("x","y")], date=x$date, burst=bu, typeII=attr(ltraj,"typeII"), infolocs=infol) } else { inf <- x[,(names(x)%in%names(infol[[i]])), drop=FALSE] res[[i]] <- as.ltraj(id=id, xy=x[,c("x","y")], date=x$date, burst=bu, typeII=attr(ltraj,"typeII"), infolocs=inf) } } if (length(res)>1) { res <- do.call("c.ltraj", res) } else { res <- res[[1]] } rrr <- unlist(lapply(res,nrow)) resb <- res[rrr>=3] if (length(res)!=length(resb)) warning(paste("At least 3 relocations are needed for a burst\n", sum(rrr[rrr<3]), "relocations have been deleted")) resb <- rec(resb,...) return(resb) } bindltraj <- function(ltraj, ...) { if (!inherits(ltraj, "ltraj")) stop("ltraj should be of class \"ltraj\"") traj <- .ltraj2traj(ltraj) traj$burst <- traj$id return(.traj2ltraj(traj, ...)) }
/adehabitatLT/R/cutltraj.r
no_license
ingted/R-Examples
R
false
false
2,455
r
cutltraj <- function(ltraj, criterion, value.NA = FALSE, nextr = TRUE, ...) { if (!inherits(ltraj, "ltraj")) stop("ltraj should be of class \"ltraj\"") infol <- infolocs(ltraj) res <- list() k <- 1 for (i in 1:length(ltraj)) { if (!is.null(infol)) { att <- attributes(ltraj[[i]]) x <- cbind(ltraj[[i]], infol[[i]]) attr(x, "id") <- att$id attr(x, "burst") <- att$burst } else { x <- ltraj[[i]] } ex <- parse(text = criterion) coin <- eval(ex, envir = x) coin[is.na(coin)] <- value.NA if (nextr) { kwa <- c(1, cumsum(as.numeric(coin))+1) kwa <- kwa[1:(length(kwa)-1)] } else { kwa <- cumsum(as.numeric(coin)) x <- x[!coin,] kwa <- kwa[!coin] } mkk <- nchar(max(kwa)) kwa <- sapply(kwa, function(hh) { nc <- nchar(hh) if (mkk-nc>0) { return(paste(c(rep("0",mkk-nc),hh), collapse="")) } else { return(as.character(hh)) }}) bu <- factor(paste(attr(x,"burst"),kwa,sep=".")) id <- factor(rep(attr(x,"id"), nrow(x))) if (is.null(infol)) { res[[i]] <- as.ltraj(id=id, xy=x[,c("x","y")], date=x$date, burst=bu, typeII=attr(ltraj,"typeII"), infolocs=infol) } else { inf <- x[,(names(x)%in%names(infol[[i]])), drop=FALSE] res[[i]] <- as.ltraj(id=id, xy=x[,c("x","y")], date=x$date, burst=bu, typeII=attr(ltraj,"typeII"), infolocs=inf) } } if (length(res)>1) { res <- do.call("c.ltraj", res) } else { res <- res[[1]] } rrr <- unlist(lapply(res,nrow)) resb <- res[rrr>=3] if (length(res)!=length(resb)) warning(paste("At least 3 relocations are needed for a burst\n", sum(rrr[rrr<3]), "relocations have been deleted")) resb <- rec(resb,...) return(resb) } bindltraj <- function(ltraj, ...) { if (!inherits(ltraj, "ltraj")) stop("ltraj should be of class \"ltraj\"") traj <- .ltraj2traj(ltraj) traj$burst <- traj$id return(.traj2ltraj(traj, ...)) }
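# --- Illustrative aside (not from the package file above) --------------------
# cutltraj() above splits each burst of an ltraj object wherever the string
# `criterion` evaluates to TRUE in the burst's data frame (see the parse()/
# eval() lines), then drops bursts shorter than 3 relocations. A hedged usage
# sketch, assuming the adehabitatLT example data `puechcirc` and its usual
# `dt` column (seconds until the next relocation):
library(adehabitatLT)
data(puechcirc)
# start a new burst wherever more than one hour passes before the next fix
tr_cut <- cutltraj(puechcirc, "dt > 3600", nextr = TRUE)
tr_cut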
# set the file address: #setwd("/Users/obadiah/dialoguesystems") mydata <- read.csv("a.csv", header=TRUE, sep=",") # putting the number of words per utterance into a list for Speaker1 and Speaker2 Speaker1Counts <- subset(mydata, Speaker==1)$Counts Speaker2Counts <- subset(mydata, Speaker==2)$Counts # Median value for Speaker1 and Speaker2 print(median(Speaker1Counts)) print(median(Speaker2Counts)) # Mode value for Speaker1 and Speaker2 mode <- print(names(sort(-table(Speaker1Counts)))[1]) print(mode) # Standart deviation for Speaker1 and Speaker2 print(sd(Speaker1Counts)) print(sd(Speaker2Counts)) # Histogram. Still working on it # First hist(Speaker1Counts, main="Histogram", xlab="Number", ylab="Frequency", border="blue", col="green", xlim = c(0, 100.0), ylim = c(0, 30.0) ) #Second hist(Speaker2Counts, main="Histogram", xlab="Number", ylab="Frequency", border="blue", col="green", xlim = c(0, 300.0), ylim = c(0, 10.0), breaks=length(Speaker2Counts)) #Both Link: http://www.theanalysisfactor.com/r-tutorial-part-12/ B <- c(Speaker1Counts, Speaker2Counts) print(B) hist(B, main="Histogram", xlab="Number", ylab="Frequency", border="blue", col="green", xlim = c(0, 300.0), ylim = c(0, 20.0), breaks=length(Speaker1Counts)) moredata <- read.csv("b.csv", header=TRUE, sep=",") # putting the number of words per utterance into a list for Speaker1 and Speaker2 Speaker1Money <- subset(moredata, Speaker==1)$Money Speaker2Money <- subset(moredata, Speaker==2)$Money chisq.test(Speaker1Money, Speaker2Money, correct = TRUE, p = rep(1/length(Speaker1Money), length(Speaker1Money)), rescale.p = FALSE, simulate.p.value = FALSE, B = 2000)
/hw2_R.R
no_license
mitchellirvin/dialoguesystems
R
false
false
1,817
r
# set the file address: #setwd("/Users/obadiah/dialoguesystems") mydata <- read.csv("a.csv", header=TRUE, sep=",") # putting the number of words per utterance into a list for Speaker1 and Speaker2 Speaker1Counts <- subset(mydata, Speaker==1)$Counts Speaker2Counts <- subset(mydata, Speaker==2)$Counts # Median value for Speaker1 and Speaker2 print(median(Speaker1Counts)) print(median(Speaker2Counts)) # Mode value for Speaker1 and Speaker2 mode <- print(names(sort(-table(Speaker1Counts)))[1]) print(mode) # Standart deviation for Speaker1 and Speaker2 print(sd(Speaker1Counts)) print(sd(Speaker2Counts)) # Histogram. Still working on it # First hist(Speaker1Counts, main="Histogram", xlab="Number", ylab="Frequency", border="blue", col="green", xlim = c(0, 100.0), ylim = c(0, 30.0) ) #Second hist(Speaker2Counts, main="Histogram", xlab="Number", ylab="Frequency", border="blue", col="green", xlim = c(0, 300.0), ylim = c(0, 10.0), breaks=length(Speaker2Counts)) #Both Link: http://www.theanalysisfactor.com/r-tutorial-part-12/ B <- c(Speaker1Counts, Speaker2Counts) print(B) hist(B, main="Histogram", xlab="Number", ylab="Frequency", border="blue", col="green", xlim = c(0, 300.0), ylim = c(0, 20.0), breaks=length(Speaker1Counts)) moredata <- read.csv("b.csv", header=TRUE, sep=",") # putting the number of words per utterance into a list for Speaker1 and Speaker2 Speaker1Money <- subset(moredata, Speaker==1)$Money Speaker2Money <- subset(moredata, Speaker==2)$Money chisq.test(Speaker1Money, Speaker2Money, correct = TRUE, p = rep(1/length(Speaker1Money), length(Speaker1Money)), rescale.p = FALSE, simulate.p.value = FALSE, B = 2000)
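# --- Illustrative aside (not from the homework script above) -----------------
# The mode above is taken as the first name of a sorted frequency table, which
# returns a character string. A small self-contained helper (hypothetical name
# stat_mode) that returns the most frequent value in its original type:
stat_mode <- function(x) {
  ux <- unique(x)
  ux[which.max(tabulate(match(x, ux)))]  # value with the highest count
}
stat_mode(c(3, 7, 7, 2, 7, 3))  # 7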
#' DHS data is delivered in .zip files and the names of these zip files #' usually (but not always) encode the survey code (country, round, and #' release). These scheme means that at the listing stage the contents #' can not be easily renamed althought _we_ assume case-ignorant naming #' and later convert all names to lower case. #' #' @param f paths to .zip files from DHS. #' @return Here for each .zip file we calculate a list of contained #' files and the survey code encoded in the names of these nested files. #' @export list_files <- function(f) { all_files <- lapply(f, unzip, list=TRUE) %>% lapply(function(x) x[['Name']] %>% list()) names(all_files) <- f all_files <- lapply(all_files, function(x) list( putative_survey = x[[1]] %>% basename %>% tolower %>% substr(1,6), original_file = x[[1]]) ) return(all_files) } #' While DHS data _usually_ encodes the survey code (country, round, and #' release) in the file names, sometimes these are not correct. It is #' possible to run a straightforward check by looking in the contained #' Stata files and generating an independent label from the 'v000' column #' which concatenates the country code and the DHS round (not release) #' code. This function takes a list produced by `list_files` and #' inserts the internal country and round codes. #' #' @param l output of `list_files` function. #' @return same as input with (where possible) inserted `internal_country_code` #' and `internal_round_code` elements. #' @export insert_internal_codes <- function(file_index) { scratch = tempdir() for (zip_file in names(file_index)) { file_index[[zip_file]][['internal_country_code']] <- list() file_index[[zip_file]][['internal_round_code']] <- list() file_index[[zip_file]][['bad_dta_file']] <- vector(mode='character', length=0) internal_files = file_index[[zip_file]][['original_file']] dta_files = internal_files[internal_files %>% sapply(has_extension, e='dta')] unzip(zipfile = zip_file, files = dta_files, exdir = scratch) for (f in dta_files) { dta_f = try(readstata13::read.dta13(file = file.path(scratch, f))) if (isTRUE(length(dta_f) == 1) && class(dta_f) == 'try-error') { file_index[[zip_file]][['bad_dta_file']] <- c( file_index[[zip_file]][['bad_dta_file']], dta_f) next } if (!is.null(dta_f[['v000']])) { code = unique(dta_f[['v000']]) if (length(code) != 1) stop("Multiple survey codes in one survey file.") file_index[[zip_file]][['internal_country_code']] = c( file_index[[zip_file]][['internal_country_code']], substr(code, 1, 2)) file_index[[zip_file]][['internal_round_code']] = c( file_index[[zip_file]][['internal_round_code']], substr(code, 3, 3)) } rm(dta_f); gc() } junk <- dir(path = scratch, full.names=TRUE) file.remove(junk, recursive=TRUE) } return(file_index) } #' Extract .dta files from DHS .zip files and insert data on #' extracted .dta files into the appropriate file_index elements. #' Files are copied to the output path all lower-case as is #' our convention. #' #' @param file_index holding info on zip files and contents #' generated by, at least, `list_files`. #' @param output_path where extracted .dta files are written. #' @return file_index updated with an 'extracted_file` element #' for each entry that contained a .dta file. 
#' @export
extract_dta_files <- function(file_index, output_path) {
  scratch = tempdir()
  for (zip_file in names(file_index)) {
    # store the list of extracted files on this zip's index entry so it is
    # returned to the caller
    file_index[[zip_file]][['extracted_file']] <- list()
    internal_files = file_index[[zip_file]][['original_file']]
    dta_files = internal_files[internal_files %>% sapply(has_extension, e='dta')]
    unzip(zipfile = zip_file, files = dta_files, exdir = scratch)
    for (f in dta_files) file.copy(
      from = file.path(scratch, f),
      to = file.path(output_path, tolower(f))
    )
    file_index[[zip_file]][['extracted_file']] <- c(
      file_index[[zip_file]][['extracted_file']], tolower(dta_files))
  }
  return(file_index)
}
/R/index.R
no_license
sakrejda/pdhs
R
false
false
4,093
r
#' DHS data is delivered in .zip files and the names of these zip files #' usually (but not always) encode the survey code (country, round, and #' release). These scheme means that at the listing stage the contents #' can not be easily renamed althought _we_ assume case-ignorant naming #' and later convert all names to lower case. #' #' @param f paths to .zip files from DHS. #' @return Here for each .zip file we calculate a list of contained #' files and the survey code encoded in the names of these nested files. #' @export list_files <- function(f) { all_files <- lapply(f, unzip, list=TRUE) %>% lapply(function(x) x[['Name']] %>% list()) names(all_files) <- f all_files <- lapply(all_files, function(x) list( putative_survey = x[[1]] %>% basename %>% tolower %>% substr(1,6), original_file = x[[1]]) ) return(all_files) } #' While DHS data _usually_ encodes the survey code (country, round, and #' release) in the file names, sometimes these are not correct. It is #' possible to run a straightforward check by looking in the contained #' Stata files and generating an independent label from the 'v000' column #' which concatenates the country code and the DHS round (not release) #' code. This function takes a list produced by `list_files` and #' inserts the internal country and round codes. #' #' @param l output of `list_files` function. #' @return same as input with (where possible) inserted `internal_country_code` #' and `internal_round_code` elements. #' @export insert_internal_codes <- function(file_index) { scratch = tempdir() for (zip_file in names(file_index)) { file_index[[zip_file]][['internal_country_code']] <- list() file_index[[zip_file]][['internal_round_code']] <- list() file_index[[zip_file]][['bad_dta_file']] <- vector(mode='character', length=0) internal_files = file_index[[zip_file]][['original_file']] dta_files = internal_files[internal_files %>% sapply(has_extension, e='dta')] unzip(zipfile = zip_file, files = dta_files, exdir = scratch) for (f in dta_files) { dta_f = try(readstata13::read.dta13(file = file.path(scratch, f))) if (isTRUE(length(dta_f) == 1) && class(dta_f) == 'try-error') { file_index[[zip_file]][['bad_dta_file']] <- c( file_index[[zip_file]][['bad_dta_file']], dta_f) next } if (!is.null(dta_f[['v000']])) { code = unique(dta_f[['v000']]) if (length(code) != 1) stop("Multiple survey codes in one survey file.") file_index[[zip_file]][['internal_country_code']] = c( file_index[[zip_file]][['internal_country_code']], substr(code, 1, 2)) file_index[[zip_file]][['internal_round_code']] = c( file_index[[zip_file]][['internal_round_code']], substr(code, 3, 3)) } rm(dta_f); gc() } junk <- dir(path = scratch, full.names=TRUE) file.remove(junk, recursive=TRUE) } return(file_index) } #' Extract .dta files from DHS .zip files and insert data on #' extracted .dta files into the appropriate file_index elements. #' Files are copied to the output path all lower-case as is #' our convention. #' #' @param file_index holding info on zip files and contents #' generated by, at least, `list_files`. #' @param output_path where extracted .dta files are written. #' @return file_index updated with an 'extracted_file` element #' for each entry that contained a .dta file. 
#' @export
extract_dta_files <- function(file_index, output_path) {
  scratch = tempdir()
  for (zip_file in names(file_index)) {
    # store the list of extracted files on this zip's index entry so it is
    # returned to the caller
    file_index[[zip_file]][['extracted_file']] <- list()
    internal_files = file_index[[zip_file]][['original_file']]
    dta_files = internal_files[internal_files %>% sapply(has_extension, e='dta')]
    unzip(zipfile = zip_file, files = dta_files, exdir = scratch)
    for (f in dta_files) file.copy(
      from = file.path(scratch, f),
      to = file.path(output_path, tolower(f))
    )
    file_index[[zip_file]][['extracted_file']] <- c(
      file_index[[zip_file]][['extracted_file']], tolower(dta_files))
  }
  return(file_index)
}
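# --- Illustrative aside (not from the package file above) --------------------
# The three functions above are meant to be chained: build the index from the
# DHS .zip paths, add the internal country/round codes read from the Stata
# files, then extract the .dta files. A hedged sketch: `dhs_zips` and `out_dir`
# are placeholder paths, and has_extension() is assumed to be defined elsewhere
# in the package:
library(magrittr)
dhs_zips <- list.files("data-raw/dhs", pattern = "\\.zip$", full.names = TRUE)
out_dir  <- "data-raw/dta"
idx <- list_files(dhs_zips)
idx <- insert_internal_codes(idx)
idx <- extract_dta_files(idx, output_path = out_dir)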
# LIBRARIES
library(psych)
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(nortest)
library(plotly)
library(htmlwidgets)
library(Hmisc)
library(ggcorrplot)
library(stargazer)

# DATA
da <- DATA
names(da)
IB <- select(da, IB1:IB14)
IB

# DESCRIPTIVES
desc <- as.data.frame(psych::describe(select(da, IB1:IB14))) %>%
  round(3) %>%
  select(-trimmed, -mad, -vars) %>%
  relocate(min, max, range, .before = mean) %>%
  relocate(median, .after = mean) %>%
  relocate(sd, .before = se)
desc

# SHAPIRO-WILK (n < 50)
shapiro <- function(x){
  shapiro.test(x)
}
shap <- function(x){
  for(i in 1:ncol(x)){
    i <- shapiro(x[[i]])
    print(i)
  }
}
shap(IB)

# KOLMOGOROV-SMIRNOV (n > 50)
kolmogorov <- function(x){
  lillie.test(x)
}
KMO <- function(x){
  for(i in 1:ncol(x)){
    i <- kolmogorov(x[[i]])
    print(i)
  }
}
KMO(IB)

# BARPLOT
gr <- ggplot(IB, aes(x=IB1))+
  geom_bar(fill="light blue", alpha=0.5)+
  theme_dark()

# SCATTER PLOT
gr <- ggplot(da, aes(x=SEXO, y=EDAD))+
  geom_jitter()+
  scale_x_log10()
gr

# Interactive version
pp1 <- ggplotly(gr)
saveWidget(as_widget(pp1), "interactivo.html")

# FREQUENCIES AND NORMALITY, ONE VARIABLE
ggplot(da, aes(x = IB1)) +
  geom_histogram(aes(y =..density..), colour = "#7F7F7F", bins = 14,
                 fill = "pink", alpha = 0.3) +
  stat_function(fun = dnorm, args = list(mean = mean(da$IB1), sd = sd(da$IB1)),
                colour="red") +
  geom_vline(aes(xintercept=mean(IB1)), colour="blue", linetype="dashed", size=1) +
  theme_dark()

# FREQUENCIES AND NORMALITY, SEVERAL VARIABLES
histograma <- function(x){
  print(ggplot(da, aes(x = x)) +
    geom_histogram(aes(y =..density..), colour = "#7F7F7F", fill = "pink",
                   alpha = 0.3, bins = 14) +
    stat_function(fun = dnorm, args = list(mean = mean(x), sd = sd(x)), colour="red") +
    geom_vline(aes(xintercept=mean(x)), colour="blue", linetype="dashed", size=1) +
    theme_dark())
}
GRUPO <- function(x){
  for(i in 1:ncol(x)){
    i <- histograma(x[[i]])
  }
}
pdf("normal.pdf")
GRUPO(IB)
dev.off()  # close the PDF device so normal.pdf is actually written

# CORRELATION
corr <- rcorr(as.matrix(IB), type = "pearson")
corr
ggcorrplot(corr$r, hc.order = T, type = "lower", lab = T)
stargazer(data.frame(corr$r), summary = F, type = "html", out = "correlacion.html")
/plots.R
permissive
AngelGarciaODiana/R
R
false
false
2,590
r
# LIBRARIES
library(psych)
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(nortest)
library(plotly)
library(htmlwidgets)
library(Hmisc)
library(ggcorrplot)
library(stargazer)

# DATA
da <- DATA
names(da)
IB <- select(da, IB1:IB14)
IB

# DESCRIPTIVES
desc <- as.data.frame(psych::describe(select(da, IB1:IB14))) %>%
  round(3) %>%
  select(-trimmed, -mad, -vars) %>%
  relocate(min, max, range, .before = mean) %>%
  relocate(median, .after = mean) %>%
  relocate(sd, .before = se)
desc

# SHAPIRO-WILK (n < 50)
shapiro <- function(x){
  shapiro.test(x)
}
shap <- function(x){
  for(i in 1:ncol(x)){
    i <- shapiro(x[[i]])
    print(i)
  }
}
shap(IB)

# KOLMOGOROV-SMIRNOV (n > 50)
kolmogorov <- function(x){
  lillie.test(x)
}
KMO <- function(x){
  for(i in 1:ncol(x)){
    i <- kolmogorov(x[[i]])
    print(i)
  }
}
KMO(IB)

# BARPLOT
gr <- ggplot(IB, aes(x=IB1))+
  geom_bar(fill="light blue", alpha=0.5)+
  theme_dark()

# SCATTER PLOT
gr <- ggplot(da, aes(x=SEXO, y=EDAD))+
  geom_jitter()+
  scale_x_log10()
gr

# Interactive version
pp1 <- ggplotly(gr)
saveWidget(as_widget(pp1), "interactivo.html")

# FREQUENCIES AND NORMALITY, ONE VARIABLE
ggplot(da, aes(x = IB1)) +
  geom_histogram(aes(y =..density..), colour = "#7F7F7F", bins = 14,
                 fill = "pink", alpha = 0.3) +
  stat_function(fun = dnorm, args = list(mean = mean(da$IB1), sd = sd(da$IB1)),
                colour="red") +
  geom_vline(aes(xintercept=mean(IB1)), colour="blue", linetype="dashed", size=1) +
  theme_dark()

# FREQUENCIES AND NORMALITY, SEVERAL VARIABLES
histograma <- function(x){
  print(ggplot(da, aes(x = x)) +
    geom_histogram(aes(y =..density..), colour = "#7F7F7F", fill = "pink",
                   alpha = 0.3, bins = 14) +
    stat_function(fun = dnorm, args = list(mean = mean(x), sd = sd(x)), colour="red") +
    geom_vline(aes(xintercept=mean(x)), colour="blue", linetype="dashed", size=1) +
    theme_dark())
}
GRUPO <- function(x){
  for(i in 1:ncol(x)){
    i <- histograma(x[[i]])
  }
}
pdf("normal.pdf")
GRUPO(IB)
dev.off()  # close the PDF device so normal.pdf is actually written

# CORRELATION
corr <- rcorr(as.matrix(IB), type = "pearson")
corr
ggcorrplot(corr$r, hc.order = T, type = "lower", lab = T)
stargazer(data.frame(corr$r), summary = F, type = "html", out = "correlacion.html")
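# --- Illustrative aside (not from the repository file above) -----------------
# The per-column normality loops above only print each test; the same checks
# can be written with lapply, which also keeps the results for later use. A
# minimal sketch on made-up data (IB_demo is invented):
IB_demo <- as.data.frame(replicate(3, rnorm(40)))
shapiro_results <- lapply(IB_demo, shapiro.test)    # one htest object per column
sapply(shapiro_results, function(t) t$p.value)      # just the p-values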
library(highcharter) suppressPackageStartupMessages(library(dplyr)) suppressPackageStartupMessages(library(purrr)) n <- 10 hc <- highchart() %>% hc_add_series(data = rnorm(n), name = "s1", id = "g1") %>% hc_add_series(data = rnorm(n), name = "s2", id = "g2") hc series <- data_frame( name = paste0("series", seq(1:n)), linkedTo = ifelse(runif(n) < 0.5, "g1", "g2"), data = map(1:10, rnorm, n = 10)) %>% list_parse() series[[1]] hc %>% hc_add_series_list(series)
/devscripts/highcharter-issues-121.R
no_license
APKBridget/highcharter
R
false
false
511
r
library(highcharter) suppressPackageStartupMessages(library(dplyr)) suppressPackageStartupMessages(library(purrr)) n <- 10 hc <- highchart() %>% hc_add_series(data = rnorm(n), name = "s1", id = "g1") %>% hc_add_series(data = rnorm(n), name = "s2", id = "g2") hc series <- data_frame( name = paste0("series", seq(1:n)), linkedTo = ifelse(runif(n) < 0.5, "g1", "g2"), data = map(1:10, rnorm, n = 10)) %>% list_parse() series[[1]] hc %>% hc_add_series_list(series)
testlist <- list(end = NULL, start = NULL, x = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 3L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L))) result <- do.call(dynutils::project_to_segments,testlist) str(result)
/dynutils/inst/testfiles/project_to_segments/AFL_project_to_segments/project_to_segments_valgrind_files/1609871889-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
326
r
testlist <- list(end = NULL, start = NULL, x = structure(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 3L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L))) result <- do.call(dynutils::project_to_segments,testlist) str(result)
library(rhandsontable) shinyServer(function(input, output, session) { fname = tempfile() observe({ # remove button and isolate to update file automatically # after each table change input$saveBtn hot = isolate(input$hot) if (!is.null(hot)) { write.csv(hot, fname) print(fname) } }) output$hot = renderRHandsontable({ if (!is.null(input$hot)) { DF = hot_to_r(input$hot) } else { DF = read.csv("mtcars.csv", stringsAsFactors = FALSE) } rhandsontable(DF) %>% hot_table(highlightCol = TRUE, highlightRow = TRUE) }) })
/inst/examples/rhandsontable_datafile/server.R
permissive
P01010000/rhandsontable
R
false
false
600
r
library(rhandsontable) shinyServer(function(input, output, session) { fname = tempfile() observe({ # remove button and isolate to update file automatically # after each table change input$saveBtn hot = isolate(input$hot) if (!is.null(hot)) { write.csv(hot, fname) print(fname) } }) output$hot = renderRHandsontable({ if (!is.null(input$hot)) { DF = hot_to_r(input$hot) } else { DF = read.csv("mtcars.csv", stringsAsFactors = FALSE) } rhandsontable(DF) %>% hot_table(highlightCol = TRUE, highlightRow = TRUE) }) })
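# --- Illustrative aside (not from the example file above) --------------------
# The server above expects a matching ui.R with a table output named "hot" and
# a button named "saveBtn"; because input$hot is wrapped in isolate(), the CSV
# is rewritten when the button value changes, and the file's own comment notes
# that removing the button reference and the isolate() would save after every
# table edit instead. A minimal ui sketch under those assumptions:
library(shiny)
library(rhandsontable)
shinyUI(fluidPage(
  actionButton("saveBtn", "Save"),   # read as input$saveBtn in server.R
  rHandsontableOutput("hot")         # filled by output$hot in server.R
))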
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inference_optimhelp.R
\name{evaluate_logpostpdf}
\alias{evaluate_logpostpdf}
\title{Evaluate posterior density function}
\usage{
evaluate_logpostpdf(map, zprior, U, obs, zref)
}
\arguments{
\item{map}{Mapping object. Usually a compound map, see \code{\link{create_compound_map}}}

\item{zprior}{Vector of prior estimates of the independent variables
(i.e., associated with nodes without parent nodes)}

\item{U}{Prior covariance matrix of the independent variables}

\item{obs}{Vector with observed values of dependent nodes.
Must be of the same size as \code{zprior}. An \code{NA} value in this vector
means that the corresponding variable was not observed.}

\item{zref}{Vector with values of independent variables used as reference
vector in the Taylor expansion. The posterior pdf is also evaluated exactly
at this point.}
}
\value{
Returns a list with the following elements:
\tabular{ll}{
\code{val} \tab Value of the posterior probability density function evaluated at \code{zref} \cr
\code{jac} \tab Gradient of the posterior probability density function evaluated at \code{zref}
}
}
\description{
Evaluate the posterior density function for a given vector of independent
variables.
}
/man/evaluate_logpostpdf.Rd
permissive
gschnabel/nucdataBaynet
R
false
true
1,266
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inference_optimhelp.R
\name{evaluate_logpostpdf}
\alias{evaluate_logpostpdf}
\title{Evaluate posterior density function}
\usage{
evaluate_logpostpdf(map, zprior, U, obs, zref)
}
\arguments{
\item{map}{Mapping object. Usually a compound map, see \code{\link{create_compound_map}}}

\item{zprior}{Vector of prior estimates of the independent variables
(i.e., associated with nodes without parent nodes)}

\item{U}{Prior covariance matrix of the independent variables}

\item{obs}{Vector with observed values of dependent nodes.
Must be of the same size as \code{zprior}. An \code{NA} value in this vector
means that the corresponding variable was not observed.}

\item{zref}{Vector with values of independent variables used as reference
vector in the Taylor expansion. The posterior pdf is also evaluated exactly
at this point.}
}
\value{
Returns a list with the following elements:
\tabular{ll}{
\code{val} \tab Value of the posterior probability density function evaluated at \code{zref} \cr
\code{jac} \tab Gradient of the posterior probability density function evaluated at \code{zref}
}
}
\description{
Evaluate the posterior density function for a given vector of independent
variables.
}
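# --- Illustrative aside (not from the .Rd file above) ------------------------
# The help page above documents the arguments but has no examples section; a
# hedged, non-runnable usage sketch built only from those arguments (map,
# zprior, U and obs are placeholders for objects created elsewhere in the
# nucdataBaynet workflow, and zref = zprior is just one natural expansion point):
# res <- evaluate_logpostpdf(map, zprior, U, obs, zref = zprior)
# res$val  # posterior density value at zref (see \value above)
# res$jac  # its gradient at zref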
library(tidyverse) library(dasper) library(foreach) library(doParallel) # Load data --------------------------------------------------------------- load("/home/dzhang/projects/RNA_seq_diag_mito/results/tidy_samp_metadata/mito_samp_metadata_tidy.rda") ref <- dasper:::.ref_load(ref = "/data/references/ensembl/gtf_gff3/v97/Homo_sapiens.GRCh38.97.gtf") # Functions --------------------------------------------------------------- add_pt_sex <- function(gene_counts_rse, ref){ # DDX3Y & XIST expression used as sex determinants sex_genes <- GenomicFeatures::genes(ref, filter = list(gene_id = c("ENSG00000067048", "ENSG00000229807"))) gene_counts_rse$sex <- as.character(NA) for(i in seq_len(dim(gene_counts_rse)[2])){ sex_genes_cov <- rtracklayer::import(gene_counts_rse$bw_path[i], which = sex_genes, as = "NumericList") %>% sum() stopifnot(sum(sex_genes_cov) > 0) gene_counts_rse$sex[i] <- ifelse(sex_genes_cov["ENSG00000229807"] > sex_genes_cov["ENSG00000067048"], "female", "male") %>% unname() } return(gene_counts_rse) } # Main -------------------------------------------------------------------- ##### Generate gene count matrices ##### # use one core per patient numCores <- nrow(mito_samp_metadata_tidy) registerDoParallel(numCores) foreach(i=1:nrow(mito_samp_metadata_tidy)) %dopar% { samp_id <- mito_samp_metadata_tidy[["samp_id_tidy"]][i] samp_bam <- mito_samp_metadata_tidy[["bam_path"]][i] print(stringr::str_c(Sys.time(), " - ", i, " - ", samp_id)) # run RNA-seq QC to obtain gene counts system( stringr::str_c("/tools/RNA-SeQC/rnaseqc.v2.3.4.linux", " /data/references/ensembl/gtf_gff3/v97/Homo_sapiens.GRCh38.97.genes.gtf", " ", samp_bam, " ", here::here("/results/aberrant_expression/get_gene_count_RSE/"), " -s ", samp_id, " -v -v") ) } ##### Check matrix is as expected ##### gene_count_test <- read_delim(here::here("results/aberrant_expression/get_gene_count_RSE/control_1.gene_reads.gct"), delim = "\t", skip = 2) # eye-balling expression values summary(gene_count_test$control_1) # 22746 genes with a TPM above 0 - looks about right sum(gene_count_test$control_1 > 0) rm(gene_count_test) ##### Merge all patient gene counts ##### gene_count_paths <- list.files(here::here("results/aberrant_expression/get_gene_count_RSE/"), pattern = "gene_reads", full.names = TRUE) for(i in seq_along(gene_count_paths)){ gene_counts <- read_delim(gene_count_paths[i], delim = "\t", skip = 2) if(i == 1){ gene_counts_all <- gene_counts }else{ gene_counts_all <- gene_counts_all %>% left_join(gene_counts) } } ##### Create RSE ##### # get gene info and order by genes gene_info <- GenomicFeatures::genes(ref, filter = list(gene_id = gene_counts_all[["Name"]])) stopifnot(all(names(gene_info) %in% gene_counts_all[["Name"]])) stopifnot(length(gene_info) %in% nrow(gene_counts_all)) gene_counts_all <- gene_counts_all %>% mutate(Name = Name %>% factor(names(gene_info))) %>% dplyr::arrange(Name) stopifnot(identical(names(gene_info), gene_counts_all[["Name"]] %>% as.character())) # get sample info and order by samples gene_counts_mat <- gene_counts_all %>% dplyr::select(-Name, -Description) %>% as.matrix() mito_samp_metadata_tidy <- mito_samp_metadata_tidy %>% as_tibble() %>% mutate(samp_id_tidy = samp_id_tidy %>% factor(colnames(gene_counts_mat))) %>% arrange(samp_id_tidy) stopifnot(identical(mito_samp_metadata_tidy[["samp_id_tidy"]] %>% as.character(), colnames(gene_counts_mat))) # convert gene counts into an RSE gene_counts_rse <- SummarizedExperiment::SummarizedExperiment(rowRanges = gene_info, colData = mito_samp_metadata_tidy, assays 
= list(count = gene_counts_mat)) ##### add patient sexes ##### gene_counts_rse <- add_pt_sex(gene_counts_rse, ref) ##### add batch ##### gene_counts_rse$batch <- ifelse(str_detect(gene_counts_rse$bw_path, "mito_add_pos_ctrls"), 2, 1) # Save data --------------------------------------------------------------- save(gene_counts_rse, file = here::here("results/aberrant_expression/get_gene_count_RSE/gene_counts_rse.rda"))
/scripts/aberrant_expression/1_get_gene_count_RSE.R
no_license
dzhang32/ATG7_RNAseq
R
false
false
4,571
r
library(tidyverse) library(dasper) library(foreach) library(doParallel) # Load data --------------------------------------------------------------- load("/home/dzhang/projects/RNA_seq_diag_mito/results/tidy_samp_metadata/mito_samp_metadata_tidy.rda") ref <- dasper:::.ref_load(ref = "/data/references/ensembl/gtf_gff3/v97/Homo_sapiens.GRCh38.97.gtf") # Functions --------------------------------------------------------------- add_pt_sex <- function(gene_counts_rse, ref){ # DDX3Y & XIST expression used as sex determinants sex_genes <- GenomicFeatures::genes(ref, filter = list(gene_id = c("ENSG00000067048", "ENSG00000229807"))) gene_counts_rse$sex <- as.character(NA) for(i in seq_len(dim(gene_counts_rse)[2])){ sex_genes_cov <- rtracklayer::import(gene_counts_rse$bw_path[i], which = sex_genes, as = "NumericList") %>% sum() stopifnot(sum(sex_genes_cov) > 0) gene_counts_rse$sex[i] <- ifelse(sex_genes_cov["ENSG00000229807"] > sex_genes_cov["ENSG00000067048"], "female", "male") %>% unname() } return(gene_counts_rse) } # Main -------------------------------------------------------------------- ##### Generate gene count matrices ##### # use one core per patient numCores <- nrow(mito_samp_metadata_tidy) registerDoParallel(numCores) foreach(i=1:nrow(mito_samp_metadata_tidy)) %dopar% { samp_id <- mito_samp_metadata_tidy[["samp_id_tidy"]][i] samp_bam <- mito_samp_metadata_tidy[["bam_path"]][i] print(stringr::str_c(Sys.time(), " - ", i, " - ", samp_id)) # run RNA-seq QC to obtain gene counts system( stringr::str_c("/tools/RNA-SeQC/rnaseqc.v2.3.4.linux", " /data/references/ensembl/gtf_gff3/v97/Homo_sapiens.GRCh38.97.genes.gtf", " ", samp_bam, " ", here::here("/results/aberrant_expression/get_gene_count_RSE/"), " -s ", samp_id, " -v -v") ) } ##### Check matrix is as expected ##### gene_count_test <- read_delim(here::here("results/aberrant_expression/get_gene_count_RSE/control_1.gene_reads.gct"), delim = "\t", skip = 2) # eye-balling expression values summary(gene_count_test$control_1) # 22746 genes with a TPM above 0 - looks about right sum(gene_count_test$control_1 > 0) rm(gene_count_test) ##### Merge all patient gene counts ##### gene_count_paths <- list.files(here::here("results/aberrant_expression/get_gene_count_RSE/"), pattern = "gene_reads", full.names = TRUE) for(i in seq_along(gene_count_paths)){ gene_counts <- read_delim(gene_count_paths[i], delim = "\t", skip = 2) if(i == 1){ gene_counts_all <- gene_counts }else{ gene_counts_all <- gene_counts_all %>% left_join(gene_counts) } } ##### Create RSE ##### # get gene info and order by genes gene_info <- GenomicFeatures::genes(ref, filter = list(gene_id = gene_counts_all[["Name"]])) stopifnot(all(names(gene_info) %in% gene_counts_all[["Name"]])) stopifnot(length(gene_info) %in% nrow(gene_counts_all)) gene_counts_all <- gene_counts_all %>% mutate(Name = Name %>% factor(names(gene_info))) %>% dplyr::arrange(Name) stopifnot(identical(names(gene_info), gene_counts_all[["Name"]] %>% as.character())) # get sample info and order by samples gene_counts_mat <- gene_counts_all %>% dplyr::select(-Name, -Description) %>% as.matrix() mito_samp_metadata_tidy <- mito_samp_metadata_tidy %>% as_tibble() %>% mutate(samp_id_tidy = samp_id_tidy %>% factor(colnames(gene_counts_mat))) %>% arrange(samp_id_tidy) stopifnot(identical(mito_samp_metadata_tidy[["samp_id_tidy"]] %>% as.character(), colnames(gene_counts_mat))) # convert gene counts into an RSE gene_counts_rse <- SummarizedExperiment::SummarizedExperiment(rowRanges = gene_info, colData = mito_samp_metadata_tidy, assays 
= list(count = gene_counts_mat)) ##### add patient sexes ##### gene_counts_rse <- add_pt_sex(gene_counts_rse, ref) ##### add batch ##### gene_counts_rse$batch <- ifelse(str_detect(gene_counts_rse$bw_path, "mito_add_pos_ctrls"), 2, 1) # Save data --------------------------------------------------------------- save(gene_counts_rse, file = here::here("results/aberrant_expression/get_gene_count_RSE/gene_counts_rse.rda"))
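# --- Illustrative aside (not from the repository file above) -----------------
# add_pt_sex() above labels a sample "female" when XIST (ENSG00000229807)
# coverage exceeds DDX3Y (ENSG00000067048) coverage, and "male" otherwise.
# The decision rule in isolation, on made-up coverage totals:
sex_genes_cov <- c(ENSG00000229807 = 5200, ENSG00000067048 = 12)  # hypothetical sums
ifelse(sex_genes_cov["ENSG00000229807"] > sex_genes_cov["ENSG00000067048"],
       "female", "male")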
# The cleaned script can be found in Datasets/clean/prescriber-info-categorical-cleaned.csv # This script is designated to provide a CSV which we can use for modeling # The data cleaning done: # Remove all categorical variables, ie State, Gender, etc. # Remove all opiates. # Select the top 10 most frequently prescribed non-opiates # Each column is a binary variable - either prescribed the drug or did not # The resulting CSV contains the following columns # State # Credentials # Specialty # Opioid.Prescriber <- The class we are trying to predict # Clear workspace rm(list=ls()) #Read the prescriber info and save into data frame prescriberInfo <- data.frame(read.csv("../raw/prescriber-info.csv", stringsAsFactors=FALSE)) #Read opioids from file opioids <- read.csv("../raw/opioids.csv") opioids <- as.character(opioids[,1]) # First column contains the names of the opiates opioids <- gsub("\ |-",".",opioids) # replace hyphens and spaces with periods to match the dataset #Remove all opioids, col count = 245/256 prescriberInfo <- prescriberInfo[, !names(prescriberInfo) %in% opioids] #Select drugs with highest frequency filterNumerical = c("Specialty", "Credentials", "State", "Opioid.Prescriber") prescriberInfo <- prescriberInfo[, names(prescriberInfo) %in% filterNumerical] #Select drugs with highest frequency filterNumerical = c("Gender", "Specialty", "NPI", "Credentials", "State", "Speciality", "Opioid.Prescriber") nonOpiates <- prescriberInfo[, !names(prescriberInfo) %in% filterNumerical] colSums(nonOpiates) ##Take the mean and remove unnecessary opiates temp <- data.frame(a=character(), b=numeric(), stringsAsFactors = FALSE) for (col in names(nonOpiates)) { temp[nrow(temp)+1, ] <- c(col, as.numeric(sum(nonOpiates[col] > 0) / nrow(nonOpiates))) } temp$b <- as.numeric(as.character(temp$b)) temp <- temp[order(temp$b), ] temp <- temp[1:11,] # Uncomment if >1 = 1 #for (col in names(temp)) { # prescriberInfo[col] <- ifelse(col, as.numeric(prescriberInfo[col])==0, 0, 1) #} garbageNames <- names(nonOpiates)[!(names(nonOpiates) %in% temp$a)] prescriberInfo <- prescriberInfo[, !names(prescriberInfo) %in% garbageNames] ##Write a cleaned CSV write.csv(prescriberInfo, "../clean/prescriber-info-categorical-cleaned.csv", row.names=FALSE)
/Datasets/cleaning scripts/cleanPrescribersCategorical.R
no_license
lilwizeguy/COEN-281-Final-Project
R
false
false
2,282
r
# The cleaned script can be found in Datasets/clean/prescriber-info-categorical-cleaned.csv # This script is designated to provide a CSV which we can use for modeling # The data cleaning done: # Remove all categorical variables, ie State, Gender, etc. # Remove all opiates. # Select the top 10 most frequently prescribed non-opiates # Each column is a binary variable - either prescribed the drug or did not # The resulting CSV contains the following columns # State # Credentials # Specialty # Opioid.Prescriber <- The class we are trying to predict # Clear workspace rm(list=ls()) #Read the prescriber info and save into data frame prescriberInfo <- data.frame(read.csv("../raw/prescriber-info.csv", stringsAsFactors=FALSE)) #Read opioids from file opioids <- read.csv("../raw/opioids.csv") opioids <- as.character(opioids[,1]) # First column contains the names of the opiates opioids <- gsub("\ |-",".",opioids) # replace hyphens and spaces with periods to match the dataset #Remove all opioids, col count = 245/256 prescriberInfo <- prescriberInfo[, !names(prescriberInfo) %in% opioids] #Select drugs with highest frequency filterNumerical = c("Specialty", "Credentials", "State", "Opioid.Prescriber") prescriberInfo <- prescriberInfo[, names(prescriberInfo) %in% filterNumerical] #Select drugs with highest frequency filterNumerical = c("Gender", "Specialty", "NPI", "Credentials", "State", "Speciality", "Opioid.Prescriber") nonOpiates <- prescriberInfo[, !names(prescriberInfo) %in% filterNumerical] colSums(nonOpiates) ##Take the mean and remove unnecessary opiates temp <- data.frame(a=character(), b=numeric(), stringsAsFactors = FALSE) for (col in names(nonOpiates)) { temp[nrow(temp)+1, ] <- c(col, as.numeric(sum(nonOpiates[col] > 0) / nrow(nonOpiates))) } temp$b <- as.numeric(as.character(temp$b)) temp <- temp[order(temp$b), ] temp <- temp[1:11,] # Uncomment if >1 = 1 #for (col in names(temp)) { # prescriberInfo[col] <- ifelse(col, as.numeric(prescriberInfo[col])==0, 0, 1) #} garbageNames <- names(nonOpiates)[!(names(nonOpiates) %in% temp$a)] prescriberInfo <- prescriberInfo[, !names(prescriberInfo) %in% garbageNames] ##Write a cleaned CSV write.csv(prescriberInfo, "../clean/prescriber-info-categorical-cleaned.csv", row.names=FALSE)
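# --- Illustrative aside (not from the repository file above) -----------------
# The header above says the goal is the top 10 most frequently prescribed
# non-opiates, but temp[order(temp$b), ] sorts ascending, so taking the first
# rows keeps the least frequent columns. A minimal sketch of a top-N selection
# by prescribing frequency, on a made-up drug-count data frame:
drug_counts <- data.frame(DrugA = c(0, 2, 5), DrugB = c(0, 0, 1), DrugC = c(3, 3, 3))
freq  <- colMeans(drug_counts > 0)                  # share of prescribers using each drug
top_n <- names(sort(freq, decreasing = TRUE))[1:2]  # keep the 2 most frequent (10 in the script)
drug_counts[, top_n, drop = FALSE]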
\name{calcBgGD} \alias{calcBgGD} %- Also NEED an "\alias" for EACH other topic documented here. \title{ Calculate Biogas Production from Gas Density Data (GD-BMP) } \description{ \code{calcBgGD} (for \emph{calc}ulate \emph{b}io\emph{g}as from \emph{GD} (gas density) measurements) calculates cumulative biogas, methane production and production rates from mass loss and volume measurements for any number of bottles. } \usage{ calcBgGD( # Main arguments dat, temp.vol, temp.grav, pres.vol, pres.grav, # Column names id.name, time.name, vol.name, m.pre.name = NULL, m.post.name, comp.name = 'xCH4', # Settings vented.mass = FALSE, averaging = 'final', temp.init = NULL, pres.init = NULL, headspace = NULL, vol.hs.name = NULL, headcomp = 'N2', # Calculation method and other settings vmethod = 'vol', comp.lim = c(0, 1), comp.sub = NA, imethod = 'linear', extrap = FALSE, addt0 = TRUE, showt0 = TRUE, dry = FALSE, # Warnings and messages std.message = TRUE, check = TRUE, # Units and standard conditions temp.std = getOption('temp.std', as.numeric(NA)), pres.std = getOption('pres.std', as.numeric(NA)), unit.temp = getOption('unit.temp', 'C'), unit.pres = getOption('unit.pres', 'atm') ) } %- maybe also "usage" for other objects documented here. \arguments{ \item{dat}{ a data frame with bottle identification code, time of measurement (as \code{numeric}, or \code{POSIX}), bottle mass, and measured biogas volume. See Details section for details on units. Additional columns can be present--these will be returned in the output data frame. } \item{temp.vol}{ temperature at which biogas volume was measured. } \item{temp.grav}{ temperature of bottle headspace at time of biogas venting, prior to gravimetric measurement. } \item{pres.vol}{ pressure at which biogas volume was measured. } \item{pres.grav}{ pressure of bottle headspace at time of biogas venting, prior to gravimetric measurement. } \item{id.name}{ name of the bottle identification code column in \code{dat}. Must be the same in all data frames used in the function. } \item{time.name}{ name of column containing time data (cumulative time) in \code{dat}. } \item{vol.name}{ name of the measured biogas volume column in \code{dat}. } \item{m.pre.name}{ name of column containing pre-venting bottle mass in \code{dat} data frame. Optional, required for \code{vented.mass = TRUE} and to calculate leakage. } \item{m.post.name}{ name of column containing post-venting bottle mass in \code{dat} data frame. See details. } \item{comp.name}{ name of column with biogas composition to be added to output data frame. } \item{vented.mass}{ Set to \code{TRUE} to use vented mass loss (GD_v method) instead of total mass loss (GD_t method) in calculations. See details. } \item{averaging}{ type of averaging used for calculating biogas composition. Default is \code{final}. See details. } \item{temp.init}{ optional initial headspace temperature. Used to correct results for initial headspace. See details. } \item{pres.init}{ optional initial headspace pressure. Used to correct results for initial headspace. See details. } \item{headspace}{ optional data frame or length-one numeric vector with reactor headspace volume(s). If a data frame is used, it should at least contain a \code{"id"} (reactor identification code) column (see \code{"id.name"}) and headspace volume column (see \code{vol.hs.name} argument). Required if \code{cmethod = "total"}. Default is \code{NULL}. } \item{vol.hs.name}{ optional name of column containing headspace volume data in optional \code{headspace} data frame. 
} \item{headcomp}{ optional initial headspace composition used to correct results for initial headspace. Default of \code{"N2"} is only logical value. } \item{vmethod}{ method used for calculating biogas volume. Default of \code{'vol'} is based on measured biogas volume in \code{vol.name} column. Alternative is \code{'grav'} for gravimetric method, which should be used with \code{vented.mass = TRUE}. } \item{comp.lim}{ acceptable limits on calculated methane mole fraction. Any values outside of this range are set to \code{comp.sub}. Default of \code{c(0, 1)}. Length two vector. } \item{comp.sub}{ Value substituted in for calculated methane mole fraction when calculated value is outside of \code{comp.lim} range. Length one vector. Default value of \code{NA} should generally never be changed. } \item{imethod}{ method used for interpolation of \code{xCH4}. This is passed as the \code{method} argument to \code{\link{interp}}. Length one character vector. Default is \code{"linear"} for linear interpolation. } \item{extrap}{ should \code{comp.name} be extrapolated? Length one logical vector. This is passed as the \code{extrap} argument to \code{\link{interp}}. Default is \code{FALSE}. } \item{addt0}{ is the earliest time in \code{dat} data frame \dQuote{time zero} (start time)? If not, this argument adds a row with \code{time.name = 0} for each reactor in order to calculate production rates for the first observation. This addition is only made when \code{time.name} is \code{numeric} (or \code{integer}). Length-one logical vector. Default is \code{TRUE}. To return these additional rows in the output, see \code{showt0}. } \item{showt0}{ should \dQuote{time zero} rows be returned in the output? Can be convenient for plotting cumulative volumes. Only applies if \code{time.name} is \code{numeric} (or \code{integer}). These rows may have been present in the original data (\code{dat}) or added by the function (see \code{addt0}). Default value depends on \code{dat} \code{time.name} column content. If \code{time.name} column is numeric and contains 0 then the default value is \code{TRUE} and otherwise \code{FALSE}. } \item{dry}{ set to \code{TRUE} if volume data are standardized to dry conditions. The default (\code{FALSE}) means biogas is assumed to be saturated with water vapor. } \item{std.message}{ should a message with the standard conditions be displayed? Default is \code{TRUE}. } \item{check}{ should input data be checked for unreasonable values (with warnings)? Currently only composition values are checked. Default is \code{TRUE}. Values are changed if outside 0, 1 (divided by 100). } \item{temp.std}{ standard temperature for presentation of biogas and methane results. Length one numeric vector. Default value is 0 degrees C (set in \code{\link{stdVol}}). Argument is passed to \code{\link{stdVol}}. } \item{pres.std}{ standard pressure for presentation of biogas and methane results. Length one numeric vector. Default value is 1.0 atm (101325 Pa) (set in \code{\link{stdVol}}). Argument is passed to \code{\link{stdVol}}. } \item{unit.temp}{ temperature units for \code{temp} and \code{temp.std} arguments. Default is \code{"C"} for degrees Celcius. Argument is passed to \code{\link{stdVol}}. } \item{unit.pres}{ pressure units for \code{pres} and \code{pres.std} arguments. Default is \code{"atm"}. Argument is passed to \code{\link{stdVol}}. 
} } \details{ Using volume and mass loss data from \code{dat}, this function will calculate standardized biogas and methane production for each observation using the gas density (GD) method. See reference below for details on the method. Standard values and units for temperature and pressure can be globally set using the function \code{\link{options}}. See \code{\link{stdVol}}. } \value{ a data frame with all the columns originally present in \code{dat}, plus others including these: \item{vBg}{Standardized volume of biogas production for individual event.} \item{xCH4}{Calculated mole fraction of methane in biogas.} \item{vCH4}{Standardized volume of methane production for individual event.} \item{cvBg}{Standardized cumulative volume of biogas production.} \item{cvCH4}{Standardized cumulative volume of methane production.} \item{rvBg}{Production rate of biogas.} \item{rvCH4}{Production rate of methane.} Units are based on units in input data. } \references{ Justesen, C.G., Astals, S., Mortensen, J.R., Thorsen, R., Koch, K., Weinrich, S., Triolo, J.M., Hafner, S.D. 2019. Development and validation of a low-cost gas density method for measuring biochemical potential (BMP) \emph{Water (MDPI)} \bold{11(12)}: 2431. } \author{ Sasha D. Hafner, Camilla Justesen, Jacob Mortensen } \seealso{ \code{\link{calcBgMan}}, \code{\link{calcBgVol}}, \code{\link{summBg}}, \code{\link{interp}}, \code{\link{stdVol}}, \code{\link{options}} } \examples{ data("UQGDBiogas") data("UQGDSetup") head(UQGDBiogas) head(UQGDSetup) cbg <- calcBgGD(UQGDBiogas, temp.vol = 20, pres.vol = 1013.25, temp.grav = 30, pres.grav = 1500, id.name = 'id', vol.name = 'vol', m.pre.name = 'mass.init', m.post.name = 'mass.final', time.name = 'time.d', unit.pres = 'mbar') BMP <- summBg(cbg, UQGDSetup, id.name = "id", time.name = 'time.d', descrip.name = 'descrip', inoc.name = "Inoculum", inoc.m.name = "m.inoc", norm.name = "m.sub.vs", when = 'end') BMP } %% Add one or more standard keywords, see file "KEYWORDS" in the %% R documentation directory. \keyword{chron} \keyword{manip} \concept{biogas}
/man/calcBgGD.Rd
no_license
sashahafner/biogas
R
false
false
9,821
rd
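The \details section of the calcBgGD() help page above notes that standard temperature and pressure can be set globally with options(). A minimal sketch of that workflow, assuming the biogas package is installed; the option names are the ones read via getOption() in the function's Usage block, and the calcBgGD() call simply repeats the packaged example.

library(biogas)

# Global standard conditions picked up by calcBgGD() through getOption();
# the values used here match the documented defaults (0 degrees C).
options(temp.std = 0, unit.temp = 'C')

data("UQGDBiogas")

# Same call as in the \examples block above
cbg <- calcBgGD(UQGDBiogas,
                temp.vol = 20, pres.vol = 1013.25,
                temp.grav = 30, pres.grav = 1500,
                id.name = 'id', vol.name = 'vol',
                m.pre.name = 'mass.init', m.post.name = 'mass.final',
                time.name = 'time.d', unit.pres = 'mbar')

head(cbg)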
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cfbd_games.R \name{cfbd_game_info} \alias{cfbd_game_info} \title{\strong{Get results information from games.}} \usage{ cfbd_game_info( year, week = NULL, season_type = "regular", team = NULL, home_team = NULL, away_team = NULL, conference = NULL, game_id = NULL, quarter_scores = FALSE ) } \arguments{ \item{year}{(\emph{Integer} required): Year, 4 digit format(\emph{YYYY})} \item{week}{(\emph{Integer} optional): Week - values from 1-15, 1-14 for seasons pre-playoff (i.e. 2013 or earlier)} \item{season_type}{(\emph{String} default regular): Select Season Type: regular, postseason, or both} \item{team}{(\emph{String} optional): D-I Team} \item{home_team}{(\emph{String} optional): Home D-I Team} \item{away_team}{(\emph{String} optional): Away D-I Team} \item{conference}{(\emph{String} optional): Conference abbreviation - Select a valid FBS conference\cr Conference abbreviations P5: ACC, B12, B1G, SEC, PAC\cr Conference abbreviations G5 and FBS Independents: CUSA, MAC, MWC, Ind, SBC, AAC\cr} \item{game_id}{(\emph{Integer} optional): Game ID filter for querying a single game} \item{quarter_scores}{(\emph{Logical} default FALSE): This is a parameter to return the list columns that give the score at each quarter: \code{home_line_scores} and \code{away_line_scores}.\cr I have defaulted the parameter to false so that you will not have to go to the trouble of dropping it.} } \value{ \code{\link[=cfbd_game_info]{cfbd_game_info()}} - A data frame with 22 variables: \describe{ \item{\code{game_id}: integer.}{Referencing game id.} \item{\code{season}: integer.}{Season of the game.} \item{\code{week}: integer.}{Game week.} \item{\code{season_type}: character.}{Season type of the game.} \item{\code{start_date}: character.}{Game date.} \item{\code{start_time_tbd}: logical.}{TRUE/FALSE flag for if the game's start time is to be determined.} \item{\code{neutral_site}: logical.}{TRUE/FALSE flag for the game taking place at a neutral site.} \item{\code{conference_game}: logical.}{TRUE/FALSE flag for this game qualifying as a conference game.} \item{\code{attendance}: integer.}{Reported attendance at the game.} \item{\code{venue_id}: integer.}{Referencing venue id.} \item{\code{venue}: character.}{Venue name.} \item{\code{home_id}: integer.}{Home team referencing id.} \item{\code{home_team}: character.}{Home team name.} \item{\code{home_conference}: character.}{Home team conference.} \item{\code{home_points}: integer.}{Home team points.} \item{\code{home_post_win_prob}: character.}{Home team post-game win probability.} \item{\code{home_pregame_elo}: character.}{Home team pre-game ELO rating.} \item{\code{home_postgame_elo}: character.}{Home team post-game ELO rating.} \item{\code{away_id}: integer.}{Away team referencing id.} \item{\code{away_team}: character.}{Away team name.} \item{\code{away_conference}: character.}{Away team conference.} \item{\code{away_points}: integer.}{Away team points.} \item{\code{away_post_win_prob}: character.}{Away team post-game win probability.} \item{\code{away_pregame_elo}: character.}{Away team pre-game ELO rating.} \item{\code{away_postgame_elo}: character.}{Away team post-game ELO rating.} \item{\code{excitement_index}: character.}{Game excitement index.} \item{\code{highlights}: character.}{Game highlight urls.} \item{\code{notes}: character.}{Game notes.} } } \description{ \strong{Get results information from games.} } \examples{ \donttest{ cfbd_game_info(2018, week = 1) cfbd_game_info(2018, week 
= 7, conference = "Ind") # 7 OTs LSU @ TAMU cfbd_game_info(2018, week = 13, team = "Texas A&M", quarter_scores = TRUE) } } \keyword{Game} \keyword{Info}
/man/cfbd_game_info.Rd
permissive
Engy-22/cfbfastR
R
false
true
3,743
rd
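A hedged sketch of post-processing the documented return value of cfbd_game_info(): the column names come from the \value block above, while the dplyr pipeline, the margin cutoff, and the assumption of a configured CollegeFootballData API key are illustrative additions.

library(cfbfastR)
library(dplyr)

# Assumes a CFBD API key has already been set up for cfbfastR
games <- cfbd_game_info(2018, week = 1)

# Keep one-score games, using columns documented in the \value block above
close_games <- games %>%
  mutate(margin = home_points - away_points) %>%
  filter(abs(margin) <= 8) %>%
  select(game_id, home_team, home_points, away_team, away_points, margin)

head(close_games)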
#Figures going into Thesis #Top-down experiment, Bottom-up experiment, and regional results #load libraries library(RgoogleMaps) library(maps) #source documents where data files are contained source("archived_scripts/Tara_thesis/ExclosureAnalysis_Summer2016.R") #Triangle exclosures (top-down) source("archived_scripts/Tara_thesis/tree_species_model.R") #Tree species model (#Bottom up) source("archived_scripts/Tara_thesis/northern_comparisons.R") # Regional comparisons source("archived_scripts/Tara_thesis/data_analysis.R") # Southern Appalachian exclosures (top-down) #Figure 1 - Differences of Differences histogram comparing effects of exclosures on #Bird Food Arthropods and caterpillars in the Southern Appalachians and in the triangle #still need to figure out how to do this in a panel/do I really want to show this? caterpillar_time4_t1 = dplyr::filter(caterpillar_time4_t, VFX_VF_dif != 26) par(mfrow = c(2, 2), mar=c(4,4.5,2,2), oma = c(3,3,2,2), cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5) hist(food_time4_12$VFX_VF_dif, breaks=30, col = "deepskyblue", ylab="Appalachians", xlab = "", main = "Arthropods", ylim = c(0,70), plot = TRUE) #bird food app legend("topright", "A", bty="n", cex = 1.5) hist(caterpillar_time4_12$VFX_VF_dif, breaks=10, col = "deepskyblue", ylab="", xlab = "", main = "Caterpillars", xlim = c(-2,2), ylim = c(0,70), plot = TRUE) #caterpillars app legend("topright", "B", bty="n", cex = 1.5) hist(food_time4_t$VFX_VF_dif, breaks=10, col = "mediumspringgreen", ylab="Piedmont", xlab = "", ylim = c(0,20), main="", plot = TRUE) #bird food tri legend("topright", "C", bty="n", cex = 1.5) hist(caterpillar_time4_t1$VFX_VF_dif, breaks=10, col = "mediumspringgreen", ylab="", xlab = "", xlim = c(-2,2), ylim = c(0,20), main="", plot = TRUE) #caterpillars tri legend("topright", "D", bty="n", cex = 1.5) mtext("Number of Surveys", side = 2, outer = TRUE, line = 1.5, cex = 1.5) mtext("Difference in Change in Arthropod Density", side = 1, outer = TRUE, line = .5, cex = 1.5) #there is an outlier in NC piedmont caterpillars and bird food (26 caterpillars) #removed for plotting ease, but should note #ylab = percent of total #xlab = "Difference in Change in Arth Density between VFX and VF #Figure 2- Raw arth counts for bird food, caterpillars for triangle and southern appalachians caterpillar_final1 = filter(caterpillar_final, Visit3VFX != 26) par(mfrow = c(4, 2), mar=c(4,4,2,2), cex.lab = 1.5, cex.axis = 1.6, cex.main = 1.8) hist(food_time4_12$Visit3VF, ylim=c(0,80), main ="Control", xlab = "Arth. density", ylab = "", col="deepskyblue", plot = TRUE) legend("topright", "A", bty="n", cex = 1.5) hist(food_time4_12$Visit3VFX, ylim=c(0,80), main ="Exclosure", xlab = "Arth. density", ylab = "", col="deepskyblue", plot = TRUE) legend("topright", "B", bty="n", cex = 1.5) hist(caterpillar_time4_12$Visit3VF, ylim=c(0,80),main ="", xlab = "Cat. density", ylab = "", col="deepskyblue", plot = TRUE) legend("topright", "C", bty="n", cex = 1.5) hist(caterpillar_time4_12$Visit3VFX, ylim=c(0,80), main ="", xlab = "Cat. density", ylab = "", col="deepskyblue", plot = TRUE) legend("topright", "D", bty="n", cex = 1.5) hist(food_final$Visit3VF, ylim=c(0,25), main ="", xlab = "Arth. density", ylab = "", col="mediumspringgreen", plot = TRUE) legend("topright", "E", bty="n", cex = 1.5) hist(food_final$Visit3VFX, ylim=c(0,25), main ="", xlab = "Arth. density", ylab = "", col="mediumspringgreen", plot = TRUE) legend("topright", "F", bty="n", cex = 1.5) hist(caterpillar_final$Visit3VF, ylim=c(0,25), main ="", xlab = "Cat. 
density", ylab = "", col="mediumspringgreen", plot = TRUE) legend("topright", "G", bty="n", cex = 1.5) hist(caterpillar_final1$Visit3VFX, ylim=c(0,25), main ="", xlab = "Cat. density", ylab = "", col="mediumspringgreen", plot = TRUE) legend("topright", "H", bty="n", cex = 1.5) mtext("Number of Surveys", side = 2, outer = TRUE, line = 1.0, cex = 1.4) #mtext("NC Piedmont S. Appalachians", outer = TRUE, side = 2, line = 0) #Figure 3 #Plot HSD results par(mfrow = c(2, 2), mar=c(6,2.8,2,2), cex.lab = 1.2, cex.axis = 1, cex.main = 1.5) barplot(plotting.log_app_food$means, las=2, main = "Arthropods", ylab = "Appalachians", ylim = c(-.3,.8), cex.names=.65, col = (ifelse(plotting.log_app_food$M == "a", "blue", ifelse(plotting.log_app_food$M == "b","royalblue1", ifelse(plotting.log_app_food$M == "bc", "deepskyblue", ifelse(plotting.log_app_food$M == "c", "lightskyblue1", ifelse(plotting.log_app_food$M == "d", "white", NA))))))) text(x=seq(from=.7, to= 11.5 ,by=1.2), y=.6, plotting.log_app_food$M) text(x=seq(from=.9, to= 12, by = 1.29), y = -.15, labels = plotting.log_app_food$tree_sp, lheight = 1.5, srt = 55, adj = 1, xpd = TRUE, cex = .9) #caterpillar appalachians barplot(plotting.log_app_caterpillar$means, las=2, main = "Caterpillars", ylim = c(-.3,.8), col = (ifelse(plotting.log_app_caterpillar$M == "a", "blue", ifelse(plotting.log_app_caterpillar$M == "b","royalblue1", ifelse(plotting.log_app_caterpillar$M == "bc", "deepskyblue", ifelse(plotting.log_app_caterpillar$M == "c", "lightskyblue1", ifelse(plotting.log_app_caterpillar$M == "d", "white", NA))))))) text(x=seq(from=.7, to= 11.5 ,by=1.2), y=.6, plotting.log_app_caterpillar$M) #plotting.log_tri_caterpillar$tree_sp text(x=seq(from=.9, to= 12, by = 1.29), y = -.15, labels = plotting.log_app_caterpillar$tree_sp, lheight = 1.5, srt = 55, adj = 1, xpd = TRUE, cex = .9) #bird food triangle barplot(plotting.log_tri_food$means, las=2, ylab = "Piedmont", ylim = c(0,.55), col = (ifelse(plotting.log_tri_food$M == "a", "seagreen4", ifelse(plotting.log_tri_food$M == "ab", "seagreen3", ifelse(plotting.log_tri_food$M == "b","mediumspringgreen", ifelse(plotting.log_tri_food$M == "bc", "lightgreen", ifelse(plotting.log_tri_food$M == "c", "mintcream", NA))))))) text(x=seq(from=.7, to= 11.5 ,by=1.2), y=.5, plotting.log_tri_food$M) #plotting.log_tri_caterpillar$tree_sp text(x=seq(from=.9, to= 12, by = 1.29), y = 0, labels = plotting.log_tri_food$tree_sp, lheight = 1.5, srt = 55, adj = 1, xpd = TRUE, cex = .9) #caterpillar triangle barplot(plotting.log_tri_caterpillar$means, las=2, mar =c(1,1,1,1), ylim = c(0,.55), col = (ifelse(plotting.log_tri_caterpillar$M == "a", "seagreen4", ifelse(plotting.log_tri_caterpillar$M == "ab", "seagreen3", ifelse(plotting.log_tri_caterpillar$M == "b","mediumspringgreen", ifelse(plotting.log_tri_caterpillar$M == "bc", "lightgreen", ifelse(plotting.log_tri_caterpillar$M == "c", "mintcream", NA))))))) text(x=seq(from=.7, to= 11.5 ,by=1.2), y=.5, plotting.log_tri_caterpillar$M) mtext("Log-transformed Arth Density", side = 2, outer = TRUE, line = 1.5, cex = 1.3) mtext("Tree Species", side = 1, outer = TRUE, line = .5, cex = 1.3) #plotting.log_tri_caterpillar$tree_sp text(x=seq(from=.9, to= 12, by = 1.29), y = 0, labels = plotting.log_tri_caterpillar$tree_sp, lheight = 1.5, srt = 55, adj = 1, xpd = TRUE, cex = .9) #Map locations of sites and Regions library(RgoogleMaps) ##Read in BBS Lat-Long Data BBS_coordinates <- read.table("data/environmental/BBS_stop_latlongs.txt", sep= '\t', quote="\"", header=TRUE) names(BBS_coordinates) = 
c("location", "lat", "long") oth_coordinates = data.frame(location = c("Botanical Garden", "Prairie Ridge", "Hubbard Brook", "Moosilauke", "Russell", "Stinson", "C", "H", "M"), lat = c(35.898645, 35.809674, 43.93903, 43.990567,44.004677, 43.83500, 41.460058, 41.521779, 41.476715), long = c(-79.031469, -78.716546, -71.75115, -71.773936, -71.645864, -71.773936, -72.520527, -72.541126, -72.631527)) all_coordinates = rbind(BBS_coordinates, oth_coordinates) all_coordinates1=filter(all_coordinates, location %in% unique(region_complete$site)) write.csv(all_coordinates1, "data/northern_comparisons_coordinates.csv") ##Map lat-long data par(mfrow = c(1, 1), mar=c(7,4,3,3)) hub = filter(all_coordinates, location %in% c("Hubbard Brook", "Moosilauke", "Russell", "Stinson")) sing = filter(all_coordinates, location %in% c("C", "H", "M")) va = filter(all_coordinates, location %in% uniq_va) sa = filter(all_coordinates, location %in% uniq_sa) tri = filter(all_coordinates, location %in% c("Botanical Garden", "Prairie Ridge")) write.csv(hub, "data/environmental/hub_latlong.csv") write.csv(sing, "data/environmental/sing_latlong.csv") write.csv(va, "data/environmental/va_latlong.csv") write.csv(sa, "data/environmental/sa_latlong.csv") write.csv(tri, "data/environmental/tri_latlong.csv")
/archived_scripts/Tara_thesis/thesisfigures_tm.R
no_license
hurlbertlab/caterpillars-count-analysis
R
false
false
9,205
r
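The thesis figure script above draws every panel to the active graphics device only. A small sketch, not part of the original script, showing how one of those figure blocks could be written to disk by wrapping the same base-R calls in a png() device; the output path is invented.

# Hypothetical output file; any of the figure blocks above can be wrapped this way
png("figures/fig1_exclosure_differences.png", width = 8, height = 6,
    units = "in", res = 300)

par(mfrow = c(2, 2), mar = c(4, 4.5, 2, 2), oma = c(3, 3, 2, 2),
    cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5)
hist(food_time4_12$VFX_VF_dif, breaks = 30, col = "deepskyblue",
     ylab = "Appalachians", xlab = "", main = "Arthropods", ylim = c(0, 70))
# ... remaining Figure 1 panels exactly as in the script above ...

dev.off()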
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/apprunner_operations.R \name{apprunner_describe_custom_domains} \alias{apprunner_describe_custom_domains} \title{Return a description of custom domain names that are associated with an App Runner service} \usage{ apprunner_describe_custom_domains( ServiceArn, NextToken = NULL, MaxResults = NULL ) } \arguments{ \item{ServiceArn}{[required] The Amazon Resource Name (ARN) of the App Runner service that you want associated custom domain names to be described for.} \item{NextToken}{A token from a previous result page. It's used for a paginated request. The request retrieves the next result page. All other parameter values must be identical to the ones that are specified in the initial request. If you don't specify \code{NextToken}, the request retrieves the first result page.} \item{MaxResults}{The maximum number of results that each response (result page) can include. It's used for a paginated request. If you don't specify \code{MaxResults}, the request retrieves all available results in a single response.} } \description{ Return a description of custom domain names that are associated with an App Runner service. See \url{https://www.paws-r-sdk.com/docs/apprunner_describe_custom_domains/} for full documentation. } \keyword{internal}
/cran/paws.compute/man/apprunner_describe_custom_domains.Rd
permissive
paws-r/paws
R
false
true
1,338
rd
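A hedged pagination sketch built on the NextToken and MaxResults parameters described above. The client pattern (paws::apprunner() followed by $describe_custom_domains()) follows the package's usual interface for this operation; the response field names (CustomDomains, NextToken) come from the underlying AWS API and are assumptions here, as is the placeholder service ARN.

library(paws)

svc <- apprunner()
service_arn <- "arn:aws:apprunner:us-east-1:123456789012:service/example"  # placeholder

domains <- list()
token <- NULL
repeat {
  resp <- svc$describe_custom_domains(
    ServiceArn = service_arn,
    NextToken  = token,
    MaxResults = 5
  )
  domains <- c(domains, resp$CustomDomains)  # field name assumed from the AWS response
  token <- resp$NextToken
  if (length(token) == 0 || !nzchar(token)) break  # no further result pages
}

length(domains)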
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/functions.R \name{read_from_fukan} \alias{read_from_fukan} \title{Read files generated in Fukan System.} \usage{ read_from_fukan(path_to_file) } \arguments{ \item{path_to_file}{The full path of the file you want to open} } \description{ This function allows you to read analysis reports from the Fukan System } \examples{ read_from_fukan(file.choose()) } \seealso{ \code{\link{write_for_fukan}} } \keyword{Fukan} \keyword{System}
/man/read_from_fukan.Rd
no_license
cristianmejia00/Opener5
R
false
true
503
rd
source("./packages.R") lapply(list.files(here::here("./R"), full.names = TRUE), source) library(knitrProgressBar) all_mzml_files <- dir("/mlab/data/archives/FTMS_raw/CESB/mzml_data", recursive = TRUE, full.names = TRUE, pattern = "mzML$") names(all_mzml_files) = gsub(".mzML$", "", basename(all_mzml_files)) all_zip_files = dir("/mlab/scratch/cesb_data/zip_files/lung_matched_tissue-2022-04-02", pattern = ".zip$", full.names = TRUE) names(all_zip_files) = gsub(".zip$", "", basename(all_zip_files)) zip_mzml = intersect(names(all_mzml_files), names(all_zip_files)) all_mzml_files = all_mzml_files[zip_mzml] pb = knitrProgressBar::progress_estimated(length(all_mzml_files)) msnbase_peaks = purrr::imap(all_mzml_files, function(in_mzml, id_mzml){ tmp = msnbase_centroid(in_mzml, sample_id = id_mzml) knitrProgressBar::update_progress(pb) tmp }) saveRDS(msnbase_peaks, "data/data_output/lung_data/lung_msnbase_peaks.rds")
/lungcancer_all/msnbase_peakset.R
no_license
MoseleyBioinformaticsLab/manuscript.peakCharacterization
R
false
false
931
r
source("./packages.R") lapply(list.files(here::here("./R"), full.names = TRUE), source) library(knitrProgressBar) all_mzml_files <- dir("/mlab/data/archives/FTMS_raw/CESB/mzml_data", recursive = TRUE, full.names = TRUE, pattern = "mzML$") names(all_mzml_files) = gsub(".mzML$", "", basename(all_mzml_files)) all_zip_files = dir("/mlab/scratch/cesb_data/zip_files/lung_matched_tissue-2022-04-02", pattern = ".zip$", full.names = TRUE) names(all_zip_files) = gsub(".zip$", "", basename(all_zip_files)) zip_mzml = intersect(names(all_mzml_files), names(all_zip_files)) all_mzml_files = all_mzml_files[zip_mzml] pb = knitrProgressBar::progress_estimated(length(all_mzml_files)) msnbase_peaks = purrr::imap(all_mzml_files, function(in_mzml, id_mzml){ tmp = msnbase_centroid(in_mzml, sample_id = id_mzml) knitrProgressBar::update_progress(pb) tmp }) saveRDS(msnbase_peaks, "data/data_output/lung_data/lung_msnbase_peaks.rds")
#' @title Create a classification, regression, survival, cluster, cost-sensitive classification or #' multilabel task. #' #' @description #' The task encapsulates the data and specifies - through its subclasses - #' the type of the task. #' It also contains a description object detailing further aspects of the data. #' #' Useful operators are: \code{\link{getTaskFormula}}, #' \code{\link{getTaskFeatureNames}}, #' \code{\link{getTaskData}}, #' \code{\link{getTaskTargets}}, and #' \code{\link{subsetTask}}. #' #' Object members: #' \describe{ #' \item{env [\code{environment}]}{Environment where data for the task are stored. #' Use \code{\link{getTaskData}} in order to access it.} #' \item{weights [\code{numeric}]}{See argument. \code{NULL} if not present.} #' \item{blocking [\code{factor}]}{See argument. \code{NULL} if not present.} #' \item{task.desc [\code{\link{TaskDesc}}]}{Encapsulates further information about the task.} #' } #' #' Notes: #' For multilabel classification we assume that the presence of labels is encoded via logical #' columns in \code{data}. The name of the column specifies the name of the label. \code{target} #' is then a char vector that points to these columns. #' #' @param id [\code{character(1)}]\cr #' Id string for object. #' Default is the name of the R variable passed to \code{data}. #' @param data [\code{data.frame}]\cr #' A data frame containing the features and target variable(s). #' @param target [\code{character(1)} | \code{character(2)} | \code{character(n.classes)}]\cr #' Name(s) of the target variable(s). #' For survival analysis these are the names of the survival time and event columns, #' so it has length 2. For multilabel classification it contains the names of the logical #' columns that encode whether a label is present or not and its length corresponds to the #' number of classes. #' @param costs [\code{data.frame}]\cr #' A numeric matrix or data frame containing the costs of misclassification. #' We assume the general case of observation specific costs. #' This means we have n rows, corresponding to the observations, in the same order as \code{data}. #' The columns correspond to classes and their names are the class labels #' (if unnamed we use y1 to yk as labels). #' Each entry (i,j) of the matrix specifies the cost of predicting class j #' for observation i. #' @param weights [\code{numeric}]\cr #' Optional, non-negative case weight vector to be used during fitting. #' Cannot be set for cost-sensitive learning. #' Default is \code{NULL} which means no (= equal) weights. #' @param blocking [\code{factor}]\cr #' An optional factor of the same length as the number of observations. #' Observations with the same blocking level \dQuote{belong together}. #' Specifically, they are either put all in the training or the test set #' during a resampling iteration. #' Default is \code{NULL} which means no blocking. #' @param positive [\code{character(1)}]\cr #' Positive class for binary classification (otherwise ignored and set to NA). #' Default is the first factor level of the target attribute. #' @param fixup.data [\code{character(1)}]\cr #' Should some basic cleaning up of data be performed? #' Currently this means removing empty factor levels for the columns. #' Possible choices are: #' \dQuote{no} = Don't do it. #' \dQuote{warn} = Do it but warn about it. #' \dQuote{quiet} = Do it but keep silent. #' Default is \dQuote{warn}. #' @param check.data [\code{logical(1)}]\cr #' Should sanity of data be checked initially at task creation? 
#' You should have good reasons to turn this off (one might be speed). #' Default is \code{TRUE}. #' @param frequency [\code{Integer(1)}]\cr #' The seasonality of the data. A frequency of 7L for daily data means a weekly seasonality, #' 52L is weekly data with a yearly seasonality, 365L is daily data with a yearly seasonality, etc. #' Default is 1L for no seasonality. #' @return [\code{\link{Task}}]. #' @name Task #' @rdname Task #' @aliases ClassifTask RegrTask SurvTask CostSensTask ClusterTask MultilabelTask ForecastRegrTask #' @examples #' library(mlbench) #' data(BostonHousing) #' data(Ionosphere) #' #' makeClassifTask(data = iris, target = "Species") #' makeRegrTask(data = BostonHousing, target = "medv") #' # an example of a classification task with more than those standard arguments: #' blocking = factor(c(rep(1, 51), rep(2, 300))) #' makeClassifTask(id = "myIonosphere", data = Ionosphere, target = "Class", #' positive = "good", blocking = blocking) #' makeClusterTask(data = iris[, -5L]) NULL makeTask = function(type, data, weights = NULL, blocking = NULL, fixup.data = "warn", check.data = TRUE) { if (fixup.data != "no") { if (fixup.data == "quiet") { data = droplevels(data) } else if (fixup.data == "warn") { # the next lines look a bit complicated, we calculate the warning info message dropped = logical(ncol(data)) for (i in seq_col(data)) { if (is.factor(data[[i]]) && any(table(data[[i]]) == 0L)) { dropped[i] = TRUE data[[i]] = droplevels(data[[i]]) } } if (any(dropped)) warningf("Empty factor levels were dropped for columns: %s", collapse(colnames(data)[dropped])) } } if (check.data) { assertDataFrame(data, col.names = "strict") if (class(data)[1] != "data.frame") { warningf("Provided data is not a pure data.frame but from class %s, hence it will be converted.", class(data)[1]) data = as.data.frame(data) } if (!is.null(weights)) assertNumeric(weights, len = nrow(data), any.missing = FALSE, lower = 0) if (!is.null(blocking)) { assertFactor(blocking, len = nrow(data), any.missing = FALSE) if (length(blocking) && length(blocking) != nrow(data)) stop("Blocking has to be of the same length as number of rows in data! Or pass none at all.") } } env = new.env(parent = emptyenv()) env$data = data makeS3Obj("Task", type = type, env = env, weights = weights, blocking = blocking, task.desc = NA ) } #' @importFrom lubridate is.POSIXt checkTaskData = function(data, cols = names(data)) { fun = function(cn, x) { if (is.numeric(x)) { if (anyInfinite(x)) stopf("Column '%s' contains infinite values.", cn) if (anyNaN(x)) stopf("Column '%s' contains NaN values.", cn) } else if (is.factor(x)) { if (any(table(x) == 0L)) stopf("Column '%s' contains empty factor levels.", cn) } else if (is.POSIXt(x)){ if (any(duplicated(x))) stopf("There are duplicate dates") } else { stopf("Unsupported feature type (%s) in column '%s'.", class(x)[1L], cn) } } Map(fun, cn = cols, x = data[cols]) invisible(TRUE) } #' @export print.Task = function(x, print.weights = TRUE, ...) { td = x$task.desc catf("Task: %s", td$id) catf("Type: %s", td$type) catf("Observations: %i", td$size) catf("Features:") catf(printToChar(td$n.feat, collapse = "\n")) catf("Missings: %s", td$has.missings) if (print.weights) catf("Has weights: %s", td$has.weights) catf("Has blocking: %s", td$has.blocking) } #' @export print.TimeTask = function(x, print.weights = TRUE, ...) 
{ td = x$task.desc catf("Task: %s", td$id) catf("Type: %s", td$type) catf("Observations: %i", td$size) catf("Dates:\n Start: %s \n End: %s", td$dates[1], td$dates[2]) catf("Frequency: %i", td$frequency) catf("Features:") catf(printToChar(td$n.feat, collapse = "\n")) catf("Missings: %s", td$has.missings) if (print.weights) catf("Has weights: %s", td$has.weights) catf("Has blocking: %s", td$has.blocking) }
/R/Task.R
no_license
pherephobia/mlr
R
false
false
7,728
r
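A short sketch of the fixup.data behaviour implemented in makeTask() above: with fixup.data = "warn", an unused factor level is dropped and a warning names the affected column. The modified iris copy is invented for illustration and assumes the mlr package is loaded.

library(mlr)

# Add an empty (unused) factor level to trigger the droplevels() + warning branch
d <- iris
d$Species <- factor(d$Species, levels = c(levels(iris$Species), "unused"))

# Warns: "Empty factor levels were dropped for columns: Species"
task <- makeClassifTask(id = "iris_fixup", data = d, target = "Species",
                        fixup.data = "warn")
task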
library(e1071) library(ggplot2) library(dplyr) make_svm_plot_2d <- function(data, C){ svmfit <- svm(y ~., data=data, scale=FALSE, type='C-classification', shrinking=FALSE, kernel='linear', cost=C) # get svm direction svm_params <- get_svm_parmas(svmfit) w_svm <- svm_params['w'][[1]] b_svm <- svm_params['b'][[1]] num_misclassified <- sum(svmfit$fitted != data$y) # plot svm p <- ggplot(data=data) + geom_point(aes(x=x1, y=x2, color=y, shape=y)) + geom_abline(slope=-w_svm[1]/w_svm[2], intercept = b_svm/w_svm[2]) + geom_abline(slope=-w_svm[1]/w_svm[2], intercept = (b_svm + 1)/w_svm[2], linetype = 2) + geom_abline(slope=-w_svm[1]/w_svm[2], intercept = (b_svm - 1)/w_svm[2], linetype = 2) + theme(panel.background = element_blank()) + ggtitle(paste0('SVM fit, C = ', format(C, digits=3), ', num misclassified = ', num_misclassified )) return(p) } get_svm_cv_err <- function(data, svm_model, cost, k_cv, cv_seed=NA){ # returns the SVM cross validation error # use the cv_seed to make sure folds stay the same across multiple usages n <- dim(data)[1] folds <- get_folds(n, k_cv, cv_seed) cv_errs <- rep(0, k_cv) for(k in 1:k_cv){ # split data into train/test sets data_cv_tr <- data[-folds[[k]], ] data_cv_tst <- data[folds[[k]], ] svmfit_cv <- svm_model(data_cv_tr, cost) cv_errs[k] <- mean(data_cv_tst[['y']] != predict(svmfit_cv, dplyr::select(data_cv_tst, -y))) } mean(cv_errs) } get_svm_parmas <- function(svmfit){ # returns the normal vector and intercept of SVM # svm_model a fit svm object from the e1071 package # returns a list with (w, b) where w is the nv and b is the intercept # returns the normal vector and intercept (w*x + b) w <- colSums(c(svmfit$coefs) * svmfit$SV) b <- svmfit$rho return(list(w=w, b=b)) } get_svm_data <- function(svmfit, data){ cost <- svmfit$cost svm_params <- get_svm_parmas(svmfit) w_svm <- svm_params['w'][[1]] b_svm <- svm_params['b'][[1]] # add alpha coefficients data_svm <- mutate(data, alpha=0, sup_vec=FALSE) data_svm[svmfit$index, 'alpha'] = svmfit$coefs # identify support vectors data_svm[svmfit$index, 'sup_vec'] = TRUE # decision function values data_svm['decision_val'] <- svmfit$decision.values # identify margin vectors i.e. abs(decision value) == 1 epsilon <- min(cost / 100, 1e-5) data_svm <- data_svm %>% mutate(margin_vec = abs(abs(decision_val) - 1) < epsilon) # mutate(margin_vec = near(abs(decision.values), 1)) # sort(abs(abs(data_svm$decision_val) - 1)) # identify slack vectors i.e. non-margin support vectors data_svm <- data_svm %>% mutate(slack_vec = sup_vec & !margin_vec) data_svm }
/notes/more_classification/svm_fun.R
no_license
francestong/stor390
R
false
false
3,082
r
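A usage sketch for make_svm_plot_2d() defined above. The simulated two-class data frame (columns x1, x2, y) is invented to match the column names the helper expects, and the snippet assumes svm_fun.R has already been sourced.

library(e1071)
library(ggplot2)
set.seed(1)

# Two Gaussian clusters labelled -1 / 1, in the x1/x2/y layout the helpers expect
n <- 50
sim <- data.frame(x1 = c(rnorm(n, mean = -1), rnorm(n, mean = 1)),
                  x2 = c(rnorm(n, mean = -1), rnorm(n, mean = 1)),
                  y  = factor(rep(c(-1, 1), each = n)))

# Linear SVM decision boundary and margins for a chosen cost C
make_svm_plot_2d(sim, C = 1)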
library(fDMA)


### Name: print.altf2
### Title: Prints 'altf2' Object.
### Aliases: print.altf2

### ** Examples

## Not run: 
##D wti <- crudeoil[-1,1]
##D drivers <- (lag(crudeoil[,-1],k=1))[-1,]
##D a <- altf2(y=wti,x=drivers)
##D 
##D print(a)
## End(Not run)
/data/genthat_extracted_code/fDMA/examples/print.altf2.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
268
r
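If one actually wants to run the wrapped example above (it is flagged "Not run" in the package, presumably because estimation is slow), stripping the "##D" prefixes gives the plain calls; crudeoil ships with fDMA.

# The same example with the Rd "Not run" wrappers removed; this can take a while to estimate.
library(fDMA)
data(crudeoil)

wti <- crudeoil[-1, 1]
drivers <- (lag(crudeoil[, -1], k = 1))[-1, ]
a <- altf2(y = wti, x = drivers)

print(a)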
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pickerOptions.R \name{pickerOptions} \alias{pickerOptions} \title{Options for `pickerInput`} \usage{ pickerOptions(actionsBox = NULL, container = NULL, countSelectedText = NULL, deselectAllText = NULL, dropdownAlignRight = NULL, dropupAuto = NULL, header = NULL, hideDisabled = NULL, iconBase = NULL, liveSearch = NULL, liveSearchNormalize = NULL, liveSearchPlaceholder = NULL, liveSearchStyle = NULL, maxOptions = NULL, maxOptionsText = NULL, mobile = NULL, multipleSeparator = NULL, noneSelectedText = NULL, noneResultsText = NULL, selectAllText = NULL, selectedTextFormat = NULL, selectOnTab = NULL, showContent = NULL, showIcon = NULL, showSubtext = NULL, showTick = NULL, size = NULL, style = NULL, tickIcon = NULL, title = NULL, virtualScroll = NULL, width = NULL, windowPadding = NULL) } \arguments{ \item{actionsBox}{When set to true, adds two buttons to the top of the dropdown menu (Select All & Deselect All). Type: boolean; Default: false.} \item{container}{When set to a string, appends the select to a specific element or selector, e.g., container: 'body' | '.main-body' Type: string | false; Default: false.} \item{countSelectedText}{Sets the format for the text displayed when selectedTextFormat is count or count > #. {0} is the selected amount. {1} is total available for selection. When set to a function, the first parameter is the number of selected options, and the second is the total number of options. The function must return a string. Type: string | function; Default: function.} \item{deselectAllText}{The text on the button that deselects all options when actionsBox is enabled. Type: string; Default: 'Deselect All'.} \item{dropdownAlignRight}{Align the menu to the right instead of the left. If set to 'auto', the menu will automatically align right if there isn't room for the menu's full width when aligned to the left. Type: boolean | 'auto'; Default: false.} \item{dropupAuto}{checks to see which has more room, above or below. If the dropup has enough room to fully open normally, but there is more room above, the dropup still opens normally. Otherwise, it becomes a dropup. If dropupAuto is set to false, dropups must be called manually. Type: boolean; Default: true.} \item{header}{adds a header to the top of the menu; includes a close button by default Type: string; Default: false.} \item{hideDisabled}{removes disabled options and optgroups from the menu data-hide-disabled: true Type: boolean; Default: false.} \item{iconBase}{Set the base to use a different icon font instead of Glyphicons. If changing iconBase, you might also want to change tickIcon, in case the new icon font uses a different naming scheme. Type: string; Default: 'glyphicon'.} \item{liveSearch}{When set to true, adds a search box to the top of the selectpicker dropdown. Type: boolean; Default: false.} \item{liveSearchNormalize}{Setting liveSearchNormalize to true allows for accent-insensitive searching. Type: boolean; Default: false.} \item{liveSearchPlaceholder}{When set to a string, a placeholder attribute equal to the string will be added to the liveSearch input. Type: string; Default: null.} \item{liveSearchStyle}{When set to 'contains', searching will reveal options that contain the searched text. For example, searching for pl with return both Apple, Plum, and Plantain. When set to 'startsWith', searching for pl will return only Plum and Plantain. 
Type: string; Default: 'contains'.} \item{maxOptions}{When set to an integer and in a multi-select, the number of selected options cannot exceed the given value. This option can also exist as a data-attribute for an <optgroup>, in which case it only applies to that <optgroup>. Type: integer | false; Default: false.} \item{maxOptionsText}{The text that is displayed when maxOptions is enabled and the maximum number of options for the given scenario have been selected. If a function is used, it must return an array. array[0] is the text used when maxOptions is applied to the entire select element. array[1] is the text used when maxOptions is used on an optgroup. If a string is used, the same text is used for both the element and the optgroup. Type: string | array | function; Default: function.} \item{mobile}{When set to true, enables the device's native menu for select menus. Type: boolean; Default: false.} \item{multipleSeparator}{Set the character displayed in the button that separates selected options. Type: string; Default: ', '.} \item{noneSelectedText}{The text that is displayed when a multiple select has no selected options. Type: string; Default: 'Nothing selected'.} \item{noneResultsText}{The text displayed when a search doesn't return any results. Type: string; Default: 'No results matched {0}'.} \item{selectAllText}{The text on the button that selects all options when actionsBox is enabled. Type: string; Default: 'Select All'.} \item{selectedTextFormat}{Specifies how the selection is displayed with a multiple select. 'values' displays a list of the selected options (separated by multipleSeparator. 'static' simply displays the select element's title. 'count' displays the total number of selected options. 'count > x' behaves like 'values' until the number of selected options is greater than x; after that, it behaves like 'count'. Type: 'values' | 'static' | 'count' | 'count > x' (where x is an integer); Default: 'values'.} \item{selectOnTab}{When set to true, treats the tab character like the enter or space characters within the selectpicker dropdown. Type: boolean; Default: false.} \item{showContent}{When set to true, display custom HTML associated with selected option(s) in the button. When set to false, the option value will be displayed instead. Type: boolean; Default: true.} \item{showIcon}{When set to true, display icon(s) associated with selected option(s) in the button. Type: boolean; Default: true.} \item{showSubtext}{When set to true, display subtext associated with a selected option in the button. Type: boolean; Default: false.} \item{showTick}{Show checkmark on selected option (for items without multiple attribute). Type: boolean; Default: false.} \item{size}{When set to 'auto', the menu always opens up to show as many items as the window will allow without being cut off. When set to an integer, the menu will show the given number of items, even if the dropdown is cut off. When set to false, the menu will always show all items. Type: 'auto' | integer | false; Default: 'auto'.} \item{style}{When set to a string, add the value to the button's style. Type: string | null; Default: null.} \item{tickIcon}{Set which icon to use to display as the "tick" next to selected options. Type: string; Default: 'glyphicon-ok'.} \item{title}{The default title for the selectpicker. Type: string | null; Default: null.} \item{virtualScroll}{If enabled, the items in the dropdown will be rendered using virtualization (i.e. only the items that are within the viewport will be rendered). 
This drastically improves performance for selects with a large number of options. Set to an integer to only use virtualization if the select has at least that number of options. Type: boolean | integer; Default: 600.} \item{width}{When set to auto, the width of the selectpicker is automatically adjusted to accommodate the widest option. When set to a css-width, the width of the selectpicker is forced inline to the given value. When set to false, all width information is removed. Type: 'auto' | 'fit' | css-width | false (where css-width is a CSS width with units, e.g. 100px); Default: false.} \item{windowPadding}{This is useful in cases where the window has areas that the dropdown menu should not cover - for instance a fixed header. When set to an integer, the same padding will be added to all sides. Alternatively, an array of integers can be used in the format [top, right, bottom, left]. Type: integer | array; Default: 0.} } \description{ Wrapper of options available here: \url{https://developer.snapappointments.com/bootstrap-select/options/} } \note{ Documentation is from Bootstrap-select page. } \examples{ if (interactive()) { library(shiny) library(shinyWidgets) ui <- fluidPage( pickerInput( inputId = "month", label = "Select a month", choices = month.name, multiple = TRUE, options = pickerOptions( actionsBox = TRUE, title = "Please select a month", header = "This is a title" ) ) ) server <- function(input, output, session) { } shinyApp(ui, server) } }
/man/pickerOptions.Rd
permissive
jcheng5/shinyWidgets
R
false
true
8,626
rd
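For orientation, a small hand-written snippet (not taken from the package documentation) combining several of the options described above; the inputId and choices are placeholders.

# Hypothetical picker combining documented options; inputId and choices are made up.
library(shiny)
library(shinyWidgets)

pickerInput(
  inputId = "state",
  label = "Select states",
  choices = state.name,
  multiple = TRUE,
  options = pickerOptions(
    liveSearch = TRUE,                  # search box at the top of the dropdown
    size = 10,                          # show at most 10 items before scrolling
    selectedTextFormat = "count > 3",   # switch to a count once more than 3 are selected
    actionsBox = TRUE                   # add Select All / Deselect All buttons
  )
)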
# Load libraries ----------------------------------------------------------
library("RSQLite")
library("lubridate")
library("plyr")
library("dplyr")
library("ggplot2")

# Data exploration --------------------------------------------------------
# Connect to db
con <- dbConnect(drv="SQLite", "scrape/parkingTickets.db")

# List tables and fields
dbListTables(con)
dbListFields(con, "tickets")

# Get the parking tickets database into memory
pt <- dbReadTable(con, "tickets")

# Structure
str(pt)
names(pt)

# How big?
dim(pt)
head(pt)
tail(pt)

# Data processing ---------------------------------------------------------
# Create a new datetime variable
pt$datetime <- ymd_hm(paste(pt$date, pt$time))

# Convert date into actual dates
pt$date <- ymd(pt$date)

# Parse meter number
pt$meter_no[which(pt$meter_no == pt$meter_no[1])] <- NA_character_

# Some plots --------------------------------------------------------------
# Dates
pt_noByDay <- ddply(pt, .(date), summarize,
                    year = year(unique(date)),
                    month = month(unique(date)),
                    wday = wday(unique(date), label = TRUE),
                    total = length(date))

ggplot(pt_noByDay) +
  geom_line(aes(x=date, y=total)) +
  facet_wrap(~ year, scales = "free_x", nrow = 5)

ggplot(pt_noByDay) +
  geom_bar(aes(x=month, y=total), stat = "identity") +
  facet_wrap(~ year, scales = "free_x")

ggplot(pt_noByDay) +
  geom_bar(aes(x=wday, y=total), stat = "identity") +
  facet_wrap(~ year, scales = "free_x")

# Some summaries of data
/explore/explore.R
no_license
jonnybaik/vancouver_parking_tickets
R
false
false
1,549
r
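As a standalone illustration of the datetime handling used above (only lubridate is needed; the sample strings are invented):

# Minimal lubridate sketch mirroring the parsing steps above, with made-up values.
library(lubridate)

dt <- ymd_hm(paste("2014-03-07", "14:35"))  # parses to 2014-03-07 14:35:00 UTC
d  <- ymd("2014-03-07")

year(dt)               # 2014
month(dt)              # 3
wday(d, label = TRUE)  # abbreviated weekday factor, e.g. Fri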
.put.veg.module <- function(getveg.id, bety, input_veg, pfts, outfolder,
                            dir, machine, model, start_date, end_date,
                            new_site, host, overwrite){

  #--------------------------------------------------------------------------------------------------#
  # Write model specific IC files
  bety <- dplyr::src_postgres(dbname   = dbparms$bety$dbname,
                              host     = dbparms$bety$host,
                              user     = dbparms$bety$user,
                              password = dbparms$bety$password)

  con <- bety$con

  # Determine IC file format name and mimetype
  model_info <- db.query(paste0("SELECT f.name, f.id, mt.type_string from modeltypes as m",
                                " join modeltypes_formats as mf on m.id = mf.modeltype_id",
                                " join formats as f on mf.format_id = f.id",
                                " join mimetypes as mt on f.mimetype_id = mt.id",
                                " where m.name = '", model, "' AND mf.tag='", input_veg$output, "'"), con)

  logger.info("Begin Model Specific Conversion")

  formatname <- model_info[1]
  mimetype   <- model_info[3]

  spp.file <- db.query(paste("SELECT * from dbfiles where container_id =", getveg.id), con)

  pkg <- "PEcAn.data.land"
  fcn <- "write_ic"

  putveg.id <- convert.input(input.id = getveg.id,
                             outfolder = outfolder,
                             formatname = formatname, mimetype = mimetype,
                             site.id = new_site$id,
                             start_date = start_date, end_date = end_date,
                             pkg = pkg, fcn = fcn,
                             con = con, host = host, browndog = NULL,
                             write = TRUE,
                             overwrite = overwrite,
                             # fcn specific args
                             in.path = spp.file$file_path,
                             in.name = spp.file$file_name,
                             model = model,
                             new_site = new_site,
                             pfts = pfts,
                             source = input_veg$source)

  return(putveg.id)
}
/modules/data.land/R/put.veg.module.R
permissive
serbinsh/pecan
R
false
false
2,384
r
#' Performs linear regression.
#'
#' This is truly a great and much-needed function.
#'
#' @param Y vector of outcomes.
#' @param covData data frame of subjects (data corresponding to covariates)
#' @return A list with \code{coefficients} and \code{p-values}.
#' @export
#' @examples
#' myLinearRegression(Y, covData)
myLinearRegression <- function(Y, covData){

  if(ncol(covData) < 6){ # one column to account for Y
    p <- GGally::ggpairs(covData, columns = 2:ncol(covData))
    print(p)
  } else print("Too many variables to plot")

  LR <- lm(Y ~ . , data = covData)
  LS <- summary(LR)

  pval  <- LS$coefficients[ , 4]
  pcoef <- LR$coefficients

  return(list("coefficients" = cbind(pcoef), "p-values" = cbind(pval)))
}
/R/myLinearRegression.R
no_license
himanshu11305/LR
R
false
false
747
r
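A toy call with simulated data (assumes GGally is installed; the variables are invented) showing the inputs the function expects and the returned list:

# Toy illustration with simulated data; not part of the original package.
set.seed(42)
n <- 100
covData <- data.frame(x1 = rnorm(n), x2 = rnorm(n), x3 = runif(n))
Y <- 1 + 2 * covData$x1 - 0.5 * covData$x3 + rnorm(n)

res <- myLinearRegression(Y, covData)
res$coefficients   # fitted coefficients (intercept plus one per covariate)
res$`p-values`     # corresponding p-values from summary(lm(...))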
# 'ranicafe' is assumed to be loaded beforehand
rc <- ranicafe$Coffees
weight <- (1/length(rc))
sum(rc*weight)
mean(rc)

# mean() returns NA unless na.rm = TRUE
rc <- c(rc, NA)
tail(rc, n=5)
mean(rc)
mean(rc, na.rm=TRUE)
/Chapter02/source/2_5.R
no_license
qlrepdlxj/R
R
false
false
126
r
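The same idea on a self-contained toy vector (made-up numbers), without needing the ranicafe data: the equal-weight sum reproduces mean(), and na.rm controls how a missing value is handled.

# Self-contained version of the snippet above with invented values.
x <- c(10, 12, 14, 9, 15)

sum(x * (1 / length(x)))  # 12 -- equal-weight sum
mean(x)                   # 12 -- identical result

x <- c(x, NA)
mean(x)                   # NA -- a single NA propagates
mean(x, na.rm = TRUE)     # 12 -- drop the NA first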
library(data.tree) mydata = read.table("mushrooms.csv", header=TRUE, sep=",") calEntropy = function(dataset){ posits = sum(dataset[,"class"] == "e") negs = sum(dataset[,"class"] == "p") t = posits + negs if (posits == 0){ entropy = (-(negs/t)*log2(negs/t)) } else if (negs == 0){ entropy = (-(posits/t)*log2(posits/t)) } else{ entropy = (-(posits/t)*log2(posits/t)) + (-(negs/t)*log2(negs/t)) } #entropy = (-(posits/t)*log2(posits/t)) + (-(negs/t)*log2(negs/t)) return (entropy) } calcGain = function(col_name, dataset){ ent = calEntropy(dataset) #print("parent Entropy") #print(ent) #cat = levels(as.factor(dataset[,col_name])) cat = as.character(unique(dataset[,col_name])) #print(length(cat)) children_entropy = 0 for(i in 1 : length(cat)){ L = dataset[,col_name] == cat[i] #print(L) cDataSet = dataset[L,] cDataSet = subset(cDataSet, select = -eval(parse(text = col_name))) x = (nrow(cDataSet)/nrow(dataset))*calEntropy(cDataSet) #print(x) children_entropy = children_entropy + x #(nrow(cDataSet)/nrow(dataset))*calEntropy(cDataSet) #stopp } #print(children_entropy) gain = ent - children_entropy # print(gain) # print(ent) return (gain) } selectCategory = function(dataset){ #catName = '' cols = colnames(dataset)[2:ncol(dataset)]#, select = -class) gain = -99 c = ncol(dataset)-1 for(i in 1 : c){ # print('---------------------') # print(cols[i]) tempGain = calcGain(cols[i], dataset) # print(tempGain) # print('---------------------') if(gain < tempGain){ catName = cols[i] gain = tempGain } #gain = max(gain, calcGain(colnames(dataset)[i], dataset)) } return(catName) } constructTree = function(parentNode, parent_cat_val, col_name, dataset){ L = dataset[,col_name] == parent_cat_val if(length(unique(dataset[L,'class'])) == 1){ nodename = paste(parent_cat_val, ':y=', as.character(unique(dataset[L,'class'])), sep = '') #pure split will give the asnwer as y:poisonous or edible parentNode$AddChild(nodename) }else{ newDataSet = subset(dataset[L,], select = -eval(parse(text = col_name))) select_col = selectCategory(newDataSet); #print(select_col) nodename = paste(parent_cat_val, select_col, sep=':') child = parentNode$AddChild(nodename) #catLevels = levels(as.factor(newDataSet[,select_col])) catLevels = as.character(unique(newDataSet[,select_col])) for(i in 1 : length(catLevels)){ constructTree(child, catLevels[i], select_col, newDataSet); } } } smp_size = floor(0.8 * nrow(mydata)) train_ind = sample(seq_len(nrow(mydata)), size = smp_size) traindata = mydata[train_ind,] testinds = sample(seq_len(nrow(mydata)), size = nrow(mydata) - nrow(traindata) + 10) testdata = mydata[testinds,] colnames(traindata) = colnames(mydata) colnames(testdata) = colnames(mydata) col_name = selectCategory(traindata); dt <- Node$new(col_name) catLevels = as.character(unique(traindata[,col_name])) #catLevels = c('n') for(i in 1:length(catLevels)){ constructTree(dt, catLevels[i], col_name, traindata) } #print(dt) answers = c() classname = function(query, dtree){ children = dtree$children for(i in 1:length(children)){ child_lvl_name = children[[i]]$levelName # print ("child lvl name"); print(child_lvl_name) child_lvl_name = sub('^\\s+.*--', '', child_lvl_name, perl = TRUE) # print(child_lvl_name) cname = "" if(dtree$isRoot == TRUE){ # print("this is root") cname = dtree$levelName; }else{ # print("I ama not root") cname = unlist(strsplit(dtree$levelName, ":"))[2] } cname_value = as.character(query[1,cname]) #cname_value = paste(cname_value, ':', sep='') # print(child_lvl_name) # print(cname_value) #grepVal = grep(cname_value, 
c(child_lvl_name), perl=TRUE, value=FALSE) grepVal = unlist(strsplit(child_lvl_name, ":"))[1] # print(grepVal) #cname2 = sub('^\\s+.*--', '', child_lvl_name, perl = TRUE) #if(grep(cname_value, child_lvl_name, perl=TRUE, value=TRUE) == child_lvl_name){ if(grepVal == cname_value){ # print ("got it") if(children[[i]]$isLeaf == TRUE){ # print ("this is leaf") class = sub('.*=', '', child_lvl_name, perl= TRUE) return(class) } else{ # print("Not a leaf") # print(i) return(classname(query, children[[i]])) } } else{ # print ("continue") #continue next; } } } # colnames(query) = colnames(testdata) # query = mydata[2000,] # class = classname(query,dt) test = function(testdata, dt){ for (i in 1:nrow(testdata)){ query = testdata[i,] colnames(query) = colnames(testdata) answers = c(answers, classname(query, dt)) } return(answers) } answers = c() answers = test(testdata, dt) accuracy = sum(answers == testdata[,'class'])*100/nrow(testdata) accuracy ############################################################################################################ mush = read.table("mushrooms.csv", header=TRUE, sep=",") samp_size = floor(0.8 * nrow(mush)) tr_inds = sample(seq_len(nrow(mush)), size = samp_size) mushtrain = mush[tr_inds,] test_inds = sample(seq_len(nrow(mush)), size = nrow(mush) - nrow(mushtrain)) mushtest = mush[test_inds,] mushtest = as.data.frame(mushtest) mushtest[,"class"] = NA colnames(mushtest) = colnames(mushtrain) p = mushtrain[which(mushtrain[,'class'] == 'p'),] e = mushtrain[which(mushtrain[,'class'] == 'e'),] p = as.data.frame(p) e = as.data.frame(e) prior = list(p = nrow(p)/nrow(mushtrain), e = nrow(e)/nrow(mushtrain)) probs = list(p = prior[['p']], e = prior[['e']]) mushtest = data.frame(lapply(mushtest, as.character), stringsAsFactors=FALSE) for (i in 1:nrow(mushtest)){ point = mushtest[i,] #print(point) #print(typeof(point)) probs = list(p = prior[['p']], e = prior[['e']]) for (j in 2:(ncol(mushtest))){ probs[['p']] = probs[['p']] * sum(p[,j] == point[1,j])/nrow(p) probs[['e']] = probs[['e']] * sum(e[,j] == point[1,j])/nrow(e) } if (probs[['p']] > probs[['e']]){ mushtest[i,'class'] = 'p' } else{ mushtest[i,'class'] = 'e' } } #print(mushtest) accuracy2 = sum(mushtest[,'class'] == mush[test_inds,'class'])/nrow(mushtest) * 100 #print(accuracy2) print(accuracy) print(accuracy2) #plotter = c(plotter, accuracy2 - accuracy) #print(dt) to see the decision tree
/ProjectDT.R
no_license
manism63/DecisionTree
R
false
false
6,458
r
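A quick, illustrative sanity check of the entropy/gain helpers defined in the script above on a tiny invented table (assumes calEntropy(), calcGain() and selectCategory() have already been sourced and behave as in the original project):

# Tiny invented dataset ('class' must be the first column, as the script assumes).
toy <- data.frame(
  class = c("e", "e", "e", "p", "p", "p"),
  odor  = c("n", "n", "a", "f", "f", "n"),
  cap   = c("x", "b", "x", "x", "b", "b"),
  stringsAsFactors = FALSE
)

calEntropy(toy)        # 1 -- classes are perfectly mixed (3 "e" vs 3 "p")
calcGain("odor", toy)  # ~0.54 -- information gain from splitting on odor
calcGain("cap", toy)   # ~0.08 -- much lower gain for cap
selectCategory(toy)    # "odor" -- the attribute with the highest gain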
#logging log <- file(snakemake@log[[1]], open="wt") sink(log, append=TRUE) sink(log, append=TRUE, type="message") # Restore output to console #sink() #sink(type="message") #backup R-based installation if conda didn't work or wasn't used #we check if packages are installed first #fix for: libicui18n.so.68: cannot open shared object file #reinstall of stringi connects libraries correctly #install.packages("stringi", repos="http://cran.us.r-project.org") # list of cran packages #cran_packages = c("readr", "plyr", "dplyr", "stringr", "tidyr", "tibble", "reshape2", "foreach", "doParallel") # load or install&load all #package.check <- lapply( # cran_packages, # FUN = function(x) { # if (!require(x, character.only = TRUE)) { # install.packages(x, dependencies = TRUE, repos = "http://cran.us.r-project.org") # library(x, character.only = TRUE) # } # } #) #load libraries library(readr) library(plyr) library(dplyr) library(stringr) library(tidyr) library(tibble) library(reshape2) library(foreach) library(doParallel) message("Acquiring hypothesis variables:") num = snakemake@params[["num"]] name = snakemake@params[["name"]] #threads info for parallel FORK cluster threads = as.numeric(snakemake@threads) # define the number of additional best blast hits to include in the follow-up analyses add_OGs = snakemake@params[["add_OGs"]] # define the minimum expansion factor & expansion difference to call an expansion (part of hypothesis.tsv) # + minimum number of genes of expanded species to even consider an OG expansion_factor = as.numeric(snakemake@params[["expansion_factor"]]) expansion_difference = as.numeric(snakemake@params[["expansion_difference"]]) expanded_genes_min = as.numeric(snakemake@params[["expanded_genes_min"]]) # get user choice whether to perform ploidy normalization or not ploidy_normalization = as.character(snakemake@params[["ploidy_normalization"]]) # get ploidy information of each species (supplied by user; part of species.tsv) species_table <- read.delim("config/species.tsv", header = TRUE, sep = "\t", row.names = "species") # using snakemake propagation + python strsplit() is really cool since the input is just a vector ## even if we have multiple species in expanded, compared or both ;D expanded_in = snakemake@params[["expansion"]] compared_to = snakemake@params[["comparison"]] c_t_species <- compared_to all_species <- unique(c(expanded_in, compared_to)) # defining: # at least Nmin_expanded_in expanded species that are expanded in at least Nmin_compared_to compared_to species Nmin_expanded_in <- as.numeric(snakemake@params[["Nmin_expanded_in"]]) Nmin_compared_to <- as.numeric(snakemake@params[["Nmin_compared_to"]]) # define whether or not only HOGs should be considered that have at least 1 gene from each expanded_in/compared_to species expanded_in_all_found <- as.character(snakemake@params[["expanded_in_all_found"]]) compared_to_all_found <- as.character(snakemake@params[["compared_to_all_found"]]) #read-in Orthogroup-GeneCounts-table message("Reading in Orthogroup-GeneCounts-table:") OG.GC <- readr::read_tsv("orthofinder/final-results/Orthogroups/Orthogroups.GeneCount.tsv") #concatenate all BLAST seaches to one file and create parseable tibble with read_delim #could add that only BLAST searches of our species in this hypothesis are used - toDO for later #easiest and efficient way is to bind outside of the loop #do.call is a nice way rbind(bind_rows[[1]], bind_rows[[2]], ...) 
message("Concatenating all BLAST seaches to one file and creating parseable tibble:") datalist = list() for (i in Sys.glob("orthofinder/search/*.txt")) { datalist[[i]] <- readr::read_delim(file = i, delim = "\t", col_names = c( "qseqid", "sseqid", "pident", "length", "mismatch", "gapopen", "qstart", "qend", "sstart", "send", "evalue", "bitscore"), col_types = c( qseqid = col_character(), sseqid = col_character(), pident = col_double(), length = col_double(), mismatch = col_double(), gapopen = col_double(), qstart = col_double(), qend = col_double(), sstart = col_double(), send = col_double(), evalue = col_double(), bitscore = col_double() ) ) } all_BLAST <- base::do.call(bind_rows, datalist) #read-in the conversion table: orthofinder sequence IDs and corresponding actual gene/protein names #with "str_remove_all" I also easily get rid of the ":" message("Reading in conversion table based on SequenceIDs.txt:") conversion <- readr::read_delim(file = "orthofinder/SequenceIDs.txt", delim = " ", col_names = c("seqid", "name"), col_types = c( seqid = col_character(), name = col_character()) ) conversion <- conversion %>% dplyr::mutate(seqid = str_remove_all(seqid, ":")) message("Creating reformatted output with actual gene/protein names:") #create new columns with the actual gene/protein names all_BLAST_reformatted <- all_BLAST %>% dplyr::left_join(conversion, by = c("qseqid" = "seqid")) all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::left_join(conversion, by = c("sseqid" = "seqid")) #position after qseqid and sseqid respectively all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::relocate(name.x, .after = qseqid) all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::relocate(name.y, .after = sseqid) #rename to qseqid/sseqid_name all_BLAST_reformatted <- dplyr::rename(all_BLAST_reformatted, qseqid_name = name.x) all_BLAST_reformatted <- dplyr::rename(all_BLAST_reformatted, sseqid_name = name.y) message("Data read-in and reformat:") ph_orthogroups <- readr::read_delim(file = "orthofinder/final-results/Phylogenetic_Hierarchical_Orthogroups/N0.tsv", delim = "\t") #create dataframe with numbers of genes per PHOG #we combine expanded_in and compared_to vectors to easily compute for everything we need HOG_df <- setNames(base::data.frame(matrix(ncol = 1 + length(all_species), nrow = 0)), c("HOG", all_species)) for (i in 1:nrow(ph_orthogroups)) { row = 0 #print(ph_orthogroups[i,]$HOG) row <- c(ph_orthogroups[i,]$HOG) for (j in c(all_species)) { if (is.na(ph_orthogroups[i,][[j]])) { test = 0 row <- c(row, test) } else { test = length(unlist(strsplit(ph_orthogroups[i,][[j]], ","))) row <- c(row, test) } } HOG_df[i,] <- row } HOG_tibble <- as_tibble(HOG_df) for (k in 1:length(all_species)) { o = all_species[k] HOG_tibble[[o]] <- as.numeric(HOG_tibble[[o]]) } #create "copy" as HOG_tibble is currently modified in the upcoming step HOG_tibble_complete <- HOG_tibble #here, we apply the expansion rule/s - could be also be parsed from config? (toDO!) 
#get() function solves my problems of using the character functions inside dplyr message("Applying expansion rule/s per hypothesis:") # function has to reduce input dataset; this way each iteration (compared species) is included # for all compared species first check for each HOG if expanded species is >= #* # then keep rows in which expanded species has at least # (default: 2) for (e in expanded_in) { # create expansion counter column for each expanded species exp_species_name = paste0("exp_counter_", e) HOG_tibble <- HOG_tibble %>% tibble::add_column(!!(exp_species_name) := 0) if (ploidy_normalization == "YES") { for (c in c_t_species) { HOG_tibble <- HOG_tibble %>% mutate(!!(exp_species_name) := dplyr::case_when( # although cases in which the ct species has no genes are ignored via # the multiplication in the 2nd step, this underlines that we do so # now with the second expansion distance criterium we should have it anyway get(c) > 0 & get(e)/species_table[e, "ploidy"]*2 >= expansion_factor*(get(c))/species_table[c, "ploidy"]*2 ~ get(exp_species_name) + 1, get(c) > 0 & get(e)/species_table[e, "ploidy"]*2 >= expansion_difference + (get(c))/species_table[c, "ploidy"]*2 ~ get(exp_species_name) + 1, TRUE ~ 0, ) ) } } #if the user did not set ploidy_normalization to "YES" #here, no ploidy information is used to divide the number of genes per OG else { for (c in c_t_species) { HOG_tibble <- HOG_tibble %>% mutate(!!(exp_species_name) := dplyr::case_when( get(c) > 0 & get(e) >= expansion_factor*(get(c)) ~ get(exp_species_name) + 1, get(c) > 0 & get(e) >= expansion_difference + get(c) ~ get(exp_species_name) + 1, TRUE ~ 0, ) ) } } } # then we perform summing over all exp_counter_ columns based on user choices # at least Nmin_expanded_in expanded species that are expanded in at least Nmin_compared_to compared_to species # create new column # for each exp_counter_* species check if value >= y # all passing exp_counter_* species are counted; if sum >= x retain HOG HOG_tibble <- HOG_tibble %>% mutate(pass = # selecting columns that we want to use use for subsequent function select(., contains("exp_counter_")) %>% # use pmap on this subset to get a vector of min from each row # dataframe is a list so pmap works on each element of the list; here, each row # we sum the occurences (per row!) of exp_counter_* cols that greater/equal to user cutoff purrr::pmap_dbl(., ~ sum(c(...) >= Nmin_compared_to)) ) # lastly, retain rows/HOGs that pass x cutoff = number of expanded species # & drop unnecessary columns HOG_tibble <- HOG_tibble %>% filter(pass >= Nmin_expanded_in) %>% select(-contains("exp_counter_"), -pass) # additional optional filter1 - expanded_in gene family complete criterium if (expanded_in_all_found == "YES") { HOG_tibble <- HOG_tibble %>% # create column with per-row sum of expanded_in species that have at least 1 gene in the HOG mutate(expanded_in_pass = select(., contains(expanded_in)) %>% purrr::pmap_dbl(., ~ sum(c(...) >= 1)) ) %>% # only keep rows/HOGs in which at least 1 gene of each expanded_in species occurs filter(expanded_in_pass >= length(expanded_in)) %>% # remove expanded_in_pass column select(-expanded_in_pass) } # additional optional filter2 - compared_to gene family complete criterium if (compared_to_all_found == "YES") { HOG_tibble <- HOG_tibble %>% # create column with per-row sum of compared_to species that have at least 1 gene in the HOG mutate(compared_to_pass = select(., contains(compared_to)) %>% purrr::pmap_dbl(., ~ sum(c(...) 
>= 1)) ) %>% # only keep rows/HOGs in which at least 1 gene of each compared_to species occurs filter(compared_to_pass >= length(compared_to)) %>% # remove compared_to_pass column select(-compared_to_pass) } # optional hard filter for at least # genes in all expanded species # this is useful in difficult ploidy cases and solves downstream issues in small OGs HOG_tibble <- HOG_tibble %>% filter(if_all(contains(expanded_in), ~ . >= expanded_genes_min)) # new object: expanded_HOGs expanded_HOGs <- HOG_tibble # based on filtering criteria create per hypothesis table with: # all HOGs and gene counts + additional column expansion (yes/no) # this is later used in the creation of final tea outputs to create a HOG level table per hypothesis #need to check if there is at least one expanded (H)OG #if not replace_na woud throw error since we are changing types #all of the downstream stuff in this script only makes sense if we in fact do have expanded groups if (nrow(expanded_HOGs) > 0) { #create expansion vector with length of expandsion HOGs expansion <- replicate(nrow(expanded_HOGs), "yes") #print(expansion) expansion_tibble <- full_join(HOG_tibble_complete, tibble::add_column(expanded_HOGs, expansion, .after = "HOG"), by = c("HOG"), suffix = c("", ".remove")) %>% tidyr::replace_na(list(expansion = "no")) %>% select(-c(ends_with(".remove"))) dir.create(paste("tea/", num, "/expansion_tibble/", sep = "")) saveRDS(expansion_tibble, paste("tea/", num, "/expansion_tibble/expansion_tibble.rds", sep = "")) # create genes column in ph_orthogroups file # row merge? - no unite function, really handy ;D ref_ph_orthogroups <- ph_orthogroups %>% tidyr::unite("genes", all_of(all_species), sep =", ", na.rm = TRUE, remove = TRUE) message("Creating .txt files for all expanded OGs with reciprocal best BLAST hits of species in respective hypothesis:") #for each gene/protein name in an interesting OG do (is there a file just with names per OG?): #check "all_BLAST_reformatted" for all entries including these names and create new dataframe/tibble # then, perform filtering and retain certain set of genes/proteins per OG analysis # then, create .txt file per OG with these gene names ## come-up with filter criteria to have better trees? ## I could of course just keep everything and save the evalues, etc.; well, problem for later.. ;D ####> output for snakemake? what about inidividual OG txt files, because starting here parallelisation can really impact dir.create(paste("tea/", num, "/expansion_cp_target_OGs/", sep = "")) ## define custom class for extended blast hits # need a list object to hold all data of this class extended_BLAST_hits <- list() # class for extended BLAST hits info setClass("extended_BLAST_hits", slots=list(blast_table="tbl_df") ) for (i in expanded_HOGs$HOG) { exp_og_genes <- unlist(strsplit(ref_ph_orthogroups[ref_ph_orthogroups$HOG == i,]$genes, split = ", ")) BLAST_hits_exp_og_genes <- dplyr::filter(all_BLAST_reformatted, qseqid_name %in% exp_og_genes | sseqid_name %in% exp_og_genes) sorted_BLAST_hits_exp_og_genes <- arrange(BLAST_hits_exp_og_genes, evalue, -bitscore, -pident) # get gene name of last gene to be added based on number of add_blast_hits all_blast_genes <- na.omit( unique( c( rbind( sorted_BLAST_hits_exp_og_genes$qseqid_name, sorted_BLAST_hits_exp_og_genes$sseqid_name ) ) ) ) # set of all extended blast hits (based on threshold) - vector of gene names (ordered!) 
# also nice: don't need a conditional since `%>% head(n = add_blast_hits)` will work, # even if add_blast_hits param is > setdiff(all_blast_genes, exp_og_genes) extended_blast_hits_genes <- setdiff(all_blast_genes, exp_og_genes) %>% head(n = add_OGs) # non redundant set of gene names of HOG + n additional blast hits as defined in the user threshold HOG_and_ext_blast_hits_genes <- c(exp_og_genes, extended_blast_hits_genes) #create subset of sorted_BLAST_hits_exp_og_genes table in which only: # exp_og_genes & extended_blast_hits_genes are allowed to appear # this way we have cutoff for the nth best blast hit/gene but also keep all secondary hits HOG_and_ext_blast_hits_table <- sorted_BLAST_hits_exp_og_genes %>% filter(qseqid_name %in% HOG_and_ext_blast_hits_genes) %>% filter(sseqid_name %in% HOG_and_ext_blast_hits_genes) #tt write_lines("", #tt paste("tea/", num, "/expansion_cp_target_OGs/", i, ".txt", sep = "") #tt ) # for each exp. HOG create an extended_BLAST_hits S4 object and collect as part of list ext_B_hits <- new("extended_BLAST_hits", blast_table=HOG_and_ext_blast_hits_table ) # assign name based on name of the underlying expanded HOG ext_B_hits <- list(ext_B_hits) names(ext_B_hits) <- paste0(i) # append to list object extended_BLAST_hits <- c(extended_BLAST_hits, ext_B_hits) } # save extended BLAST hits to hypothesis specific ("num") RDS file #-> to be read and used in final_tea_computation.R script saveRDS(extended_BLAST_hits, paste("tea/", num, "/extended_BLAST_hits/extended_BLAST_hits.RDS", sep = "")) ### Adding OGs instead of BLAST hits ### message("Adding OGs instead of BLAST hits") #transforming ph_orthogroups to long format - nice & neat lookup table ;D long_ph_orthogroups <- ph_orthogroups %>% select(-OG, -`Gene Tree Parent Clade`) %>% melt(., id.vars = c("HOG")) %>% rename(species=variable, id=value) %>% mutate(species = as.character(species)) %>% separate_rows(id, sep = ", ") %>% drop_na() #create final summary list - per OG (name) the cumulative steps of additional OGs summary_add_OG_analysis_list <- vector(mode = "list", length = length(expanded_HOGs$HOG)) length(summary_add_OG_analysis_list) #create classes for nested S3-list structure holding all additional OG sets per OG analysis setClass("add_OG_analysis", slots=list(add_OG_analysis="list") ) setClass("add_OG_set", slots=list(genes="tbl_df") ) dir.create(paste("tea/", num, "/add_OGs_sets/id_lists/", sep = "")) #removing all big files to minimize mem impact of FORK cluster rm(datalist) rm(all_BLAST) rm(all_blast_genes) rm(HOG_and_ext_blast_hits_table) rm(HOG_and_ext_blast_hits_genes) rm(sorted_BLAST_hits_exp_og_genes) rm(extended_BLAST_hits) rm(ext_B_hits) #### FORK cluster since I expect a Linux machine #### autostop=TRUE since I don't want to handle this manually #with my.cluster & stopCluster(my.cluster) I could check the status setup_cluster <- function(){ #define cluster parallel::detectCores() n.cores <- threads n.cores #create the cluster - FORK because this way libraries, variables etc. are copied to the clusters! my.cluster <- parallel::makeForkCluster( n.cores, type = "FORK", autostop=TRUE ) #check cluster definition (optional) print(my.cluster) #register it to be used by %dopar% doParallel::registerDoParallel(cl = my.cluster) #check if it is registered (optional) print( foreach::getDoParRegistered() ) #how many workers are available? 
(optional) print( foreach::getDoParWorkers() ) } #function to completely remove a fork cluster burn_socks <- function(x){ close.connection(getConnection(x)) } #function to truly get rid of old Cluster/sockets rm_cluster <- function(){ stopImplicitCluster() connections <- showConnections(all = FALSE) socket_connections <- as.data.frame(connections) %>% filter(class == "sockconn") %>% rownames() message("Removing all unwanted FORK connections - purging closed cluster sockets") message("This will kill zombie proesses and free up RAM") lapply(X = socket_connections, FUN = burn_socks) } #setup & start FORK cluster setup_cluster() #we iterate over the expanded OGs pre_summary_add_OG_analysis_list <- foreach(i = expanded_HOGs$HOG) %dopar% { exp_og_genes <- unlist(strsplit(ref_ph_orthogroups[ref_ph_orthogroups$HOG == i,]$genes, split = ", ")) BLAST_hits_exp_og_genes <- dplyr::filter(all_BLAST_reformatted, qseqid_name %in% exp_og_genes | sseqid_name %in% exp_og_genes) sorted_BLAST_hits_exp_og_genes <- arrange(BLAST_hits_exp_og_genes, evalue, -bitscore, -pident) #after the sorting of BLAST hits we move on to merging the OG information into the current exp. OG BLAST hits results #we can create a merged dataframe from the blast table (HOG specific) and the long format HOG table (general) #difficulty is that in case of singletons we have NA under HOG #easiest way I can think of is a named list self_and_closest_ogs <- left_join(sorted_BLAST_hits_exp_og_genes, long_ph_orthogroups, by = c("sseqid_name" = "id")) %>% group_by(sseqid_name) %>% arrange(evalue, -bitscore, -pident) %>% slice(1) %>% ungroup() %>% arrange(evalue, -bitscore, -pident) %>% mutate(HOG = as.character(HOG), HOG = ifelse(is.na(HOG), paste("singleton", sep = "-", cumsum(is.na(HOG))), as.character(HOG))) %>% group_by(HOG) %>% arrange(evalue, -bitscore, -pident) %>% slice(1) %>% ungroup() %>% arrange(evalue, -bitscore, -pident) #first row will should correspond to self match - the OG itself #self_and_closest_ogs #here the information provided by the user regarding max. additional OGs is used #we copy the value to an "internal" object since in the following commands we modify in each iteration depending on the underlying OG/s #the initial add_OGs value chosen by the user is used later and must not be modified add_OGs_internal = add_OGs #need to add check for number of additonal OGs/singletons #e.g. user sets max. add. 
OGs/singletons to 5 but we only can provide 4 #+ have to consider the expanded OG itself, so: available_add_OGs <- nrow(self_and_closest_ogs) - 1 #available_add_OGs #in this case set add_OGs to max available if (available_add_OGs < add_OGs_internal) { add_OGs_internal <- available_add_OGs } #empty list of precomputed size - add OGs plus the expanded OG itself add_OG_analysis_list <- vector(mode = "list", length = add_OGs_internal + 1) for (j in 1:(add_OGs_internal + 1)) { og_name <- self_and_closest_ogs[j,] %>% pull(HOG) #differnetiate between OG case and a singleton case - different handling: #for HOG get all associated genes/proteins from large long format table #for singletons get singelton gene/protein from the table itself if (!str_detect(og_name, "singleton") == TRUE) { name_curr_close_HOG <- self_and_closest_ogs[j,] %>% pull(HOG) ids_curr_close_HOG <- long_ph_orthogroups %>% filter(HOG %in% name_curr_close_HOG) %>% pull(id) add_OG_analysis_list[[j]] <- ids_curr_close_HOG names(add_OG_analysis_list)[j] <- name_curr_close_HOG } else { name_curr_close_singleton <- self_and_closest_ogs[j,] %>% pull(HOG) id_curr_close_HOG <- self_and_closest_ogs[j,] %>% pull(sseqid_name) add_OG_analysis_list[[j]] <- id_curr_close_HOG names(add_OG_analysis_list)[j] <- name_curr_close_singleton } } #create copy of the list and remove the names #add all previous gene/proteins to the next OG # -> e.g. the former seventh OG list element will contain all genes/proteins of all lower numbered OGs #the final "cum_add_OG_analysis_list" will be a list with each next element having the cumulative set of all previous and own gene/protein ids cum_add_OG_analysis_list <- add_OG_analysis_list names(cum_add_OG_analysis_list) <- NULL #copy_add_OG_analysis_list for (k in 0:(length(cum_add_OG_analysis_list)-1)) { cum_add_OG_analysis_list[[k+1]] <- unlist(c(cum_add_OG_analysis_list[k], cum_add_OG_analysis_list[k+1])) } #create directory/ies - for each HOG under "add_OGs_sets/id_lists/" dir.create(paste("tea/", num, "/add_OGs_sets/id_lists/", i, sep = "")) #iterate over cum. list and write cumulative sets into seperate files for (l in 1:length(cum_add_OG_analysis_list)) { write_lines(cum_add_OG_analysis_list[[l]], paste("tea/", num, "/add_OGs_sets/id_lists/", i, "/add_OGs_set_num-", l, ".txt", sep = "") #paste("../tea/", num, "/add_OGs_sets/", i, "/add_OGs_set_num-", l, ".txt", sep = "") ) } #append current cum. 
list to summary list & name element after OG #actually we need to do this again and this is redundant;toDo names(cum_add_OG_analysis_list) <- i return(cum_add_OG_analysis_list) } #after cluster is done - remove it rm_cluster() #some final formatting for (i in 1:length(pre_summary_add_OG_analysis_list)) { names(pre_summary_add_OG_analysis_list[[i]]) <- NULL } names(pre_summary_add_OG_analysis_list) <- expanded_HOGs$HOG summary_add_OG_analysis_list <- pre_summary_add_OG_analysis_list ## in this final step we use the summary list to create S3-list nest structure which contains all info ## of the summary but in the format we will embed in the final results object as part of the final_tea computation #length of add OGs analysis summary list summary_length <- length(summary_add_OG_analysis_list) #create empty list of length of the summary list add_og_complete_object <- vector(mode = "list", length = summary_length) #iterate over all OGs in summary list for (og in 1:summary_length) { #amount of sets for current OG - (necessary since less addtional OGs than max wished by user is possible) curr_og_n_sets <- length(summary_add_OG_analysis_list[[og]]) #empty list with n elements = n sets for currently analyzed OG og_all_sets <- vector(mode = "list", length = curr_og_n_sets) for (set in 1:curr_og_n_sets) { curr_set <- new("add_OG_set", genes=as_tibble( unlist( summary_add_OG_analysis_list[[og]][set], ) ) ) curr_set <- list(curr_set) og_all_sets[set] <- curr_set names(og_all_sets)[set] <- paste0("set_", set) } curr_add_og <- new("add_OG_analysis", add_OG_analysis=og_all_sets ) curr_add_og <- list(curr_add_og) add_og_complete_object[og] <- curr_add_og names(add_og_complete_object)[og] <- names(summary_add_OG_analysis_list[og]) } # save summary table for aditional OG analysis to hypothesis specific ("num") RDS file saveRDS(add_og_complete_object, paste("tea/", num, "/add_OGs_object/add_OG_analysis_object.RDS", sep = "")) # nested snakemake checkpoints are annoying at the moment # quick fix - create empty addtional set_num files which we can ignore but nonetheless exist message("adding empty missing files - current workaround to avoid nested snakemake checkpoints") #max_n_sets + 1 since user choice is the number of ADDITIONAL sets but we have ti remember the expanded OG itself (always set_num = 1) max_n_sets = add_OGs + 1 for (n in names(add_og_complete_object)) { og_n_sets <- length( list.files(path = paste0("tea/", num, "/add_OGs_sets/id_lists/", n)) ) og_sets <- list.files(path = paste0("tea/", num, "/add_OGs_sets/id_lists/", n)) if (og_n_sets < max_n_sets) { n_missing_sets <- max_n_sets - og_n_sets for (m in 1:n_missing_sets) { value <- max_n_sets - m + 1 missing_file <- paste0("tea/", num, "/add_OGs_sets/id_lists/", n, "/", "add_OGs_set_num-", value, ".txt") file.create(missing_file) } } } #all of the previous only runs in case we actually find an expanded group in this particular hypothesis #if this not the case we have to perform considerably less work (although of course the hypothesis is rather uninformative) } else { #just add expansion column with "no" for all rows expansion_tibble <- HOG_tibble_complete %>% mutate(expansion = "no") dir.create(paste("tea/", num, "/expansion_tibble/", sep = "")) saveRDS(expansion_tibble, paste("tea/", num, "/expansion_tibble/expansion_tibble.rds", sep = "")) message("No expanded OGs for this hypothesis - creating empty outputs") extended_BLAST_hits <- "empty" # save extended BLAST hits to hypothesis specific ("num") RDS file #-> to be read and used in 
final_tea_computation.R script saveRDS(extended_BLAST_hits, paste("tea/", num, "/extended_BLAST_hits/extended_BLAST_hits.RDS", sep = "")) add_og_complete_object <- "empty" # save summary table for aditional OG analysis to hypothesis specific ("num") RDS file saveRDS(add_og_complete_object, paste("tea/", num, "/add_OGs_object/add_OG_analysis_object.RDS", sep = "")) dir.create(paste("tea/", num, "/expansion_cp_target_OGs/", sep = "")) #create empty file under add_OG id_lists dir.create(paste0("tea/", num, "/add_OGs_sets/id_lists/")) file.create(paste0("tea/", num, "/add_OGs_sets/id_lists/", "empty.txt")) } ##### #### Lastly, create .check to know everything is done #message("Creating .check - Expansions successfully computed for hypothesis ", num) #expansion_cp_target_OGs.check <- "check" #dir.create(paste("checks/tea/", num, "/", sep="")) #write_file(expansion_cp_target_OGs.check, paste("checks/tea/", num, "/expansion_cp_target_OGs.check", sep = ""))
/workflow/scripts/expansion.R
permissive
tgstoecker/A2TEA.Workflow
R
false
false
30,567
r
#logging log <- file(snakemake@log[[1]], open="wt") sink(log, append=TRUE) sink(log, append=TRUE, type="message") # Restore output to console #sink() #sink(type="message") #backup R-based installation if conda didn't work or wasn't used #we check if packages are installed first #fix for: libicui18n.so.68: cannot open shared object file #reinstall of stringi connects libraries correctly #install.packages("stringi", repos="http://cran.us.r-project.org") # list of cran packages #cran_packages = c("readr", "plyr", "dplyr", "stringr", "tidyr", "tibble", "reshape2", "foreach", "doParallel") # load or install&load all #package.check <- lapply( # cran_packages, # FUN = function(x) { # if (!require(x, character.only = TRUE)) { # install.packages(x, dependencies = TRUE, repos = "http://cran.us.r-project.org") # library(x, character.only = TRUE) # } # } #) #load libraries library(readr) library(plyr) library(dplyr) library(stringr) library(tidyr) library(tibble) library(reshape2) library(foreach) library(doParallel) message("Acquiring hypothesis variables:") num = snakemake@params[["num"]] name = snakemake@params[["name"]] #threads info for parallel FORK cluster threads = as.numeric(snakemake@threads) # define the number of additional best blast hits to include in the follow-up analyses add_OGs = snakemake@params[["add_OGs"]] # define the minimum expansion factor & expansion difference to call an expansion (part of hypothesis.tsv) # + minimum number of genes of expanded species to even consider an OG expansion_factor = as.numeric(snakemake@params[["expansion_factor"]]) expansion_difference = as.numeric(snakemake@params[["expansion_difference"]]) expanded_genes_min = as.numeric(snakemake@params[["expanded_genes_min"]]) # get user choice whether to perform ploidy normalization or not ploidy_normalization = as.character(snakemake@params[["ploidy_normalization"]]) # get ploidy information of each species (supplied by user; part of species.tsv) species_table <- read.delim("config/species.tsv", header = TRUE, sep = "\t", row.names = "species") # using snakemake propagation + python strsplit() is really cool since the input is just a vector ## even if we have multiple species in expanded, compared or both ;D expanded_in = snakemake@params[["expansion"]] compared_to = snakemake@params[["comparison"]] c_t_species <- compared_to all_species <- unique(c(expanded_in, compared_to)) # defining: # at least Nmin_expanded_in expanded species that are expanded in at least Nmin_compared_to compared_to species Nmin_expanded_in <- as.numeric(snakemake@params[["Nmin_expanded_in"]]) Nmin_compared_to <- as.numeric(snakemake@params[["Nmin_compared_to"]]) # define whether or not only HOGs should be considered that have at least 1 gene from each expanded_in/compared_to species expanded_in_all_found <- as.character(snakemake@params[["expanded_in_all_found"]]) compared_to_all_found <- as.character(snakemake@params[["compared_to_all_found"]]) #read-in Orthogroup-GeneCounts-table message("Reading in Orthogroup-GeneCounts-table:") OG.GC <- readr::read_tsv("orthofinder/final-results/Orthogroups/Orthogroups.GeneCount.tsv") #concatenate all BLAST seaches to one file and create parseable tibble with read_delim #could add that only BLAST searches of our species in this hypothesis are used - toDO for later #easiest and efficient way is to bind outside of the loop #do.call is a nice way rbind(bind_rows[[1]], bind_rows[[2]], ...) 
message("Concatenating all BLAST seaches to one file and creating parseable tibble:") datalist = list() for (i in Sys.glob("orthofinder/search/*.txt")) { datalist[[i]] <- readr::read_delim(file = i, delim = "\t", col_names = c( "qseqid", "sseqid", "pident", "length", "mismatch", "gapopen", "qstart", "qend", "sstart", "send", "evalue", "bitscore"), col_types = c( qseqid = col_character(), sseqid = col_character(), pident = col_double(), length = col_double(), mismatch = col_double(), gapopen = col_double(), qstart = col_double(), qend = col_double(), sstart = col_double(), send = col_double(), evalue = col_double(), bitscore = col_double() ) ) } all_BLAST <- base::do.call(bind_rows, datalist) #read-in the conversion table: orthofinder sequence IDs and corresponding actual gene/protein names #with "str_remove_all" I also easily get rid of the ":" message("Reading in conversion table based on SequenceIDs.txt:") conversion <- readr::read_delim(file = "orthofinder/SequenceIDs.txt", delim = " ", col_names = c("seqid", "name"), col_types = c( seqid = col_character(), name = col_character()) ) conversion <- conversion %>% dplyr::mutate(seqid = str_remove_all(seqid, ":")) message("Creating reformatted output with actual gene/protein names:") #create new columns with the actual gene/protein names all_BLAST_reformatted <- all_BLAST %>% dplyr::left_join(conversion, by = c("qseqid" = "seqid")) all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::left_join(conversion, by = c("sseqid" = "seqid")) #position after qseqid and sseqid respectively all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::relocate(name.x, .after = qseqid) all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::relocate(name.y, .after = sseqid) #rename to qseqid/sseqid_name all_BLAST_reformatted <- dplyr::rename(all_BLAST_reformatted, qseqid_name = name.x) all_BLAST_reformatted <- dplyr::rename(all_BLAST_reformatted, sseqid_name = name.y) message("Data read-in and reformat:") ph_orthogroups <- readr::read_delim(file = "orthofinder/final-results/Phylogenetic_Hierarchical_Orthogroups/N0.tsv", delim = "\t") #create dataframe with numbers of genes per PHOG #we combine expanded_in and compared_to vectors to easily compute for everything we need HOG_df <- setNames(base::data.frame(matrix(ncol = 1 + length(all_species), nrow = 0)), c("HOG", all_species)) for (i in 1:nrow(ph_orthogroups)) { row = 0 #print(ph_orthogroups[i,]$HOG) row <- c(ph_orthogroups[i,]$HOG) for (j in c(all_species)) { if (is.na(ph_orthogroups[i,][[j]])) { test = 0 row <- c(row, test) } else { test = length(unlist(strsplit(ph_orthogroups[i,][[j]], ","))) row <- c(row, test) } } HOG_df[i,] <- row } HOG_tibble <- as_tibble(HOG_df) for (k in 1:length(all_species)) { o = all_species[k] HOG_tibble[[o]] <- as.numeric(HOG_tibble[[o]]) } #create "copy" as HOG_tibble is currently modified in the upcoming step HOG_tibble_complete <- HOG_tibble #here, we apply the expansion rule/s - could be also be parsed from config? (toDO!) 
#get() function solves my problems of using the character functions inside dplyr message("Applying expansion rule/s per hypothesis:") # function has to reduce input dataset; this way each iteration (compared species) is included # for all compared species first check for each HOG if expanded species is >= #* # then keep rows in which expanded species has at least # (default: 2) for (e in expanded_in) { # create expansion counter column for each expanded species exp_species_name = paste0("exp_counter_", e) HOG_tibble <- HOG_tibble %>% tibble::add_column(!!(exp_species_name) := 0) if (ploidy_normalization == "YES") { for (c in c_t_species) { HOG_tibble <- HOG_tibble %>% mutate(!!(exp_species_name) := dplyr::case_when( # although cases in which the ct species has no genes are ignored via # the multiplication in the 2nd step, this underlines that we do so # now with the second expansion distance criterium we should have it anyway get(c) > 0 & get(e)/species_table[e, "ploidy"]*2 >= expansion_factor*(get(c))/species_table[c, "ploidy"]*2 ~ get(exp_species_name) + 1, get(c) > 0 & get(e)/species_table[e, "ploidy"]*2 >= expansion_difference + (get(c))/species_table[c, "ploidy"]*2 ~ get(exp_species_name) + 1, TRUE ~ 0, ) ) } } #if the user did not set ploidy_normalization to "YES" #here, no ploidy information is used to divide the number of genes per OG else { for (c in c_t_species) { HOG_tibble <- HOG_tibble %>% mutate(!!(exp_species_name) := dplyr::case_when( get(c) > 0 & get(e) >= expansion_factor*(get(c)) ~ get(exp_species_name) + 1, get(c) > 0 & get(e) >= expansion_difference + get(c) ~ get(exp_species_name) + 1, TRUE ~ 0, ) ) } } } # then we perform summing over all exp_counter_ columns based on user choices # at least Nmin_expanded_in expanded species that are expanded in at least Nmin_compared_to compared_to species # create new column # for each exp_counter_* species check if value >= y # all passing exp_counter_* species are counted; if sum >= x retain HOG HOG_tibble <- HOG_tibble %>% mutate(pass = # selecting columns that we want to use use for subsequent function select(., contains("exp_counter_")) %>% # use pmap on this subset to get a vector of min from each row # dataframe is a list so pmap works on each element of the list; here, each row # we sum the occurences (per row!) of exp_counter_* cols that greater/equal to user cutoff purrr::pmap_dbl(., ~ sum(c(...) >= Nmin_compared_to)) ) # lastly, retain rows/HOGs that pass x cutoff = number of expanded species # & drop unnecessary columns HOG_tibble <- HOG_tibble %>% filter(pass >= Nmin_expanded_in) %>% select(-contains("exp_counter_"), -pass) # additional optional filter1 - expanded_in gene family complete criterium if (expanded_in_all_found == "YES") { HOG_tibble <- HOG_tibble %>% # create column with per-row sum of expanded_in species that have at least 1 gene in the HOG mutate(expanded_in_pass = select(., contains(expanded_in)) %>% purrr::pmap_dbl(., ~ sum(c(...) >= 1)) ) %>% # only keep rows/HOGs in which at least 1 gene of each expanded_in species occurs filter(expanded_in_pass >= length(expanded_in)) %>% # remove expanded_in_pass column select(-expanded_in_pass) } # additional optional filter2 - compared_to gene family complete criterium if (compared_to_all_found == "YES") { HOG_tibble <- HOG_tibble %>% # create column with per-row sum of compared_to species that have at least 1 gene in the HOG mutate(compared_to_pass = select(., contains(compared_to)) %>% purrr::pmap_dbl(., ~ sum(c(...) 
>= 1)) ) %>% # only keep rows/HOGs in which at least 1 gene of each compared_to species occurs filter(compared_to_pass >= length(compared_to)) %>% # remove compared_to_pass column select(-compared_to_pass) } # optional hard filter for at least # genes in all expanded species # this is useful in difficult ploidy cases and solves downstream issues in small OGs HOG_tibble <- HOG_tibble %>% filter(if_all(contains(expanded_in), ~ . >= expanded_genes_min)) # new object: expanded_HOGs expanded_HOGs <- HOG_tibble # based on filtering criteria create per hypothesis table with: # all HOGs and gene counts + additional column expansion (yes/no) # this is later used in the creation of final tea outputs to create a HOG level table per hypothesis #need to check if there is at least one expanded (H)OG #if not replace_na woud throw error since we are changing types #all of the downstream stuff in this script only makes sense if we in fact do have expanded groups if (nrow(expanded_HOGs) > 0) { #create expansion vector with length of expandsion HOGs expansion <- replicate(nrow(expanded_HOGs), "yes") #print(expansion) expansion_tibble <- full_join(HOG_tibble_complete, tibble::add_column(expanded_HOGs, expansion, .after = "HOG"), by = c("HOG"), suffix = c("", ".remove")) %>% tidyr::replace_na(list(expansion = "no")) %>% select(-c(ends_with(".remove"))) dir.create(paste("tea/", num, "/expansion_tibble/", sep = "")) saveRDS(expansion_tibble, paste("tea/", num, "/expansion_tibble/expansion_tibble.rds", sep = "")) # create genes column in ph_orthogroups file # row merge? - no unite function, really handy ;D ref_ph_orthogroups <- ph_orthogroups %>% tidyr::unite("genes", all_of(all_species), sep =", ", na.rm = TRUE, remove = TRUE) message("Creating .txt files for all expanded OGs with reciprocal best BLAST hits of species in respective hypothesis:") #for each gene/protein name in an interesting OG do (is there a file just with names per OG?): #check "all_BLAST_reformatted" for all entries including these names and create new dataframe/tibble # then, perform filtering and retain certain set of genes/proteins per OG analysis # then, create .txt file per OG with these gene names ## come-up with filter criteria to have better trees? ## I could of course just keep everything and save the evalues, etc.; well, problem for later.. ;D ####> output for snakemake? what about inidividual OG txt files, because starting here parallelisation can really impact dir.create(paste("tea/", num, "/expansion_cp_target_OGs/", sep = "")) ## define custom class for extended blast hits # need a list object to hold all data of this class extended_BLAST_hits <- list() # class for extended BLAST hits info setClass("extended_BLAST_hits", slots=list(blast_table="tbl_df") ) for (i in expanded_HOGs$HOG) { exp_og_genes <- unlist(strsplit(ref_ph_orthogroups[ref_ph_orthogroups$HOG == i,]$genes, split = ", ")) BLAST_hits_exp_og_genes <- dplyr::filter(all_BLAST_reformatted, qseqid_name %in% exp_og_genes | sseqid_name %in% exp_og_genes) sorted_BLAST_hits_exp_og_genes <- arrange(BLAST_hits_exp_og_genes, evalue, -bitscore, -pident) # get gene name of last gene to be added based on number of add_blast_hits all_blast_genes <- na.omit( unique( c( rbind( sorted_BLAST_hits_exp_og_genes$qseqid_name, sorted_BLAST_hits_exp_og_genes$sseqid_name ) ) ) ) # set of all extended blast hits (based on threshold) - vector of gene names (ordered!) 
# also nice: don't need a conditional since `%>% head(n = add_blast_hits)` will work, # even if add_blast_hits param is > setdiff(all_blast_genes, exp_og_genes) extended_blast_hits_genes <- setdiff(all_blast_genes, exp_og_genes) %>% head(n = add_OGs) # non redundant set of gene names of HOG + n additional blast hits as defined in the user threshold HOG_and_ext_blast_hits_genes <- c(exp_og_genes, extended_blast_hits_genes) #create subset of sorted_BLAST_hits_exp_og_genes table in which only: # exp_og_genes & extended_blast_hits_genes are allowed to appear # this way we have cutoff for the nth best blast hit/gene but also keep all secondary hits HOG_and_ext_blast_hits_table <- sorted_BLAST_hits_exp_og_genes %>% filter(qseqid_name %in% HOG_and_ext_blast_hits_genes) %>% filter(sseqid_name %in% HOG_and_ext_blast_hits_genes) #tt write_lines("", #tt paste("tea/", num, "/expansion_cp_target_OGs/", i, ".txt", sep = "") #tt ) # for each exp. HOG create an extended_BLAST_hits S4 object and collect as part of list ext_B_hits <- new("extended_BLAST_hits", blast_table=HOG_and_ext_blast_hits_table ) # assign name based on name of the underlying expanded HOG ext_B_hits <- list(ext_B_hits) names(ext_B_hits) <- paste0(i) # append to list object extended_BLAST_hits <- c(extended_BLAST_hits, ext_B_hits) } # save extended BLAST hits to hypothesis specific ("num") RDS file #-> to be read and used in final_tea_computation.R script saveRDS(extended_BLAST_hits, paste("tea/", num, "/extended_BLAST_hits/extended_BLAST_hits.RDS", sep = "")) ### Adding OGs instead of BLAST hits ### message("Adding OGs instead of BLAST hits") #transforming ph_orthogroups to long format - nice & neat lookup table ;D long_ph_orthogroups <- ph_orthogroups %>% select(-OG, -`Gene Tree Parent Clade`) %>% melt(., id.vars = c("HOG")) %>% rename(species=variable, id=value) %>% mutate(species = as.character(species)) %>% separate_rows(id, sep = ", ") %>% drop_na() #create final summary list - per OG (name) the cumulative steps of additional OGs summary_add_OG_analysis_list <- vector(mode = "list", length = length(expanded_HOGs$HOG)) length(summary_add_OG_analysis_list) #create classes for nested S3-list structure holding all additional OG sets per OG analysis setClass("add_OG_analysis", slots=list(add_OG_analysis="list") ) setClass("add_OG_set", slots=list(genes="tbl_df") ) dir.create(paste("tea/", num, "/add_OGs_sets/id_lists/", sep = "")) #removing all big files to minimize mem impact of FORK cluster rm(datalist) rm(all_BLAST) rm(all_blast_genes) rm(HOG_and_ext_blast_hits_table) rm(HOG_and_ext_blast_hits_genes) rm(sorted_BLAST_hits_exp_og_genes) rm(extended_BLAST_hits) rm(ext_B_hits) #### FORK cluster since I expect a Linux machine #### autostop=TRUE since I don't want to handle this manually #with my.cluster & stopCluster(my.cluster) I could check the status setup_cluster <- function(){ #define cluster parallel::detectCores() n.cores <- threads n.cores #create the cluster - FORK because this way libraries, variables etc. are copied to the clusters! my.cluster <- parallel::makeForkCluster( n.cores, type = "FORK", autostop=TRUE ) #check cluster definition (optional) print(my.cluster) #register it to be used by %dopar% doParallel::registerDoParallel(cl = my.cluster) #check if it is registered (optional) print( foreach::getDoParRegistered() ) #how many workers are available? 
(optional) print( foreach::getDoParWorkers() ) } #function to completely remove a fork cluster burn_socks <- function(x){ close.connection(getConnection(x)) } #function to truly get rid of old Cluster/sockets rm_cluster <- function(){ stopImplicitCluster() connections <- showConnections(all = FALSE) socket_connections <- as.data.frame(connections) %>% filter(class == "sockconn") %>% rownames() message("Removing all unwanted FORK connections - purging closed cluster sockets") message("This will kill zombie proesses and free up RAM") lapply(X = socket_connections, FUN = burn_socks) } #setup & start FORK cluster setup_cluster() #we iterate over the expanded OGs pre_summary_add_OG_analysis_list <- foreach(i = expanded_HOGs$HOG) %dopar% { exp_og_genes <- unlist(strsplit(ref_ph_orthogroups[ref_ph_orthogroups$HOG == i,]$genes, split = ", ")) BLAST_hits_exp_og_genes <- dplyr::filter(all_BLAST_reformatted, qseqid_name %in% exp_og_genes | sseqid_name %in% exp_og_genes) sorted_BLAST_hits_exp_og_genes <- arrange(BLAST_hits_exp_og_genes, evalue, -bitscore, -pident) #after the sorting of BLAST hits we move on to merging the OG information into the current exp. OG BLAST hits results #we can create a merged dataframe from the blast table (HOG specific) and the long format HOG table (general) #difficulty is that in case of singletons we have NA under HOG #easiest way I can think of is a named list self_and_closest_ogs <- left_join(sorted_BLAST_hits_exp_og_genes, long_ph_orthogroups, by = c("sseqid_name" = "id")) %>% group_by(sseqid_name) %>% arrange(evalue, -bitscore, -pident) %>% slice(1) %>% ungroup() %>% arrange(evalue, -bitscore, -pident) %>% mutate(HOG = as.character(HOG), HOG = ifelse(is.na(HOG), paste("singleton", sep = "-", cumsum(is.na(HOG))), as.character(HOG))) %>% group_by(HOG) %>% arrange(evalue, -bitscore, -pident) %>% slice(1) %>% ungroup() %>% arrange(evalue, -bitscore, -pident) #first row will should correspond to self match - the OG itself #self_and_closest_ogs #here the information provided by the user regarding max. additional OGs is used #we copy the value to an "internal" object since in the following commands we modify in each iteration depending on the underlying OG/s #the initial add_OGs value chosen by the user is used later and must not be modified add_OGs_internal = add_OGs #need to add check for number of additonal OGs/singletons #e.g. user sets max. add. 
OGs/singletons to 5 but we only can provide 4 #+ have to consider the expanded OG itself, so: available_add_OGs <- nrow(self_and_closest_ogs) - 1 #available_add_OGs #in this case set add_OGs to max available if (available_add_OGs < add_OGs_internal) { add_OGs_internal <- available_add_OGs } #empty list of precomputed size - add OGs plus the expanded OG itself add_OG_analysis_list <- vector(mode = "list", length = add_OGs_internal + 1) for (j in 1:(add_OGs_internal + 1)) { og_name <- self_and_closest_ogs[j,] %>% pull(HOG) #differnetiate between OG case and a singleton case - different handling: #for HOG get all associated genes/proteins from large long format table #for singletons get singelton gene/protein from the table itself if (!str_detect(og_name, "singleton") == TRUE) { name_curr_close_HOG <- self_and_closest_ogs[j,] %>% pull(HOG) ids_curr_close_HOG <- long_ph_orthogroups %>% filter(HOG %in% name_curr_close_HOG) %>% pull(id) add_OG_analysis_list[[j]] <- ids_curr_close_HOG names(add_OG_analysis_list)[j] <- name_curr_close_HOG } else { name_curr_close_singleton <- self_and_closest_ogs[j,] %>% pull(HOG) id_curr_close_HOG <- self_and_closest_ogs[j,] %>% pull(sseqid_name) add_OG_analysis_list[[j]] <- id_curr_close_HOG names(add_OG_analysis_list)[j] <- name_curr_close_singleton } } #create copy of the list and remove the names #add all previous gene/proteins to the next OG # -> e.g. the former seventh OG list element will contain all genes/proteins of all lower numbered OGs #the final "cum_add_OG_analysis_list" will be a list with each next element having the cumulative set of all previous and own gene/protein ids cum_add_OG_analysis_list <- add_OG_analysis_list names(cum_add_OG_analysis_list) <- NULL #copy_add_OG_analysis_list for (k in 0:(length(cum_add_OG_analysis_list)-1)) { cum_add_OG_analysis_list[[k+1]] <- unlist(c(cum_add_OG_analysis_list[k], cum_add_OG_analysis_list[k+1])) } #create directory/ies - for each HOG under "add_OGs_sets/id_lists/" dir.create(paste("tea/", num, "/add_OGs_sets/id_lists/", i, sep = "")) #iterate over cum. list and write cumulative sets into seperate files for (l in 1:length(cum_add_OG_analysis_list)) { write_lines(cum_add_OG_analysis_list[[l]], paste("tea/", num, "/add_OGs_sets/id_lists/", i, "/add_OGs_set_num-", l, ".txt", sep = "") #paste("../tea/", num, "/add_OGs_sets/", i, "/add_OGs_set_num-", l, ".txt", sep = "") ) } #append current cum. 
list to summary list & name element after OG #actually we need to do this again and this is redundant;toDo names(cum_add_OG_analysis_list) <- i return(cum_add_OG_analysis_list) } #after cluster is done - remove it rm_cluster() #some final formatting for (i in 1:length(pre_summary_add_OG_analysis_list)) { names(pre_summary_add_OG_analysis_list[[i]]) <- NULL } names(pre_summary_add_OG_analysis_list) <- expanded_HOGs$HOG summary_add_OG_analysis_list <- pre_summary_add_OG_analysis_list ## in this final step we use the summary list to create S3-list nest structure which contains all info ## of the summary but in the format we will embed in the final results object as part of the final_tea computation #length of add OGs analysis summary list summary_length <- length(summary_add_OG_analysis_list) #create empty list of length of the summary list add_og_complete_object <- vector(mode = "list", length = summary_length) #iterate over all OGs in summary list for (og in 1:summary_length) { #amount of sets for current OG - (necessary since less addtional OGs than max wished by user is possible) curr_og_n_sets <- length(summary_add_OG_analysis_list[[og]]) #empty list with n elements = n sets for currently analyzed OG og_all_sets <- vector(mode = "list", length = curr_og_n_sets) for (set in 1:curr_og_n_sets) { curr_set <- new("add_OG_set", genes=as_tibble( unlist( summary_add_OG_analysis_list[[og]][set], ) ) ) curr_set <- list(curr_set) og_all_sets[set] <- curr_set names(og_all_sets)[set] <- paste0("set_", set) } curr_add_og <- new("add_OG_analysis", add_OG_analysis=og_all_sets ) curr_add_og <- list(curr_add_og) add_og_complete_object[og] <- curr_add_og names(add_og_complete_object)[og] <- names(summary_add_OG_analysis_list[og]) } # save summary table for aditional OG analysis to hypothesis specific ("num") RDS file saveRDS(add_og_complete_object, paste("tea/", num, "/add_OGs_object/add_OG_analysis_object.RDS", sep = "")) # nested snakemake checkpoints are annoying at the moment # quick fix - create empty addtional set_num files which we can ignore but nonetheless exist message("adding empty missing files - current workaround to avoid nested snakemake checkpoints") #max_n_sets + 1 since user choice is the number of ADDITIONAL sets but we have ti remember the expanded OG itself (always set_num = 1) max_n_sets = add_OGs + 1 for (n in names(add_og_complete_object)) { og_n_sets <- length( list.files(path = paste0("tea/", num, "/add_OGs_sets/id_lists/", n)) ) og_sets <- list.files(path = paste0("tea/", num, "/add_OGs_sets/id_lists/", n)) if (og_n_sets < max_n_sets) { n_missing_sets <- max_n_sets - og_n_sets for (m in 1:n_missing_sets) { value <- max_n_sets - m + 1 missing_file <- paste0("tea/", num, "/add_OGs_sets/id_lists/", n, "/", "add_OGs_set_num-", value, ".txt") file.create(missing_file) } } } #all of the previous only runs in case we actually find an expanded group in this particular hypothesis #if this not the case we have to perform considerably less work (although of course the hypothesis is rather uninformative) } else { #just add expansion column with "no" for all rows expansion_tibble <- HOG_tibble_complete %>% mutate(expansion = "no") dir.create(paste("tea/", num, "/expansion_tibble/", sep = "")) saveRDS(expansion_tibble, paste("tea/", num, "/expansion_tibble/expansion_tibble.rds", sep = "")) message("No expanded OGs for this hypothesis - creating empty outputs") extended_BLAST_hits <- "empty" # save extended BLAST hits to hypothesis specific ("num") RDS file #-> to be read and used in 
final_tea_computation.R script saveRDS(extended_BLAST_hits, paste("tea/", num, "/extended_BLAST_hits/extended_BLAST_hits.RDS", sep = "")) add_og_complete_object <- "empty" # save summary table for aditional OG analysis to hypothesis specific ("num") RDS file saveRDS(add_og_complete_object, paste("tea/", num, "/add_OGs_object/add_OG_analysis_object.RDS", sep = "")) dir.create(paste("tea/", num, "/expansion_cp_target_OGs/", sep = "")) #create empty file under add_OG id_lists dir.create(paste0("tea/", num, "/add_OGs_sets/id_lists/")) file.create(paste0("tea/", num, "/add_OGs_sets/id_lists/", "empty.txt")) } ##### #### Lastly, create .check to know everything is done #message("Creating .check - Expansions successfully computed for hypothesis ", num) #expansion_cp_target_OGs.check <- "check" #dir.create(paste("checks/tea/", num, "/", sep="")) #write_file(expansion_cp_target_OGs.check, paste("checks/tea/", num, "/expansion_cp_target_OGs.check", sep = ""))
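A minimal numeric sketch of the ploidy-normalised expansion test implemented in expansion.R above (editor's illustration, not part of the original script; all counts, ploidies and thresholds below are hypothetical):

# Editor's illustration with hypothetical per-OG gene counts.
genes_exp <- 12; ploidy_exp <- 4   # "expanded_in" species (tetraploid)
genes_cmp <- 2;  ploidy_cmp <- 2   # "compared_to" species (diploid)
expansion_factor <- 2; expansion_difference <- 2
norm_exp <- genes_exp / ploidy_exp * 2   # 6 - count rescaled to a diploid basis
norm_cmp <- genes_cmp / ploidy_cmp * 2   # 2
norm_exp >= expansion_factor * norm_cmp      # TRUE: factor criterion met
norm_exp >= expansion_difference + norm_cmp  # TRUE: difference criterion met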
Sparse.Feature <- function(test_seq){
  if(class(test_seq)!="DNAStringSet"){stop("The dataset must be of class DNAStringSet")}
  if(length(unique(width(test_seq)))>1){stop("Each sequence must be of equal length")}
  zz <- as.character(test_seq)
  ######################################### sparse encoding ########################################
  # Each base is replaced by a 3-bit code: A -> 111, T -> 001, G -> 100, C -> 010.
  # T is recoded via the placeholder "X"; the pattern also matches "TRUE" in case a
  # lone "T" was coerced to logical upstream.
  my.sparse <- function(dat){
    ss <- unlist(strsplit(dat, split=""))
    ss[ss=="T"|ss=="TRUE"] <- "X"
    nx <- nchar(dat)
    ss[which(ss=="A")] <- "111"
    ss[which(ss=="X")] <- "001"
    ss[which(ss=="G")] <- "100"
    ss[which(ss=="C")] <- "010"
    tsk <- as.numeric(unlist(strsplit(ss, split="")))
    # one row of length 3*nx for this sequence (3 columns per base)
    spr_enc <- matrix(tsk, ncol=nx*3, byrow=T)
    spr_enc
  }
  ######################################## Combining features ######################################
  x2 <- t(sapply(zz, my.sparse))
  return(x2)
}
/R/Sparse.Feature.R
no_license
cran/EncDNA
R
false
false
846
r
Sparse.Feature <- function(test_seq){
  if(class(test_seq)!="DNAStringSet"){stop("The dataset must be of class DNAStringSet")}
  if(length(unique(width(test_seq)))>1){stop("Each sequence must be of equal length")}
  zz <- as.character(test_seq)
  ######################################### sparse encoding ########################################
  # Each base is replaced by a 3-bit code: A -> 111, T -> 001, G -> 100, C -> 010.
  # T is recoded via the placeholder "X"; the pattern also matches "TRUE" in case a
  # lone "T" was coerced to logical upstream.
  my.sparse <- function(dat){
    ss <- unlist(strsplit(dat, split=""))
    ss[ss=="T"|ss=="TRUE"] <- "X"
    nx <- nchar(dat)
    ss[which(ss=="A")] <- "111"
    ss[which(ss=="X")] <- "001"
    ss[which(ss=="G")] <- "100"
    ss[which(ss=="C")] <- "010"
    tsk <- as.numeric(unlist(strsplit(ss, split="")))
    # one row of length 3*nx for this sequence (3 columns per base)
    spr_enc <- matrix(tsk, ncol=nx*3, byrow=T)
    spr_enc
  }
  ######################################## Combining features ######################################
  x2 <- t(sapply(zz, my.sparse))
  return(x2)
}
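A short usage sketch for Sparse.Feature() above (editor's addition, not part of the EncDNA sources). It assumes Bioconductor's Biostrings package, which provides the DNAStringSet class the function expects; each base contributes three binary columns.

# Hedged usage sketch with toy sequences, not real data.
library(Biostrings)                       # provides DNAStringSet
seqs <- DNAStringSet(c("ATGC", "GGCA"))   # two equal-length sequences
enc <- Sparse.Feature(seqs)
dim(enc)      # 2 x 12: one row per sequence, 3 columns per base
enc[1, ]      # 1 1 1 0 0 1 1 0 0 0 1 0  (A, T, G, C)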
now <- Sys.Date()
mode(now)
class(now)

"as.Date"
as.Date("2010-12-31")
as.Date("12/31/2010", format="%m/%d/%Y")

"to str"
format(Sys.Date())
as.character(Sys.Date())
format(Sys.Date(), format="%m/%d/%Y")

"help strftime"
# help(strftime)
# %b Abbreviated month name (“Jan”)
# %B Full month name (“January”)
# %d Day as a two-digit number
# %m Month as a two-digit number
# %y Year without century (00–99)
# %Y Year with century

"construct date"
ISOdate(2012,2,29)
as.Date(ISOdate(2012,2,29))

"int"
d <- as.Date("2010-03-15")
as.integer(d)
julian(d)
as.integer(as.Date("1970-01-01"))
as.integer(as.Date("1969-12-21"))

"get year, day"
d <- as.Date("2010-03-15")
p <- as.POSIXlt(d)
p$mday
p$mon
p$year + 1900

s <- as.Date("2012-01-01")
e <- as.Date("2012-02-01")
date_vector <- seq(from=s, to=e, by=1)
date_vector
mode(date_vector)
class(date_vector)
is.vector(date_vector)
seq(from=s, by=1, length.out=7)
/R/R_cookbook/strings_dates/date_example.R
no_license
RobLeggett/codes
R
false
false
920
r
now <- Sys.Date()
mode(now)
class(now)

"as.Date"
as.Date("2010-12-31")
as.Date("12/31/2010", format="%m/%d/%Y")

"to str"
format(Sys.Date())
as.character(Sys.Date())
format(Sys.Date(), format="%m/%d/%Y")

"help strftime"
# help(strftime)
# %b Abbreviated month name (“Jan”)
# %B Full month name (“January”)
# %d Day as a two-digit number
# %m Month as a two-digit number
# %y Year without century (00–99)
# %Y Year with century

"construct date"
ISOdate(2012,2,29)
as.Date(ISOdate(2012,2,29))

"int"
d <- as.Date("2010-03-15")
as.integer(d)
julian(d)
as.integer(as.Date("1970-01-01"))
as.integer(as.Date("1969-12-21"))

"get year, day"
d <- as.Date("2010-03-15")
p <- as.POSIXlt(d)
p$mday
p$mon
p$year + 1900

s <- as.Date("2012-01-01")
e <- as.Date("2012-02-01")
date_vector <- seq(from=s, to=e, by=1)
date_vector
mode(date_vector)
class(date_vector)
is.vector(date_vector)
seq(from=s, by=1, length.out=7)
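A small follow-up example for the date snippets above (editor's addition, not part of the original file): converting the integer day count back to a Date needs an explicit origin, and date differences are easiest with difftime().

# Editor's sketch; the values in the comments were checked by hand.
n <- as.integer(as.Date("2010-03-15"))    # 14683 days since 1970-01-01
as.Date(n, origin = "1970-01-01")         # "2010-03-15"
difftime(as.Date("2012-02-01"), as.Date("2012-01-01"), units = "days")   # 31 days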
library("matrixStats") library("stats") asWhich <- function(probs, max) { idx <- as.integer(round(probs * max)) if (idx < 1L) { idx <- 1L } else if (idx > max) { idx <- max } idx } # asWhich() rowOrderStats_R <- function(x, probs, ..., useNames = NA) { ans <- apply(x, MARGIN = 1L, FUN = quantile, probs = probs, type = 3L) # Remove Attributes if (is.na(useNames) || !useNames || length(ans) == 0L) attributes(ans) <- NULL ans } # rowOrderStats_R() set.seed(1) # Simulate data in a matrix of any shape nrow <- 60L ncol <- 30L x <- rnorm(nrow * ncol) dim(x) <- c(nrow, ncol) probs <- 0.3 which <- asWhich(probs, max = ncol) y0 <- rowOrderStats_R(x, probs = probs) y1 <- rowOrderStats(x, which = which) stopifnot(all.equal(y1, y0)) y2 <- colOrderStats(t(x), which = which) stopifnot(all.equal(y2, y0)) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Consistency checks # - - - - - - - - - - - - - - - - - - - - - - - - - - - - for (mode in c("integer", "double")) { cat("Consistency checks without NAs:\n") for (kk in 1:3) { cat("Random test #", kk, "\n", sep = "") # Simulate data in a matrix of any shape nrow <- sample(20L, size = 1L) ncol <- sample(20L, size = 1L) x <- rnorm(nrow * ncol) dim(x) <- c(nrow, ncol) cat("mode: ", mode, "\n", sep = "") storage.mode(x) <- mode str(x) probs <- runif(1) which <- asWhich(probs, max = ncol) y0 <- rowOrderStats_R(x, probs = probs) y1 <- rowOrderStats(x, which = which) stopifnot(all.equal(y1, y0)) y2 <- colOrderStats(t(x), which = which) stopifnot(all.equal(y2, y0)) } # for (kk in ...) } # for (mode ...) # Check names attribute x <- matrix(1:9 + 0.1, nrow = 3L, ncol = 3L) probs <- runif(1) which <- asWhich(probs, max = ncol(x)) dimnames <- list(letters[1:3], LETTERS[1:3]) # Test with and without dimnames on x for (setDimnames in c(TRUE, FALSE)) { if (setDimnames) dimnames(x) <- dimnames else dimnames(x) <- NULL # Check names attribute for (useNames in c(NA, TRUE, FALSE)) { y0 <- rowOrderStats_R(x, probs = probs, useNames = useNames) y1 <- rowOrderStats(x, which = which, useNames = useNames) stopifnot(all.equal(y1, y0)) y2 <- colOrderStats(t(x), which = which, useNames = useNames) stopifnot(all.equal(y2, y0)) } }
/tests/rowOrderStats.R
no_license
HenrikBengtsson/matrixStats
R
false
false
2,336
r
library("matrixStats") library("stats") asWhich <- function(probs, max) { idx <- as.integer(round(probs * max)) if (idx < 1L) { idx <- 1L } else if (idx > max) { idx <- max } idx } # asWhich() rowOrderStats_R <- function(x, probs, ..., useNames = NA) { ans <- apply(x, MARGIN = 1L, FUN = quantile, probs = probs, type = 3L) # Remove Attributes if (is.na(useNames) || !useNames || length(ans) == 0L) attributes(ans) <- NULL ans } # rowOrderStats_R() set.seed(1) # Simulate data in a matrix of any shape nrow <- 60L ncol <- 30L x <- rnorm(nrow * ncol) dim(x) <- c(nrow, ncol) probs <- 0.3 which <- asWhich(probs, max = ncol) y0 <- rowOrderStats_R(x, probs = probs) y1 <- rowOrderStats(x, which = which) stopifnot(all.equal(y1, y0)) y2 <- colOrderStats(t(x), which = which) stopifnot(all.equal(y2, y0)) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Consistency checks # - - - - - - - - - - - - - - - - - - - - - - - - - - - - for (mode in c("integer", "double")) { cat("Consistency checks without NAs:\n") for (kk in 1:3) { cat("Random test #", kk, "\n", sep = "") # Simulate data in a matrix of any shape nrow <- sample(20L, size = 1L) ncol <- sample(20L, size = 1L) x <- rnorm(nrow * ncol) dim(x) <- c(nrow, ncol) cat("mode: ", mode, "\n", sep = "") storage.mode(x) <- mode str(x) probs <- runif(1) which <- asWhich(probs, max = ncol) y0 <- rowOrderStats_R(x, probs = probs) y1 <- rowOrderStats(x, which = which) stopifnot(all.equal(y1, y0)) y2 <- colOrderStats(t(x), which = which) stopifnot(all.equal(y2, y0)) } # for (kk in ...) } # for (mode ...) # Check names attribute x <- matrix(1:9 + 0.1, nrow = 3L, ncol = 3L) probs <- runif(1) which <- asWhich(probs, max = ncol(x)) dimnames <- list(letters[1:3], LETTERS[1:3]) # Test with and without dimnames on x for (setDimnames in c(TRUE, FALSE)) { if (setDimnames) dimnames(x) <- dimnames else dimnames(x) <- NULL # Check names attribute for (useNames in c(NA, TRUE, FALSE)) { y0 <- rowOrderStats_R(x, probs = probs, useNames = useNames) y1 <- rowOrderStats(x, which = which, useNames = useNames) stopifnot(all.equal(y1, y0)) y2 <- colOrderStats(t(x), which = which, useNames = useNames) stopifnot(all.equal(y2, y0)) } }
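Editor's note on the helper above: asWhich() simply turns a probability into a 1-based order-statistic index, clamped to [1, max], which is why the test can compare rowOrderStats(x, which = ...) against a type-3 quantile. A tiny check, not part of the matrixStats test suite:

# Editor's sketch: the type-3 quantile at p picks the round(p * n)-th order statistic here.
p <- 0.3; n <- 30
idx <- round(p * n)                                            # 9
x1 <- sort(rnorm(n))
x1[idx] == quantile(x1, probs = p, type = 3L, names = FALSE)   # TRUE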
install.packages("tm")     # package for text mining
install.packages("slam")
#install.packages("dplyr")
#install.packages("readr") # package for reading files

library(tm)
library(slam)
library(dplyr)
library(NLP4kec)
library(readr)

# 1. Load the raw data and dictionaries ---------------------------------------------------------------------------
textData = readRDS("./data/petitions_content_2018.RDS")

# Load the synonym / stopword dictionaries
stopword_dic = read_csv("./dictionary/stopword_ko.csv")
synonym_dic = read_csv("./dictionary/synonym.csv")

# 2. Morphological analysis and preprocessing ---------------------------------------------------------------------
# Extract only nouns, verbs, and adjectives
parsed_vec = r_parser_r(contentVector = textData$content
                        ,language = "ko"
                        ,useEn = T
                        ,korDicPath = "./dictionary/user_dictionary.txt")

# Extract nouns only
parsed_vec_noun = r_extract_noun(contentVector = textData$content
                                 ,language = "ko"
                                 ,useEn = T
                                 ,korDicPath = "./dictionary/user_dictionary.txt")

# Apply the synonym dictionary
parsed_vec = synonym_processing(parsedVector = parsed_vec
                                ,synonymDic = synonym_dic)

# Build a data frame so a doc_id can be attached to the Corpus
parsed_df = data.frame(doc_id = textData$doc_id
                       ,text = parsed_vec)

saveRDS(parsed_df, file = "./data/parsed_petition_df.RDS") # save for later reuse

# Create the Corpus
corp = VCorpus(DataframeSource(parsed_df))

# Remove special characters
corp = tm_map(corp, removePunctuation)
# Remove numbers
corp = tm_map(corp, removeNumbers)
# Remove specific words (stopwords)
corp = tm_map(corp, removeWords, stopword_dic$stopword)

saveRDS(corp, file = "./data/corpus_petition.RDS") # save for later reuse

# 3. Build the DTM and drop sparse terms --------------------------------------------------------------------------
# Create the Document-Term Matrix (minimum word length set to 2)
dtm = DocumentTermMatrix(corp, control=list(wordLengths=c(2,Inf)))

# Create the Term-Document Matrix (the DTM with rows and columns swapped)
tdm = TermDocumentMatrix(corp, control=list(wordLengths=c(2,Inf)))

# Drop sparse terms (the smaller the sparse value, the fewer terms are kept)
dtm = removeSparseTerms(dtm, sparse = 0.98)
dtm_mat = as.matrix(dtm)
write_csv(as.data.frame(dtm_mat), "./dtm_mat.csv")

# Word occurrence frequencies
colSums(as.matrix(dtm))
word_freq = colSums(as.matrix(dtm))

# Store the DTM as a data frame
dtm_df = as.data.frame(as.matrix(dtm))

# 4. Extract word frequency information ---------------------------------------------------------------------------
# Number of words
length(word_freq)

# Build a word frequency data frame
word_df = data.frame(word = names(word_freq)
                     ,freq = word_freq)

# Sort in descending order and show the top 10 words
word_df %>% arrange(-freq) %>% head(10)

# 5. Visualize the word frequency information ---------------------------------------------------------------------
# Word frequency visualization
library(ggplot2)

# Show only the top 10 words as a bar chart
top10 = word_df %>% arrange(-freq) %>% head(10) # extract the top 10 words
ggplot(top10, aes(x=word, y=freq)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(family = "AppleGothic"))

# The same bar chart with the top 10 words ordered by frequency
ggplot(top10, aes(x=reorder(word,-freq), y=freq)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(family = "AppleGothic"))

# Draw a word cloud
install.packages("wordcloud2")
library(wordcloud2)

top100 = word_df %>% top_n(100) # extract the top 100 words

wordcloud2(data = top100
           , color = "random-light"
           #,color = "random-dark"
           ,shape = "diamond"
           ,size = 0.5
           ,fontFamily = "나눔고딕")
# shape options: circle, cardioid, diamond, triangle-forward, triangle, pentagon, star

# 6. Build a DTM with TF-IDF weights ------------------------------------------------------------------------------
dtm_tfidf = DocumentTermMatrix(corp
                               ,control=list(wordLengths=c(2,Inf)
                                             ,weighting = function(x) weightTfIdf(x, normalize = TRUE))) # apply TF-IDF weighting
dtm_tfidf = removeSparseTerms(dtm_tfidf, sparse = 0.98)
/HDC_textmining_project/script/day1/1.2_text_handling.R
no_license
NamyounKim/RWork
R
false
false
4,936
r
install.packages("tm")     # package for text mining
install.packages("slam")
#install.packages("dplyr")
#install.packages("readr") # package for reading files

library(tm)
library(slam)
library(dplyr)
library(NLP4kec)
library(readr)

# 1. Load the raw data and dictionaries ---------------------------------------------------------------------------
textData = readRDS("./data/petitions_content_2018.RDS")

# Load the synonym / stopword dictionaries
stopword_dic = read_csv("./dictionary/stopword_ko.csv")
synonym_dic = read_csv("./dictionary/synonym.csv")

# 2. Morphological analysis and preprocessing ---------------------------------------------------------------------
# Extract only nouns, verbs, and adjectives
parsed_vec = r_parser_r(contentVector = textData$content
                        ,language = "ko"
                        ,useEn = T
                        ,korDicPath = "./dictionary/user_dictionary.txt")

# Extract nouns only
parsed_vec_noun = r_extract_noun(contentVector = textData$content
                                 ,language = "ko"
                                 ,useEn = T
                                 ,korDicPath = "./dictionary/user_dictionary.txt")

# Apply the synonym dictionary
parsed_vec = synonym_processing(parsedVector = parsed_vec
                                ,synonymDic = synonym_dic)

# Build a data frame so a doc_id can be attached to the Corpus
parsed_df = data.frame(doc_id = textData$doc_id
                       ,text = parsed_vec)

saveRDS(parsed_df, file = "./data/parsed_petition_df.RDS") # save for later reuse

# Create the Corpus
corp = VCorpus(DataframeSource(parsed_df))

# Remove special characters
corp = tm_map(corp, removePunctuation)
# Remove numbers
corp = tm_map(corp, removeNumbers)
# Remove specific words (stopwords)
corp = tm_map(corp, removeWords, stopword_dic$stopword)

saveRDS(corp, file = "./data/corpus_petition.RDS") # save for later reuse

# 3. Build the DTM and drop sparse terms --------------------------------------------------------------------------
# Create the Document-Term Matrix (minimum word length set to 2)
dtm = DocumentTermMatrix(corp, control=list(wordLengths=c(2,Inf)))

# Create the Term-Document Matrix (the DTM with rows and columns swapped)
tdm = TermDocumentMatrix(corp, control=list(wordLengths=c(2,Inf)))

# Drop sparse terms (the smaller the sparse value, the fewer terms are kept)
dtm = removeSparseTerms(dtm, sparse = 0.98)
dtm_mat = as.matrix(dtm)
write_csv(as.data.frame(dtm_mat), "./dtm_mat.csv")

# Word occurrence frequencies
colSums(as.matrix(dtm))
word_freq = colSums(as.matrix(dtm))

# Store the DTM as a data frame
dtm_df = as.data.frame(as.matrix(dtm))

# 4. Extract word frequency information ---------------------------------------------------------------------------
# Number of words
length(word_freq)

# Build a word frequency data frame
word_df = data.frame(word = names(word_freq)
                     ,freq = word_freq)

# Sort in descending order and show the top 10 words
word_df %>% arrange(-freq) %>% head(10)

# 5. Visualize the word frequency information ---------------------------------------------------------------------
# Word frequency visualization
library(ggplot2)

# Show only the top 10 words as a bar chart
top10 = word_df %>% arrange(-freq) %>% head(10) # extract the top 10 words
ggplot(top10, aes(x=word, y=freq)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(family = "AppleGothic"))

# The same bar chart with the top 10 words ordered by frequency
ggplot(top10, aes(x=reorder(word,-freq), y=freq)) +
  geom_bar(stat = "identity") +
  theme(axis.text.x = element_text(family = "AppleGothic"))

# Draw a word cloud
install.packages("wordcloud2")
library(wordcloud2)

top100 = word_df %>% top_n(100) # extract the top 100 words

wordcloud2(data = top100
           , color = "random-light"
           #,color = "random-dark"
           ,shape = "diamond"
           ,size = 0.5
           ,fontFamily = "나눔고딕")
# shape options: circle, cardioid, diamond, triangle-forward, triangle, pentagon, star

# 6. Build a DTM with TF-IDF weights ------------------------------------------------------------------------------
dtm_tfidf = DocumentTermMatrix(corp
                               ,control=list(wordLengths=c(2,Inf)
                                             ,weighting = function(x) weightTfIdf(x, normalize = TRUE))) # apply TF-IDF weighting
dtm_tfidf = removeSparseTerms(dtm_tfidf, sparse = 0.98)
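A small follow-up sketch for the TF-IDF matrix built above (editor's addition, not part of the original script; it assumes the script has been run so that dtm_tfidf exists): summing the weights per term is one quick way to surface the most distinctive terms.

# Editor's sketch, assuming dtm_tfidf from the script above.
tfidf_weight <- sort(colSums(as.matrix(dtm_tfidf)), decreasing = TRUE)
head(tfidf_weight, 10)   # ten terms with the largest summed TF-IDF weight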
getItemizeDocs = function(functionName, section, keyword = "arguments", first = FALSE, maxSize = 12) { wd = getwd() fileName = paste0(wd, "/man/", functionName, ".Rd") arguments = readItems(fileName, keyword) htmlFile = paste0("html/section-", section, "/", functionName, "-", keyword, ".html") argumentsToHtml(arguments, htmlFile, functionName, first, maxSize) } argumentsToHtml = function(arguments, htmlFile, functionName, first, maxSize = 12) { size = length(arguments) con = file(htmlFile, "w") if(first) { head = '<head><link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous"> <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js" integrity="sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1" crossorigin="anonymous"></script> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script> <script src="js/list-group.js"></script> </head>' } else { head = '<head><link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous"> <script src="js/list-group.js"></script> </head>' } writeLines(head, con) count = size / maxSize i = 1 while(i <= count+1) { if(i*maxSize >= size && i %% 2 == 1) { index = (i-1)*maxSize + 1 row = '<br><div class="row">' writeLines(row, con) writeListGroup(arguments[c(index:size)], functionName, con, "left") writeTextGroup(arguments[c(index:size)], functionName, con, colSize = 8) } else if(i %% 2 == 0) { index = (i-1)*maxSize + 1 index2 = min(index + maxSize-1, size) writeListGroup(arguments[c(index:index2)], functionName, con, "right") writeTextGroup(arguments[c((index-maxSize):index2)], functionName, con, colSize = 4) } else { index = (i-1)*maxSize + 1 index2 = index + maxSize-1 row = '<br><div class="row">' writeLines(row, con) writeListGroup(arguments[c(index:index2)], functionName, con, "left") } i = i + 1 } end='<br>' writeLines(end, con) close(con) } writeListGroup = function(arguments, functionName, con, side) { listGroup = paste0('<div class="col-4"> <div class="list-group ', side, '" id="list-tab" role="tablist">') writeLines(listGroup, con) toggleItemActive = '<a class="list-group-item list-group-item-action active" id="list-' toggleItem1 = '<a class="list-group-item list-group-item-action" id="list-' toggleItem2 = '-list" data-toggle="list" href="#list-' toggleItem3 = '" role="tab" aria-controls="' toggleItem4 = '">' first = TRUE for(argument in arguments) { id = paste0(argument$name,"-", functionName) if(first){ toggleItem = paste0(toggleItemActive, id, toggleItem2, id, toggleItem3, id, toggleItem4, argument$name, '</a>') } else { toggleItem = paste0(toggleItem1, id, toggleItem2, id, toggleItem3, id, toggleItem4, argument$name, '</a>') } writeLines(toggleItem, con) first = FALSE } end = '</div> </div>' writeLines(end, con) } writeTextGroup = function(arguments, functionName, con, colSize = 8) { textGroup = paste0('<div class="col-', colSize, '"> <div class="tab-content" id="nav-tabContent">') 
writeLines(textGroup, con) textItemActive = '<div class="tab-pane fade show active" id="list-' textItem1 = '<div class="tab-pane fade" id="list-' textItem2 = '" role="tabpanel" aria-labelledby="list-' textItem3 = '-list">' first = TRUE for(argument in arguments) { id = paste0(argument$name,"-", functionName) if(first){ textItem = paste0(textItemActive, id, textItem2, id, textItem3, argument$text, '</div>') } else { textItem = paste0(textItem1, id, textItem2, id, textItem3, argument$text, '</div>') } writeLines(textItem, con) first = FALSE } end = '</div> </div> </div>' writeLines(end, con) } readItems = function(fileName, keyword) { con = file(fileName, "r") arguments = list() startArg = FALSE while ( TRUE ) { line = readLines(con, n = 1) if ( length(line) == 0 ) { break } if(line == "}" && startArg) { startArg = FALSE break } if(startArg) { argument = data.frame(matrix(ncol = 2, nrow = 1)) names(argument) = c("name", "text") if(strContains("item", line)) { argument$name = getItemName(line) argument$text = getItemText(line) arguments = listAdd(arguments, argument) } } if(strContains(keyword, line)) { startArg = TRUE } } close(con) return(arguments) } strContains = function(str1, str2) { check = grep(str1, str2) return(length(check)>0) } getItemName = function(line) { lineSplit = strsplit(line, "")[[1]] startItem = FALSE itemName = "" for (char in lineSplit) { if(startItem && char=="}") { startItem = FALSE break } if(startItem) { itemName = paste0(itemName, char) } if(char=="{") { startItem = TRUE } } return(itemName) } getItemText = function(line) { lineSplit = strsplit(line, "")[[1]] startNext = FALSE startText = FALSE itemText = "" for (char in lineSplit) { if(startText && char=="}") { startText = FALSE break } if(startText) { itemText = paste0(itemText, char) } if(char=="}") { startNext = TRUE } if(char=="{" && startNext) { startText = TRUE } } return(itemText) } listAdd = function(myList, newItem) { size = length(myList) myList[[size+1]] = newItem return(myList) }
/R/readArguments.R
no_license
resplab/epicManual
R
false
false
6,137
r
getItemizeDocs = function(functionName, section, keyword = "arguments", first = FALSE, maxSize = 12) { wd = getwd() fileName = paste0(wd, "/man/", functionName, ".Rd") arguments = readItems(fileName, keyword) htmlFile = paste0("html/section-", section, "/", functionName, "-", keyword, ".html") argumentsToHtml(arguments, htmlFile, functionName, first, maxSize) } argumentsToHtml = function(arguments, htmlFile, functionName, first, maxSize = 12) { size = length(arguments) con = file(htmlFile, "w") if(first) { head = '<head><link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous"> <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js" integrity="sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1" crossorigin="anonymous"></script> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script> <script src="js/list-group.js"></script> </head>' } else { head = '<head><link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous"> <script src="js/list-group.js"></script> </head>' } writeLines(head, con) count = size / maxSize i = 1 while(i <= count+1) { if(i*maxSize >= size && i %% 2 == 1) { index = (i-1)*maxSize + 1 row = '<br><div class="row">' writeLines(row, con) writeListGroup(arguments[c(index:size)], functionName, con, "left") writeTextGroup(arguments[c(index:size)], functionName, con, colSize = 8) } else if(i %% 2 == 0) { index = (i-1)*maxSize + 1 index2 = min(index + maxSize-1, size) writeListGroup(arguments[c(index:index2)], functionName, con, "right") writeTextGroup(arguments[c((index-maxSize):index2)], functionName, con, colSize = 4) } else { index = (i-1)*maxSize + 1 index2 = index + maxSize-1 row = '<br><div class="row">' writeLines(row, con) writeListGroup(arguments[c(index:index2)], functionName, con, "left") } i = i + 1 } end='<br>' writeLines(end, con) close(con) } writeListGroup = function(arguments, functionName, con, side) { listGroup = paste0('<div class="col-4"> <div class="list-group ', side, '" id="list-tab" role="tablist">') writeLines(listGroup, con) toggleItemActive = '<a class="list-group-item list-group-item-action active" id="list-' toggleItem1 = '<a class="list-group-item list-group-item-action" id="list-' toggleItem2 = '-list" data-toggle="list" href="#list-' toggleItem3 = '" role="tab" aria-controls="' toggleItem4 = '">' first = TRUE for(argument in arguments) { id = paste0(argument$name,"-", functionName) if(first){ toggleItem = paste0(toggleItemActive, id, toggleItem2, id, toggleItem3, id, toggleItem4, argument$name, '</a>') } else { toggleItem = paste0(toggleItem1, id, toggleItem2, id, toggleItem3, id, toggleItem4, argument$name, '</a>') } writeLines(toggleItem, con) first = FALSE } end = '</div> </div>' writeLines(end, con) } writeTextGroup = function(arguments, functionName, con, colSize = 8) { textGroup = paste0('<div class="col-', colSize, '"> <div class="tab-content" id="nav-tabContent">') 
writeLines(textGroup, con) textItemActive = '<div class="tab-pane fade show active" id="list-' textItem1 = '<div class="tab-pane fade" id="list-' textItem2 = '" role="tabpanel" aria-labelledby="list-' textItem3 = '-list">' first = TRUE for(argument in arguments) { id = paste0(argument$name,"-", functionName) if(first){ textItem = paste0(textItemActive, id, textItem2, id, textItem3, argument$text, '</div>') } else { textItem = paste0(textItem1, id, textItem2, id, textItem3, argument$text, '</div>') } writeLines(textItem, con) first = FALSE } end = '</div> </div> </div>' writeLines(end, con) } readItems = function(fileName, keyword) { con = file(fileName, "r") arguments = list() startArg = FALSE while ( TRUE ) { line = readLines(con, n = 1) if ( length(line) == 0 ) { break } if(line == "}" && startArg) { startArg = FALSE break } if(startArg) { argument = data.frame(matrix(ncol = 2, nrow = 1)) names(argument) = c("name", "text") if(strContains("item", line)) { argument$name = getItemName(line) argument$text = getItemText(line) arguments = listAdd(arguments, argument) } } if(strContains(keyword, line)) { startArg = TRUE } } close(con) return(arguments) } strContains = function(str1, str2) { check = grep(str1, str2) return(length(check)>0) } getItemName = function(line) { lineSplit = strsplit(line, "")[[1]] startItem = FALSE itemName = "" for (char in lineSplit) { if(startItem && char=="}") { startItem = FALSE break } if(startItem) { itemName = paste0(itemName, char) } if(char=="{") { startItem = TRUE } } return(itemName) } getItemText = function(line) { lineSplit = strsplit(line, "")[[1]] startNext = FALSE startText = FALSE itemText = "" for (char in lineSplit) { if(startText && char=="}") { startText = FALSE break } if(startText) { itemText = paste0(itemText, char) } if(char=="}") { startNext = TRUE } if(char=="{" && startNext) { startText = TRUE } } return(itemText) } listAdd = function(myList, newItem) { size = length(myList) myList[[size+1]] = newItem return(myList) }
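A hypothetical call to the generator above (editor's addition; the man/getBirds.Rd input and the html/section-1/ output directory are assumptions, not part of the original repo):

# Parses the \arguments items of man/getBirds.Rd and writes a Bootstrap
# list-group page to html/section-1/getBirds-arguments.html; assumes the
# working directory is a package root and html/section-1/ already exists.
getItemizeDocs("getBirds", section = 1, keyword = "arguments", first = TRUE)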
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getBirds.R
\name{getBirds}
\alias{getBirds}
\title{getBirds}
\usage{
getBirds(object, points = NA, AOU = NA, years = NA, min.count = NA,
  max.count = NA, band = NA, interval = NA, visits = NA, times = NA,
  reps = NA, output = "dataframe")
}
\arguments{
\item{object}{An NCRNbirds object or a list of such objects.}

\item{points}{A character vector. The names of one or more points where the data was collected.}

\item{AOU}{A character vector. One or more AOU (American Ornithologists' Union) codes of bird species.}

\item{years}{A numeric vector. Will return only data from the indicated years.}

\item{min.count}{A numeric vector of length one. Will only return data with a bird count equal to or greater than \code{min.count}.}

\item{max.count}{A numeric vector of length one. Will only return data with a bird count equal to or less than \code{max.count}.}

\item{interval}{A numeric vector. Only observations whose \code{Interval} field matches a value in \code{interval} will be returned.}

\item{visits}{A length 1 numeric vector, defaults to NA. Returns data only from the indicated visits.}

\item{times}{A numeric vector of length 1. Returns only data from points where the number of years that a point has been visited is greater than or equal to the value of \code{times}. This is determined based on the data found in the \code{Visits} slot.}

\item{reps}{A numeric vector of length 1. Defaults to NA. Returns only data from points where the number of years that a point has been visited is greater than or equal to the value of \code{reps}. This is determined based on the data found in the \code{Visits} slot.}

\item{output}{Either "dataframe" (the default) or "list". Note that this must be in quotes. Determines the type of output from the function.}

\item{band}{A numeric vector. Only observations whose \code{Distance_id} field matches a value in \code{band} will be returned. Options are \code{1} for birds closer than 50m to the observer, \code{2} for birds between 50 and 100 meters from the observer, \code{c(1,2)} for birds between 0 and 100 meters of the observer, or \code{NA} for all birds regardless of distance.}
}
\description{
Returns bird monitoring data from the \code{Birds} slot of an \code{NCRNbirds} object.
}
\details{
Returns the data from the \code{Birds} slot of a single \code{NCRNbirds} object or a \code{list} of such objects. The data can be filtered using the various arguments. The default output is a \code{data.frame}. However, if \code{object} is a \code{list} and \code{output} is "list" then a \code{list} of \code{data.frame}s will be returned.
}
/man/getBirds.Rd
no_license
joshuaeveleth/NCRNbirds
R
false
true
2,672
rd
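## Hedged usage sketch for getBirds() as documented above; `parks` is a
## hypothetical list of NCRNbirds objects assumed to exist already, and "AMRO"
## is only an illustrative AOU code. Left commented because it needs real data.
# library(NCRNbirds)
# robins <- getBirds(parks, AOU = "AMRO", years = 2010:2014,
#                    band = 1, min.count = 1, output = "dataframe")
# head(robins)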
# Read the full household power consumption file and keep only 1-2 Feb 2007
mydata <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
mydata <- mydata[mydata[, "Date"] %in% c("1/2/2007", "2/2/2007"), ]

# Combine Date and Time into POSIXlt timestamps for the x-axis
dateTime <- strptime(paste(mydata$Date, mydata$Time), "%d/%m/%Y %H:%M")

# The sub-metering columns come in as factors/character, so convert via character
a1 <- as.numeric(as.character(mydata$Sub_metering_1))
a2 <- as.numeric(as.character(mydata$Sub_metering_2))
a3 <- as.numeric(as.character(mydata$Sub_metering_3))

# Draw the three sub-metering series to a 480x480 PNG with a legend
png(filename="Plot3.png", width=480, height=480)
plot(dateTime, a1, type="l", ylab="Energy sub metering", xlab="", ylim=c(0, max(a1, a2, a3)))
lines(dateTime, a2, col="red")
lines(dateTime, a3, col="blue")
legend('topright', c(colnames(mydata[7]), colnames(mydata[8]), colnames(mydata[9])), lty=1, col=c('black', 'red', 'blue'))
dev.off()
/plot3.R
no_license
anutikin/ExData_Plotting1
R
false
false
706
r
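## Optional robustness tweak for the script above (a sketch, not part of the
## original): the UCI household power file marks missing values with "?", so
## declaring na.strings avoids the factor-to-numeric round trip. Left commented.
# mydata <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
#                      na.strings = "?", stringsAsFactors = FALSE)
# mydata <- subset(mydata, Date %in% c("1/2/2007", "2/2/2007"))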
.onLoad <- function(libname, pkgname) { # Runs when loaded but not attached to search() path; e.g., when a package just Imports (not Depends on) data.table "Please read FAQ 2.23 (vignette('datatable-faq')) which explains in detail why data.table adds one for loop to the start of base::cbind.data.frame and base::rbind.data.frame. If there is a better solution we will gladly change it." # Commented as a character string so this message is retained and seen by anyone who types data.table:::.onLoad tt = base::cbind.data.frame ss = body(tt) if (class(ss)!="{") ss = as.call(c(as.name("{"), ss)) prefix = if (!missing(pkgname)) "data.table::" else "" # R provides the arguments when it calls .onLoad, I don't in dev/test if (!length(grep("data.table",ss[[2]]))) { ss = ss[c(1,NA,2:length(ss))] ss[[2]] = parse(text=paste("if (!identical(class(..1),'data.frame')) for (x in list(...)) { if (inherits(x,'data.table')) return(",prefix,"data.table(...)) }",sep=""))[[1]] body(tt)=ss (unlockBinding)("cbind.data.frame",baseenv()) assign("cbind.data.frame",tt,envir=asNamespace("base"),inherits=FALSE) lockBinding("cbind.data.frame",baseenv()) } tt = base::rbind.data.frame ss = body(tt) if (class(ss)!="{") ss = as.call(c(as.name("{"), ss)) if (!length(grep("data.table",ss[[2]]))) { ss = ss[c(1,NA,2:length(ss))] ss[[2]] = parse(text=paste("for (x in list(...)) { if (inherits(x,'data.table')) return(",prefix,".rbind.data.table(...)) }",sep=""))[[1]] # fix for #4995 body(tt)=ss (unlockBinding)("rbind.data.frame",baseenv()) assign("rbind.data.frame",tt,envir=asNamespace("base"),inherits=FALSE) lockBinding("rbind.data.frame",baseenv()) } # Set options for the speed boost in v1.8.0 by avoiding 'default' arg of getOption(,default=) opts = c("datatable.verbose"="FALSE", # datatable.<argument name> "datatable.nomatch"="NA_integer_", # datatable.<argument name> "datatable.optimize"="Inf", # datatable.<argument name> "datatable.print.nrows"="100L", # datatable.<argument name> "datatable.print.topn"="5L", # datatable.<argument name> "datatable.allow.cartesian"="FALSE", # datatable.<argument name> "datatable.dfdispatchwarn"="TRUE", # not a function argument "datatable.warnredundantby"="TRUE", # not a function argument "datatable.alloccol"="quote(max(100L,ncol(DT)+64L))",# argument 'n' of alloc.col. Allocate at least 64 spare slots by default. Needs to be 100L floor to save small object reallocs. "datatable.integer64"="'integer64'", # datatable.<argument name> integer64|double|character "datatable.showProgress"="1L", # in fread "datatable.auto.index"="TRUE", # DT[col=="val"] to auto add index so 2nd time faster "datatable.fread.datatable"="TRUE", "datatable.old.bywithoutby"="FALSE", # temp rollback method for code migration, will be removed in future "datatable.fread.dec.experiment"="TRUE", # temp. will remove once stable "datatable.fread.dec.locale"=if (.Platform$OS.type=="unix") "'fr_FR.utf8'" else "'French_France.1252'" ) for (i in setdiff(names(opts),names(options()))) { eval(parse(text=paste("options(",i,"=",opts[i],")",sep=""))) } # reshape2 # Tried this : # if (!"package:reshape2" %in% search()) { # # temporary until reshape2 pull request to make generic is on CRAN ... # try(library(reshape2, pos="package:base", quietly=TRUE, warn.conflicts=FALSE), silent=TRUE) # } # which works. But then when melt in data.table is loaded, _that's_ what generates the mask message. # There's also a NOTE: Package startup functions should not change the search path. # Therefore, removed. 
Users will need to make sure reshape2 isn't loaded, or loaded behind data.table on search() # Test R behaviour ... x = 1:3 y = list(x) .R.listCopiesNamed <<- (address(x) != address(y[[1]])) # FALSE from R 3.1 DF = data.frame(a=1:3, b=4:6) add1 = address(DF$a) add2 = address(DF$b) names(DF) = c("A","B") add3 = address(DF$A) add4 = address(DF$B) .R.assignNamesCopiesAll <<- add1 != add3 # FALSE from R 3.1 if ((add1 == add3) != (add2 == add4)) stop("If one column is copied surely the other should be as well, when checking .R.assignNamesCopiesAll") DF = data.frame(a=1:3, b=4:6) add1 = address(DF$a) add2 = address(DF) DF[2,"b"] = 7 # changed b but not a add3 = address(DF$a) add4 = address(DF) .R.subassignCopiesOthers <<- add1 != add3 # FALSE from R 3.1 .R.subassignCopiesVecsxp <<- add2 != add4 # currently TRUE in R 3.1, but could feasibly change invisible() } # Switch on these variables instead of getRversion(). Set to TRUE just to create them as a single logical. They are set by .onLoad() above. .R.listCopiesNamed = TRUE .R.assignNamesCopiesAll = TRUE .R.subassignCopiesOthers = TRUE .R.subassignCopiesVecsxp = TRUE getRversion <- function(...) stop("Reminder to data.table developers: don't use getRversion() internally. Add a behaviour test to .onLoad instead.") # 1) using getRversion() wasted time when R3.0.3beta was released without the changes we expected in getRversion()>"3.0.2". # 2) R-devel and ourselves may wish to tinker with R-devel, turning on and off features in the same version number. So it's better if data.table doesn't hard code expectations into the version number. # 3) The discipline of adding a feaure test here helps fully understand the change. # 4) Defining getRversion with a stop() here helps prevent new switches on getRversion() being added in future. Easily circumvented but the point is to issue the message above. .onUnload <- function(libpath) { # fix for #474. the shared object name is different from package name # So 'detach' doesn't find datatable.so, as it looks by default for data.table.so library.dynam.unload("datatable", libpath) }
/R/onLoad.R
no_license
halcyonhui/data.table
R
false
false
6,310
r
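## A standalone version of one of the behaviour probes run in .onLoad above
## (assumes data.table is installed for its address() helper); from R 3.1 the
## comparison is TRUE because renaming columns no longer copies them. Commented.
# library(data.table)
# DF <- data.frame(a = 1:3, b = 4:6)
# before <- address(DF$a)
# names(DF) <- c("A", "B")
# identical(before, address(DF$A))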
calcBetaStatus <- function(comm, status, dis.method, nulls, sub= "pol", occ= FALSE, years, zscore=FALSE){ ## calculate zscores? ## computes dispersion of community matrices, returns output of ## vegan function betadisper ## create community dissimilarity matrix comm.dis <- as.matrix(vegdist(comm, method= dis.method, diag= TRUE, binary= occ)) ## null dissimilarity matrices null.dis <- lapply(nulls, function(x) { as.matrix(vegdist(x, method= dis.method, diag=TRUE)) }) null.dis[[length(nulls) + 1]] <- comm.dis arr <- array(unlist(null.dis), c(dim(comm.dis)[1], dim(comm.dis)[2], length(nulls) + 1)) ## standardize dissimilarities if(!zscore){ less.than <- apply(arr, 1:2, function(x){ sum(x[length(null.dis)] > x) }) equal.2 <- apply(arr, 1:2, function(x){ sum(x[length(null.dis)] == x) }) cor.dis <- as.dist((less.than + 0.5*equal.2)/ length(null.dis), diag= TRUE) }else{ cor.dis <- (comm.dis - apply(arr , 1:2 , mean))/ (apply(arr , 1:2 , sd) + 10^-10) cor.dis <- as.dist(((cor.dis - min(cor.dis))/diff(range(cor.dis))), diag= TRUE) } ## run model mod.beta <- try(betadisper(cor.dis, status, type="centroid"), silent=TRUE) if(inherits(mod.beta, "try-error")) browser() return(mod.beta) }
/analysis/variability/src/beta.R
no_license
lponisio/hedgerow_assembly
R
false
false
1,762
r
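## Hedged example of driving calcBetaStatus() with vegan's built-in dune data;
## permatfull() is just one convenient way to build null communities and is an
## assumption, not part of the original analysis. Left commented.
# library(vegan)
# data(dune); data(dune.env)
# nulls <- permatfull(dune, times = 9)$perm
# mod <- calcBetaStatus(comm = dune, status = dune.env$Management,
#                       dis.method = "bray", nulls = nulls, occ = FALSE)
# anova(mod)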
\name{CD4sim} \alias{CD4sim} \docType{data} \title{Simulated HIV data %% ~~ data name/kind ... ~~ } \description{ %% ~~ A concise (1-5 lines) description of the dataset. ~~ A simulated dataset of HIV patients undergoing treatment. At each timepoint treatment is initiated with increasing probability as CD4 values for patients are decreasing. Treatment has a positive effect on CD4. The lower the CD4 value, the higher risk the patient has for getting the event AIDS. } \usage{data(CD4sim)} \format{ A data frame with 4894 observations on the following 5 variables. \describe{ \item{\code{id}}{id number} \item{\code{time}}{observation times} \item{\code{treat}}{treatment indicator} \item{\code{cd4}}{square root of CD4} \item{\code{AIDS}}{event indicator} } } %\details{ %% ~~ If necessary, more details than the __description__ above ~~ %} \source{Simulated data %% ~~ reference to a publication or URL from which the data were obtained ~~ } %\references{ %% ~~ possibly secondary sources and usages ~~ %} \examples{ data(CD4sim) } \keyword{datasets}
/man/CD4sim.Rd
no_license
cran/FLIM
R
false
false
1,086
rd
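## Quick-look sketch for the dataset documented above (assumes the FLIM package
## is installed); the 4894 x 5 dimensions come from the \format block. Commented.
# library(FLIM)
# data(CD4sim)
# str(CD4sim)
# with(CD4sim, table(AIDS))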
library(rstan)
rstan_options(auto_write = TRUE)                 # cache the compiled model on disk
options(mc.cores = parallel::detectCores())

# Fit code.stan once with a single chain; K is passed through as data
chain <- function(K) stan("code.stan", iter = 1000, chains = 1, data = list(K))

# z-score of each posterior mean against its MCMC standard error,
# MCSE = sd / sqrt(n_eff); the log-posterior lp__ is dropped
zvalues <- function(ch) {
    s = summary(ch)$summary
    z = s[, "mean"] / (s[, "sd"] / sqrt(s[, "n_eff"]))
    z[names(z) != "lp__"]
}

# One pilot fit to inspect adaptation and sampler diagnostics
ch0 = chain(10)
get_adaptation_info(ch0)
get_sampler_params(ch0)

# Largest z-value seen across 100 replicate fits
zmax = replicate(100, max(zvalues(chain(10))))
max(zmax)
/post/mcmc-z/code.r
no_license
tpapp/tpapp.github.io
R
false
false
433
r
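## The statistic computed by zvalues() above, written out for a single parameter:
## z = posterior mean / MCSE, where MCSE = sd / sqrt(n_eff). Toy numbers only.
post_mean <- 0.12; post_sd <- 1.05; n_eff <- 900
post_mean / (post_sd / sqrt(n_eff))   # about 3.4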
require(rvest) require(dplyr) require(ggplot2) dog_breeds <- read_html("https://www.olx.ua/zhivotnye/sobaki/q-%D1%81%D0%BE%D0%B1%D0%B0%D0%BA%D0%B0/") %>% html_node(".toplinks.x-normal") %>% html_nodes(".topLink.tdnone.parameter") %>% purrr::map_chr(html_attr, "href") %>% stringr::str_split(pattern = "/") %>% purrr::map_chr(~.[6]) dog_count <- read_html("https://www.olx.ua/zhivotnye/sobaki/q-%D1%81%D0%BE%D0%B1%D0%B0%D0%BA%D0%B0/") %>% html_node(".toplinks.x-normal") %>% html_nodes(".counter.nowrap") %>% purrr::map_chr(html_text) %>% stringr::str_remove_all(" ") %>% as.numeric() dog_name <- read_html("https://www.olx.ua/zhivotnye/sobaki/q-%D1%81%D0%BE%D0%B1%D0%B0%D0%BA%D0%B0/") %>% html_node(".toplinks.x-normal") %>% html_nodes(".link") %>% purrr::map_chr(html_text) dog_name <- dog_name[-84] dog_raw <- tibble(dog_breeds, dog_name, dog_count, dog_page = ceiling(dog_count / 44), pages = purrr::map2(dog_page > 10, dog_page, function(x, y) if(x) return(1:10) else return(1:y))) get_dog <- function(dog, page) { box <- read_html(glue::glue("https://www.olx.ua/zhivotnye/sobaki/{dog}/q-%D1%81%D0%BE%D0%B1%D0%B0%D0%BA%D0%B0/?page={page}")) %>% html_nodes(".wrap") cat(dog, page, "\n") tibble( breed = dog, price = box %>% purrr::map(html_node, ".price") %>% purrr::map_chr(html_text) %>% stringr::str_remove_all("\n") %>% stringr::str_squish(), #box %>% # purrr::map(html_node, ".marginright5.link.linkWithHash.detailsLink") %>% # purrr::map_chr(html_text) %>% # stringr::str_remove_all("\n") %>% # stringr::str_squish() title = box %>% purrr::map(html_node, ".title-cell") %>% purrr::map(html_node, ".lheight22.margintop5") %>% purrr::map_chr(html_text) %>% stringr::str_remove_all("\n") %>% stringr::str_remove_all("\t") %>% stringr::str_squish(), location = box %>% purrr::map(html_node, ".bottom-cell") %>% purrr::map(html_node, ".lheight16") %>% purrr::map(html_node, ".breadcrumb.x-normal") %>% purrr::map_chr(html_text) %>% stringr::str_remove_all("\n") %>% stringr::str_remove_all("\t") %>% stringr::str_squish(), date = box %>% purrr::map(html_node, ".bottom-cell") %>% purrr::map(html_node, ".lheight16") %>% purrr::map(html_nodes, ".breadcrumb.x-normal") %>% purrr::map(~.[[2]]) %>% purrr::map_chr(html_text) %>% stringr::str_remove_all("\n") %>% stringr::str_remove_all("\t") %>% stringr::str_squish() ) } dog_raw <- dog_raw %>% tidyr::unnest("pages") %>% select(dog_breeds, dog_name, pages) dog_list <- purrr::map2(dog_raw$dog_breeds, dog_raw$pages, purrr::safely(get_dog)) dog_df <- purrr::map_dfr(dog_list, ~.$result) %>% distinct() %>% filter(!stringr::str_detect(title, "вяз(ки|ка|ок)")) %>% filter(!stringr::str_detect(title, "осплей")) dog_df$price <- dog_df$price %>% stringr::str_remove_all(" грн.") %>% stringr::str_remove_all(" ") %>% as.numeric() #%>% hist() dog_df <- dog_df %>% filter(!stringr::str_detect(title, "т(э|е)дди")) %>% filter(!stringr::str_detect(title, "teddy")) %>% mutate(title = stringr::str_to_lower(title), breed = ifelse(stringr::str_detect(title, "мальтипу|тэдди|maltipoo"), "maltipoo", breed), breed = ifelse(stringr::str_detect(title, "той пуд"), "toy poodel", breed)) dog_df %>% left_join(dog_raw %>% select(dog_breeds, dog_name), by = c("breed" = "dog_breeds")) %>% mutate( dog_name = stringr::str_to_lower(dog_name), dog_name = case_when( breed == "maltipoo" ~ "мальтіпу", dog_name == "другая" ~ "двірняга", dog_name == "японский хин" ~ "японський хін", dog_name == "бишон фризе" ~ "бішон фрізе", dog_name == "шпиц" ~ "шпіц", dog_name == "московская сторожевая" ~ "московська сторожова", dog_name 
== "восточно-европейская овчарка" ~ "східно-європейська вівчарка", dog_name == " самоедская собака" ~ "самоєд", dog_name == "цвергпинчер" ~ "цвергпінчер", dog_name == "кавказская овчарка" ~ "кавказька вівчарка", dog_name == "аляскинский маламут" ~ "аляскинський маламут", dog_name == "французский бульдог" ~ "французький бульдог", dog_name == "карликовый пинчер" ~ "карликовий пінчер", dog_name == "бельгийская овчарка" ~ "бельгійська вівчарка", dog_name == "пекинес" ~ "пекінес", dog_name == "колли" ~ "коллі", dog_name == "той-терьер" ~ "той-тер'єр", dog_name == "среднеазиатская овчарка" ~ "середньоазіатська вівчарка", dog_name == "мексиканская голая собака" ~ "мексиканська гола собака", dog_name == "сибирский хаски" ~ "сибірський хаскі", dog_name == "пит-бультерьер" ~ "піт-бультер'єр", dog_name == "стаффордширский бультерьер" ~ "стаффордширський бультер'єр", dog_name == "китайская хохлатая" ~ "китайська хохлата", dog_name == "спаниель" ~ "спаніель", dog_name == "померанский шпиц" ~ "померанський шпіц", dog_name == "вест хайленд уайт терьер" ~ "вест айленд уайт тер'єр", dog_name == "гриффон" ~ "грифон", dog_name == "бультерьер" ~ "бультер'єр", dog_name == "мальтийская болонка" ~ "мальтійська болонка", dog_name == "папийон" ~ "папійон", dog_name == "английский бульдог" ~ "англійський бульдог", dog_name == "русский черный терьер" ~ "російський чорний тер'єр", dog_name == "бордоский дог" ~ "бордоський дог", dog_name == "бернский зенненхунд" ~ "бернський зенненхунд", dog_name == "акита" ~ "акіта", dog_name == "немецкий дог" ~ "німецький дог", dog_name == "золотистый ретривер" ~ "золотистий ретривер", dog_name == "американский бульдог" ~ "американський бульдог", dog_name == "стаффордширский терьер" ~ "стаффордширський тер'єр", dog_name == "немецкая овчарка" ~ "німецька вівчарка", dog_name == "йоркширський терьер" ~ "йоркширский тер'єр", #breed == "burbul" ~ "бурбуль", #breed == "mops" ~ "мопс", #breed == "taksa" ~ "такса", breed == "toy poodel" ~ "той пудель", T ~ dog_name ) ) %>% group_by(dog_name) %>% summarise(std = sd(price, na.rm = T), price = median(price, na.rm = T), n = n(), months = price/2212) %>% arrange(desc(price)) %>% filter(n >= 70) %>% ggplot(aes(reorder(dog_name, months), months)) + geom_hline(yintercept = c(0, 4, 8, 12, 16), linetype = "dotted") + geom_col(fill = "#1f78b4") + geom_text(aes(y = months-0.2, label = round(months, 1)), color = "white", size = 3) + coord_flip() + scale_x_discrete(expand = c(0, 0, 0, 0)) + scale_y_continuous(expand = c(0, 0, 0, 0)) + labs(title = "На скільки місяців оплаченої комуналки вистачить вашого песика?", subtitle = "За даними держстату середній розмір платіжки українця у грудні становив 2 212 гривень. Ми проаналізували оголошення про продаж собак з сайту OLX і виявили, наскільки в дійсності вистачить грошей в середньому з продажу улюбленця в залежності від його породи, виходячи з такого розміру платіжки", caption = "Дані про вартість собак - OLX, дані про розмір платіжки - Держстат") + #xlab("Порода собаки") + xlab("") + ylab("місяців сплаченої комуналки") + hrbrthemes::theme_ipsum(base_family = "Lato", axis_title_size = 10, axis_title_face = "bold") + theme( legend.position = "bottom", plot.title = element_text(size = 23), plot.subtitle = element_text(size = 15), panel.grid.minor = element_blank(), panel.grid.major = element_blank() ) ggsave("images/comunalka.png", width = 14, height = 12, dpi = 400) #get_dog("mops", 1)
/comunalka_dogs.R
no_license
RomanKyrychenko/library
R
false
false
9,337
r
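## The core conversion behind the chart above, as a standalone sketch: median
## breed price in UAH divided by the 2212 UAH average December utility bill
## quoted in the subtitle gives "months of paid utilities".
price_to_months <- function(price_uah, monthly_bill = 2212) price_uah / monthly_bill
price_to_months(c(5000, 15000))   # roughly 2.3 and 6.8 months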
# Load packages into memory
library(dplyr)
library(ggplot2)
library(statsr)

data(arbuthnot)
arbuthnot
dim(arbuthnot)
names(arbuthnot)
arbuthnot$boys
arbuthnot$girls[1]

ggplot(data = arbuthnot, aes(x = year, y = girls)) + geom_point()
?ggplot
5218+4683
arbuthnot$boys + arbuthnot$girls
arbuthnot <- arbuthnot %>% mutate(total=boys+girls)

# line plot with total, year
ggplot(data = arbuthnot, aes(x = year, y = total)) + geom_line()

# line plot + scatter plot
ggplot(data = arbuthnot, aes(x = year, y = total)) + geom_line() + geom_point()

# line plot + scatter plot with boys vs time
ggplot(data = arbuthnot, aes(x = year, y = boys)) + geom_line() + geom_point()

arbuthnot <- arbuthnot %>% mutate(more_boys = boys > girls)

data(present)
summary(present)
range(present$year)
present <- present %>% mutate(total=boys+girls)
present <- present %>% mutate(prop_boys=boys/total)

# line plot + scatter plot with proportion of boys vs time
ggplot(data = present, aes(x = year, y = prop_boys)) + geom_line() + geom_point()

present <- present %>% mutate(more_boys = boys > girls)
summary(present$more_boys)
table(present$more_boys)
# there are more boys every year than girls

present <- present %>% mutate(prop_boy_girl = boys/girls)

# line plot + scatter plot with ratio of boys to girls over time
ggplot(data = present, aes(x = year, y = prop_boy_girl)) + geom_line() + geom_point()

max(present$total)
# row with the largest total number of births (note the comma: index rows, not columns)
present[present$total == max(present$total), ]
/Week1/week1.R
no_license
bwbelljr/Intro-Prob-Data
R
false
false
1,578
r
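## dplyr equivalent of the final base-R row lookup above (same result):
# present %>% filter(total == max(total))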
context("test-detect")
data("awtew")

test_that("hs_str_detect detects the presence of a pattern", {
  expect_equal(hs_str_detect(c("a","b"),"a"), c(T,F))
  # note: inside "[L|l]" the "|" is a literal character, not alternation;
  # "[Ll]ord" is the conventional spelling of this class
  expect_equal(sum(hs_str_detect(awtew, "[L|l]ord")), 240)
})

test_that("hs_str_detect works on factors", {
  expect_equal(hs_str_detect(factor(c("a","b")),"a"), c(T,F))
})

test_that("hs_str_detect works for NA", {
  expect_equal(hs_str_detect(c(NA),"a"), c(NA))
  expect_equal(hs_str_detect(c(NA,"a"),"a"), c(NA,T))
})
/tests/testthat/test-detect.R
no_license
gmcrosh/hypeRscan
R
false
false
471
r
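## For orientation, the stringr analogue of the first expectation; hs_str_detect()
## is the hyperscan-backed counterpart exercised by the tests above. Commented.
# stringr::str_detect(c("a", "b"), "a")   # TRUE FALSE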
# Davies et al. R code defining PWR plot helper functions. # # By Jim Regetz (NCEAS) # simple helper function for rescaling values rescale <- function(x, lower=NULL, upper=NULL, na.rm=TRUE) { if (is.null(lower)) { lower <- min(x, na.rm=na.rm) } if (is.null(upper)) { upper <- max(x, na.rm=na.rm) } (x - lower) / (upper - lower) } # modified version of phylobase:::treePlot tp <- function (phy, show.tip.label=TRUE, show.node.label=FALSE, tip.order=NULL, tip.plot.fun="bubbles", plot.at.tip=TRUE, edge.color="black", node.color="black", tip.color="black", edge.width=1, newpage=TRUE, margins=c(1.1, 1.1, 1.1, 1.1), ...) { if (!inherits(phy, "phylo4")) stop("treePlot requires a phylo4 or phylo4d object") if (!isRooted(phy)) stop("treePlot function requires a rooted tree.") if (newpage) grid.newpage() type <- "phylogram" Nedges <- nEdges(phy) Ntips <- nTips(phy) if (!is.null(tip.order) && length(tip.order) > 1) { if (length(tip.order) != Ntips) { stop("tip.order must be the same length as nTips(phy)") } if (is.numeric(tip.order)) { tip.order <- tip.order } else { if (is.character(tip.order)) { tip.order <- as.numeric(names(tipLabels(phy))[ match(tip.order, tipLabels(phy))]) } } tip.order <- rev(tip.order) } if (!hasEdgeLength(phy) || type == "cladogram") { edgeLength(phy) <- rep(1, Nedges) } xxyy <- phyloXXYY(phy, tip.order) pushViewport(plotViewport(margins=margins)) pb(type=type, show.node.label=show.node.label, rot=0, edge.color=edge.color, node.color=node.color, tip.color=tip.color, edge.width=edge.width, show.tip.label=show.tip.label, newpage=TRUE, ..., XXYY=xxyy) upViewport() } # modified version of phylobase:::phylobubbles pb <- function (type=type, place.tip.label="right", show.node.label=show.node.label, show.tip.label=show.tip.label, edge.color=edge.color, node.color=node.color, tip.color=tip.color, edge.width=edge.width, newpage=TRUE, cex=1, pex=1, aex=1, ..., XXYY, square=FALSE, show.estimates=TRUE, lower=NULL, upper=NULL) { nVars <- 1 lab.right <- ifelse(place.tip.label %in% c("right", "both"), TRUE, FALSE) && show.tip.label lab.left <- ifelse(place.tip.label %in% c("left", "both"), TRUE, FALSE) && show.tip.label phy <- XXYY$phy tmin <- min(tdata(phy, type="tip"), na.rm=TRUE) tmax <- max(tdata(phy, type="tip"), na.rm=TRUE) pedges <- edges(phy) tip.order <- XXYY$torder tipdata <- tdata(phy, type="tip")[tip.order, , drop=FALSE] dlabwdth <- max(stringWidth(colnames(tipdata))) * 1.2 if (convertWidth(dlabwdth, "cm", valueOnly=TRUE) < 2) { dlabwdth <- unit(2, "cm") } phyplotlayout <- grid.layout(nrow=2, ncol=2, heights=unit.c(unit(1, "null"), dlabwdth), widths=unit(c(1, 1), c("null", "null"), list(NULL, NULL))) pushViewport(viewport(layout=phyplotlayout, name="phyplotlayout")) pushViewport(viewport(layout.pos.row=1:2, layout.pos.col=2, height=unit(1, "npc") + convertUnit(dlabwdth, "npc"), name="bubbleplots", default.units="native")) tys <- XXYY$yy[pedges[, 2] <= nTips(phy)] tys <- tys[match(names(tipLabels(phy))[tip.order], XXYY$torder)] maxr <- ifelse(ncol(tipdata) > nTips(phy), 1/ncol(tipdata), 1/nTips(phy)) tipdataS <- apply(tipdata, 2, function(x) (maxr * x)/max(abs(x), na.rm=TRUE)) if (is.null(lower)) { lower <- min(tipdata) } if (is.null(upper)) { upper <- max(tipdata) } tipdataS2 <- rescale(tipdata, lower, upper) if (nVars == 1) { xpos <- 0.5 } else { xpos <- seq(0 + maxr + 0.12, 1 - maxr - 0.12, length.out=nVars) } xrep <- rep(xpos, each=length(tys)) yrep <- rep(tys, nVars) ccol <- ifelse(tipdata < 0, "black", "white") naxs <- matrix(xrep, ncol=nVars) nays <- matrix(yrep, ncol=nVars) 
dnas <- is.na(tipdataS) naxs <- naxs[dnas] nays <- nays[dnas] tipdataS[is.na(tipdataS)] <- 0 + 0.001 if (lab.right) { tiplabwidth <- max(stringWidth(tipLabels(phy))) } else { tiplabwidth <- unit(0, "null", NULL) } bublayout <- grid.layout(nrow=2, ncol=2, widths=unit.c(unit(1, "null", NULL), tiplabwidth), heights=unit.c(unit(1, "null", NULL), dlabwdth)) pushViewport(viewport(x=0.5, y=0.5, width=0.95, height=1, layout=bublayout, name="bublayout")) pushViewport(viewport(name="bubble_plots", layout=bublayout, layout.pos.col=1, layout.pos.row=1)) # plot x-axis labs <- pretty(c(lower, upper)) vals <- rescale(labs, lower, upper) labs <- format(labs[0 <= vals & vals <= 1], nsmall=1) vals <- vals[0 <= vals & vals <= 1] ex <- -0.02 * aex grid.segments(x0=min(vals), x1=max(vals), y0=ex, y1=ex) grid.segments(x0=vals, x1=vals, y0=ex+0.01, y1=ex, gp=gpar(col="black")) grid.text(labs, x=vals, y=ex-0.01*aex, gp=gpar(cex=0.5*cex)) grid.text("Coefficient", x=0.5, y=ex-0.02*aex, gp = gpar(cex=0.5*cex)) # plot interesting results grid.segments(x0=tipdataS2$gest, x1=tipdataS2$gest, y0=0, y1=1, gp=gpar(col="grey")) grid.segments(x0=tipdataS2$glb, x1=tipdataS2$glb, y0=0, y1=1, gp=gpar(col="grey", lty="dashed")) grid.segments(x0=tipdataS2$gub, x1=tipdataS2$gub, y0=0, y1=1, gp=gpar(col="grey", lty="dashed")) grid.segments(x0=tipdataS2$lb, x1=tipdataS2$ub, y0=tys, y1=tys, gp=gpar(col="red")) if (!is.null(tipdataS2$lb.1) & !is.null(tipdataS2$ub.1)) { grid.segments(x0=tipdataS2$lb.1, x1=tipdataS2$ub.1, y0=tys+0.3*diff(tys[1:2]), y1=tys+0.3*diff(tys[1:2]), gp=gpar(col="grey")) } if (!is.null(tipdataS2$lb.2) & !is.null(tipdataS2$ub.2)) { grid.segments(x0=tipdataS2$lb.2, x1=tipdataS2$ub.2, y0=tys+0.6*diff(tys[1:2]), y1=tys+0.6*diff(tys[1:2]), gp=gpar(col="green")) } if (!is.null(tipdataS2$simcoef)) { grid.points(tipdataS2$simcoef, yrep, pch=1, size = unit(0.03*pex, "npc")) } if (show.estimates) { grid.points(tipdataS2$est, yrep, pch=16, size=unit(0.02*pex, "npc"), gp=gpar(col="red")) } if (length(naxs) > 0) { grid.points(naxs, nays, pch=4) } upViewport() if (lab.right) { pushViewport(viewport(name="bubble_tip_labels", layout=bublayout, layout.pos.col=2, layout.pos.row=1)) tt <- sub("_", " ", tipLabels(phy)[tip.order]) grid.text(tt, 0.1, tys, just="left", gp=gpar(cex=0.5*cex)) upViewport() } pushViewport(viewport(name="bubble_data_labels", layout=bublayout, layout.pos.col=1, layout.pos.row=2)) datalaboffset <- convertUnit(unit(15, "mm"), "npc", valueOnly=TRUE) upViewport(3) pushViewport(viewport(layout.pos.row=2, layout.pos.col=1, name="bubblelegend")) yyy <- phylobase:::.bubLegendGrob(tipdata, tipdataS) grid.draw(yyy) upViewport() pushViewport(viewport(layout.pos.row=1, layout.pos.col=1, name="tree")) plotOneTree(XXYY, "phylogram", show.tip.label=FALSE, show.node.label, edge.color, node.color, tip.color, edge.width, rot=0) upViewport(2) }
/pwr-plots.R
no_license
lizzieinvancouver/pwr
R
false
false
7,439
r
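## Quick standalone check of the rescale() helper defined at the top of the
## pwr-plots.R file above: values are mapped onto [0, 1] relative to the bounds.
rescale(c(2, 5, 8))                          # 0.0 0.5 1.0
rescale(c(2, 5, 8), lower = 0, upper = 10)   # 0.2 0.5 0.8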
batting<-read.csv('Batting.csv') print(head(batting)) str(batting) print(head(batting$AB)) print(head(batting$X2B)) #Creating a new column BA for calculating the batting average batting$BA <- batting$H/batting$AB print(tail(batting$BA,5)) batting$OBP <- (batting$H+batting$BB+batting$HBP)/(batting$AB+batting$BB+batting$HBP+batting$SF) batting$X1B <- batting$H - batting$X2B - batting$X3B - batting$HR batting$SLG <- (batting$X1B+ 2*batting$X2B + 3*batting$X3B + 4 * batting$HR)/batting$AB str(batting) salary <- read.csv('Salaries.csv') print(summary(batting)) #have batting data from 1985 batting <- subset(batting,yearID>=1985) print(summary(batting)) #merging batting and salary dataframe combo <- merge(batting,salary, by=c('playerID','yearID')) print(summary(combo)) #finding the lost player lost_players <- subset(combo,playerID %in% c('giambja01','damonjo01','saenzol01')) print(lost_players) lost_players <- subset(lost_players,yearID==2001) lost_players<- lost_players[,c('playerID','H','X2B','HR','OBP','SLG','BA','AB')] print(lost_players) mean_lostPlayer_OBP <- mean(lost_players$OBP) mean_lost_players_AB <- mean(lost_players$AB) print(mean_lostPlayer_OBP) print(mean_lost_players_AB) # Finding replacement for the lost player library('dplyr') avail_players <- filter(combo,yearID==2001) library('ggplot2') p1<- ggplot(avail_players,aes(OBP,salary)) +geom_point() print(p1) #segregating available players for OBP>0 and salary < 8 million filter1<- filter(avail_players,OBP>0, salary< 8000000) print(filter1) #Segregating data for At bats greater than lost players filter2 <- filter(filter1, AB>500) print(filter2) filter3<- filter(filter2, OBP>=mean_lostPlayer_OBP) print("All filters applied") print(filter3) p2 <- ggplot(filter3,aes(OBP,salary))+geom_point() library('plotly') p3<-ggplotly(p2) print(p3) player <- filter3[,c('playerID','OBP','AB','salary')] #print(player) possible<-arrange(player,desc(OBP)) print(possible) print(possible[2:4,])
/Money_Ball_Project.R
no_license
aswin19031997/PractiseProjects
R
false
false
2,014
r
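## Sanity check of the sabermetric formulas used above on one made-up stat line
## (illustrative numbers only, not real player data):
H <- 150; X2B <- 30; X3B <- 5; HR <- 20; AB <- 500; BB <- 60; HBP <- 5; SF <- 5
X1B <- H - X2B - X3B - HR                          # singles = 95
BA  <- H / AB                                      # 0.300
OBP <- (H + BB + HBP) / (AB + BB + HBP + SF)       # ~0.377
SLG <- (X1B + 2*X2B + 3*X3B + 4*HR) / AB           # 0.500
c(BA = BA, OBP = OBP, SLG = SLG)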
#' Invoke a Data Viewer
#'
#' Invoke a spreadsheet-style data viewer on a matrix-like \code{R} object.
#'
#' @param x an \code{R} object that can be forced to a character representation
#' of a data frame.
#' @param title the title for the viewer window. Defaults to the name of \code{x}.
#' @return Invisibly returns \code{NULL}. The function opens a window showing the
#' formatted contents of \code{x} and returns immediately.
#' @export
View <- function(x, title) {
	if(missing(title)) {
		title <- deparse(substitute(x))
	}
	# Start looking for View beyond where this one came from
	SP <- search()
	# Look for RStudio, else utils
	if("tools:rstudio" %in% SP) {
		pos <- "tools:rstudio"
	} else {
		pos <- "package:utils"
	}
	vf <- get("View", pos=pos, mode="function")
	invisible(vf(format(x), title=title))
}
/R/View.r
permissive
Zhenglei-BCS/smwrQW
R
false
false
826
r
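A minimal usage sketch for the wrapper above, assuming this View() is the one on the search path; the data frame and title below are arbitrary choices for illustration, not part of the package.

library(datasets)

# Dispatches to RStudio's viewer when "tools:rstudio" is on the search path,
# otherwise falls back to utils::View(), always on a format()-ed copy of x.
View(head(mtcars), title = "First rows of mtcars")

# With no title, the deparsed call ("head(mtcars)") becomes the window title.
View(head(mtcars))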
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_documentation.R \docType{data} \name{math} \alias{math} \title{Math Achievement Dataset} \format{A data frame with xx rows and 6 variables: \describe{ \item{School}{Which school the subject attends} \item{Minority}{description} \item{Sex}{Gender of the subject} \item{SES}{Socioeconomic status of the subject} \item{MathAch}{description} \item{MEANSES}{description} }} \usage{ math } \description{ Examines the relationship between various variables and math achievement. Great for HLM. } \keyword{datasets}
/man/math.Rd
no_license
jcberny/flexplot
R
false
true
603
rd
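Since the help page pitches this data for HLM, a short multilevel-model sketch may help. It uses only the documented columns (MathAch, SES, School) and assumes the math data frame is available after loading flexplot and that lme4 is installed; the model specification is illustrative, not prescribed by the documentation.

library(flexplot)  # assumed to ship the `math` data frame
library(lme4)

# Random-intercept model: math achievement regressed on SES,
# with students nested in schools.
fit <- lmer(MathAch ~ SES + (1 | School), data = math)
summary(fit)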
makeCacheMatrix <- function(x = matrix()) { # Change numeric to matrix m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setInverse <- function(solve) m <<- solve # Change mean to solve getInverse <- function() m list(set = set, get = get, setInverse = setInverse, # Change setmean to setInverse getInverse = getInverse) # Change setmean to setInverse }
/Test_makeCacheMatrix.R
no_license
Banelin/ProgrammingAssignment2
R
false
false
440
r
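makeCacheMatrix() only builds the caching closure; in the usual assignment it is paired with a cacheSolve() helper that reads or fills the cache. The companion below is a sketch of that standard pairing plus a usage example — it is not code taken from this repository.

# Sketch of the usual companion (not from this repo): return the cached inverse
# if one exists, otherwise compute it with solve() and store it for next time.
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setInverse(m)
  m
}

cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm)  # computes and caches the inverse
cacheSolve(cm)  # second call returns the cached value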
#' Hover Plot
#'
#' @param database the database containing musical data you want to analyze. We suggest the billboard::spotify_track_data
#' @param year_col a column of your df containing the years
#' @param filtered_year a string, the year you want to look at
#' @param x_axis a column of your df, the first dimension you want to plot against - such as energy
#' @param y_axis a column of your df, the second dimension you want to plot against - such as danceability
#' @param track_name a column of your df, the first piece of information displayed when hovering - such as track_name
#' @param artist_name a column of your df, the second piece of information displayed when hovering - such as artist_name
#' @param title a string, the title of your plot
#'
#' @return a plotly plot
#' @export
#'
#' @import dplyr
#' @importFrom glue glue
#' @importFrom plotly ggplotly
#' @import ggplot2
#'
#'
#'
#' @examples
#' \dontrun{
#' hover.plot(spotify_track_data,"1999",year,energy,danceability,track_name,artist_name,"Charts")
#' }
hover.plot <- function(database, filtered_year, year_col, x_axis, y_axis, track_name = track_name,
                       artist_name = artist_name, title = "Billboard Top 100 musical charts of ") {

  # enquo all the columns that we need to use
  x_axis <- enquo(x_axis)
  y_axis <- enquo(y_axis)
  artist_name <- enquo(artist_name)
  track_name <- enquo(track_name)
  year_col <- enquo(year_col)

  tracklist <- database %>%
    filter(!!year_col == filtered_year | !!year_col == "0") %>%
    select(!!year_col, !!artist_name, !!track_name, !!x_axis, !!y_axis)

  plot <- ggplot(tracklist, aes(!!x_axis, !!y_axis)) +
    geom_point(aes(Trackname = (!!track_name), Artist = (!!artist_name), size = 0.1), alpha = 1/2) +
    ggtitle(glue::glue("{title}{filtered_year}")) +
    theme_minimal() + xlim(0, 1) + ylim(0, 1)

  plot.with.hover <- ggplotly(plot) %>%
    config(displayModeBar = F) %>%
    layout(xaxis = list(fixedrange = TRUE)) %>%
    layout(yaxis = list(fixedrange = TRUE)) %>%
    layout(hoverlabel = list(bgcolor = "white", font = list(family = "sans serif", size = 12, color = "black")))

  return(plot.with.hover)
}

#' Hover Plot Shiny
#'
#' @description Specific function for the Shiny app
#'
#' @param data a dataframe
#' @param x the x variable to be displayed in the plot
#' @param y the y variable to be displayed in the plot
#' @param chosen_year the year for which we want to filter and display the chart data
#'
#' @return a plotly plot
#'
#' @import dplyr
#' @import graphics
#' @importFrom glue glue
#' @import plotly
#' @import ggplot2
#'
#' @examples
#' \dontrun{
#' hover.plot.shiny(spotify_track_data, input$x,input$y,input$year)
#' }
hover.plot.shiny <- function(data, x, y, chosen_year) {

  tracklist <- data %>%
    filter(year == chosen_year | year == "0") %>%
    select(artist_name, year, track_name, x, y)

  plot <- ggplot(tracklist, x = x, y = y) +
    geom_point(aes_string(x = x, y = y, Trackname = as.factor(tracklist$track_name), Artist = as.factor(tracklist$artist_name)), alpha = 0.5) +
    ggtitle(glue::glue("Billboard Top 100 musical charts of {chosen_year}")) +
    theme_minimal() +
    xlim(0, 1) + ylim(0, 1)

  hover.plot <- ggplotly(plot) %>%
    config(displayModeBar = F) %>%
    layout(xaxis = list(fixedrange = TRUE)) %>%
    layout(yaxis = list(fixedrange = TRUE)) %>%
    layout(hoverlabel = list(bgcolor = "white",
                             font = list(family = "sans serif",
                                         size = 12,
                                         color = "black")))

  return(hover.plot)
}
/working_files/removed_functions_frompkg/plots.R
no_license
meakulpa/rubasic
R
false
false
3,860
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/charToRaw.R
\name{charToRaw}
\alias{charToRaw}
\title{fun_name}
\usage{
charToRaw(params)
}
\arguments{
\item{params}{fun_name}
}
\description{
Another substituted function
}
/man/charToRaw.Rd
no_license
granatb/RapeR
R
false
true
251
rd
# data will be generated from tau1=tau^2, tau2=tau^2/2, and tau3=tau^2/4 library(parallel) simRep <- 5000 # Replication times in one simulation pvalue.true <- .05 # Testing type I error b.var <- c(0) # The set of varaince of random covariates b as random slope smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1 cores <- 4 r.sim <- b.var run_one_sample <- function(iter){ library(refund) library(lme4) library(nlme) library(arm) library(RLRsim) library(MASS) set.seed(iter) D <- 80 # grid number total nSubj <- 20 # 200 # I the number of curves nRep <- 20 # 20 # datasets for each covariance function totalN <- nSubj * nRep thetaK.true <- 2 timeGrid <- (1:D)/D npc.true <- 4 percent <- 0.95 SNR <- 3 # 5, signal noise ratio' sd.epsilon <- 1 # or 0.5 delta.true <- 0.5 a.mean <- 0 gamma.true <- 2 gammaVar.true <- 1 # hot gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1)) gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ] # warm gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1)) gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ] dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX #generate functional covariates lambda.sim <- function(degree) { return(0.5^(degree - 1)) } psi.fourier <- function(t, degree) { result <- NA if(degree == 1){ result <- sqrt(2) * sinpi(2*t) }else if(degree == 2){ result <- sqrt(2) * cospi(4*t) }else if(degree == 3){ result <- sqrt(2) * sinpi(4*t) }else if(degree == 4){ result <- sqrt(2) * cospi(2*t) } return(result) } lambdaVec.true <- mapply(lambda.sim, 1: npc.true) psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)), nrow = npc.true, ncol = D, byrow = TRUE) ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true)) Mt.true <- ascore.true %*% psi.true error <- rnorm(totalN, mean = 0, sd = sd.epsilon) #thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true))) thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4, r.sim/8))) thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ] betaM.true <- thetaIK.true * ascore.true betaM.true <- rowSums(betaM.true) Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error ########################################################################## ID <- rep(1:nSubj, each = nRep) if(smooth == 0){ Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D) } if(smooth == 1){ Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var Mt.hat <- Mt.true } M <- Mt.hat # M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0 t <- (1:D)/D knots <- 5 # previous setting 10 p <- 5 # previous setting p <- 7, the number of degree for B-splines we use results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen! 
npc <- results$npc score <- results$scores ascore <- score[, 1:npc]/sqrt(D) # plot(results$efunctions[,2]*sqrt(D)) # lines(1:80, psi.fourier(timeGrid, 2)) #match very well # to compare lambda: results$evalues/(D) # to compare estimated M, Mt.hat, Mt.true # a<-results$scores %*% t(results$efunctions) # plot(M[300,]) #Mt.hat # lines(a[300,]+results$mu,col="red") # estimated M # lines(Mt.true[300,], col="blue") #true Mt ########################################################################### dummyX <- cbind(dummyX, -dummyX + 1) z.sim.uni = c() ID.uni <- c(rbind(matrix(1:(nSubj*npc), nrow = npc, ncol = nSubj), matrix(0, nrow = nRep - npc, ncol = nSubj))) for(k in 1:nSubj){ svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i u.tra <- t(svd$v) u <- svd$u d <- (svd$d)[1:npc] # u <- cbind(u, Null(u)) Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)] dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ] ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ], matrix(0, nrow = nRep - npc, ncol = npc)) z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc)) } ########################################################################### designMatrix <- data.frame(rating = Y, temp.1 = dummyX[, 1], temp.2 = dummyX[, 2], ID = as.factor(ID), ascore = ascore, ID.uni = as.factor(ID.uni), z.sim.uni = z.sim.uni) # 'lmer' model designMatrix.lmm <- designMatrix additive0.sim <- paste(1:npc, collapse = " + ascore.") additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.") #additive.heter <- paste0(" + (0 + ascore.", 1:npc, " | ID)", collapse = "") #model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.", # additive0.sim, # " + (0 + temp.1 | ID) + (0 + temp.2 | ID) ", # additive.heter, # sep = "")) test_time1 <-system.time({ tests2 <- list() for(i in 1:npc){ ii <- paste("ascore.", i, sep = "") #model only contains the random effect that want to be tested f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim, " + (0 +", ii, " | ID)", sep = "")) m.slope <- lmer(f.slope, data = designMatrix.lmm) #full model under the alternative mA <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim, " + (0 +", ii, " | ID)", "+ (0 + temp.1 | ID) + (0 + temp.2 | ID)", sep = "")) fullReml <- lmer(mA, data = designMatrix.lmm) #model under the null f0 <- as.formula(paste(" . ~ . - (0 + ", ii, "| ID)")) m0 <- update(fullReml, f0) tests2[[i]] <- exactRLRT(m.slope, fullReml, m0) } multiTest1 <- sapply(tests2, function(x) { c(statistic = x$statistic[1], "p-value" = x$p[1])}) pvalues.bonf <- p.adjust(multiTest1[2,], "bonferroni") }) # Confusion of modifying test_time2 <-system.time({ model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim, " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)", sep = "")) f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim, " + (0 + z.sim.uni | ID.uni)", sep = "")) m.slope <- lmer(f.slope, data = designMatrix.lmm) fullReml <- lmer(model.sim, data = designMatrix.lmm) f0 <- as.formula(" . ~ . 
- (0 + z.sim.uni | ID.uni)") m0 <- update(fullReml, f0) tests1 <- exactRLRT(m.slope, fullReml, m0) pvalues <- tests1$p[1] }) ################################################################################### return(list(realTau = r.sim, pvalues.bonf = pvalues.bonf, pvalues = pvalues, Merror.Var = Merror.Var, smooth = smooth, npc = npc, tests.bonf = tests2, tests = tests1, test_time = c(test_time1[3],test_time2[3])))#test_time1 is for bonferroni; test_time2 is for our model } # Setup parallel #cores <- detectCores() cluster <- makeCluster(cores) clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function fileName <- paste("test_4eigen_size_", smooth, "_",b.var,"_seed1_grp20-rep20.RData", sep = "") # Saving file's name # run the simulation loopIndex <- 1 power2.sim <- list() node_results <- parLapply(cluster, 1:simRep, run_one_sample) # result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau, # pvalue = x$pvalue)}) #result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau, # pvalues.bonf = x$pvalues.bonf, # smooth = x$smooth, # npc = x$npc)}) #resultDoubleList.sim[[loopIndex]] <- node_results #save.image(file=fileName) # Auto Save #table1.sim <- sapply(result1.sim, function(x) { # c(sens = (sum(x$pvalue <= pvalue.true) > 0))}) #Power1 <- mean(table1.sim) #cat("nRandCovariate: ", nRandCovariate, fill = TRUE) #cat("Power1: ", Power1, fill = TRUE) #power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim) table1.sim <- sapply(node_results, function(x){ c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))}) Power1 <- mean(table1.sim) table2.sim <- sapply(node_results, function(x){ c(overall.sens = (sum(x$pvalues <= pvalue.true) > 0))}) Power2 <- mean(table2.sim) npc.sim <- sapply(node_results, function(x){ x$npc}) npc.sum <- table(npc.sim) table2.time <- sapply(node_results, function(x){ x$test_time }) single_testtime <- rowMeans(table2.time) #row1 is testtime for bonferroni; row2 is time for our method power2.sim[[loopIndex]] <- list(Power1.bonf = Power1, Power2 = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth, npc.table = npc.sum, single_test_sec = single_testtime) save(power2.sim, file=fileName) # Auto Save stopCluster(cluster)
/full simulation/summer/size/seed1/seed1_new/test_highorder/test_usingpaper_s_2020.R
no_license
wma9/FMRI-project
R
false
false
10,539
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measures_multiclass.R
\name{WKAPPA}
\alias{WKAPPA}
\title{Mean quadratic weighted kappa}
\usage{
WKAPPA(truth, response)
}
\arguments{
\item{truth}{vector of true values}
\item{response}{vector of predicted values}
}
\description{
Defined as: 1 - sum(weights * conf.mat) / sum(weights * expected.mat), where the
weight matrix measures the seriousness of disagreement with the squared
Euclidean metric.
}
/man/WKAPPA.Rd
no_license
pdwaggoner/measures
R
false
true
473
rd
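To make the formula in the description concrete, here is a small sketch that computes the quadratic weighted kappa exactly as stated: squared-distance weights applied to the observed and expected confusion matrices. It is an illustration of the definition, not the measures package's own implementation, and wkappa_sketch is a hypothetical name.

# Illustrative computation of the quadratic weighted kappa from the stated formula.
# `truth` and `response` are vectors over the same set of ordered classes.
wkappa_sketch <- function(truth, response) {
  lev <- sort(union(unique(truth), unique(response)))
  k <- length(lev)
  # observed joint distribution of true vs. predicted classes
  conf.mat <- table(factor(truth, levels = lev), factor(response, levels = lev))
  conf.mat <- conf.mat / sum(conf.mat)
  # expected joint distribution under independence of the marginals
  expected.mat <- outer(rowSums(conf.mat), colSums(conf.mat))
  # squared Euclidean disagreement weights
  weights <- outer(seq_len(k), seq_len(k), function(i, j) (i - j)^2)
  1 - sum(weights * conf.mat) / sum(weights * expected.mat)
}

wkappa_sketch(truth = c(1, 2, 3, 3, 2), response = c(1, 3, 3, 2, 2))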
# folder creation for NIR iterative predictions root<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soilColour/spatialPredictions/tiles/" fols<- as.numeric(list.files(root, full.names = FALSE)) length(fols) fols<- fols[order(fols)] fols for (i in 1:length(fols)){ #make path se.fol<- fols[i] se.fol pat<- paste0(root,se.fol,"/NIR_its/") pat dir.create(pat) print(i)}
/Production/DSM/SoilColour/tile_folder_creation.R
permissive
AusSoilsDSM/SLGA
R
false
false
409
r
visualize_cluster = function(gff, cluster, transcript_name){ require("stringr"); require("rtracklayer"); require("GenomicRanges") gr = import.gff(gff) gr_exon = gr[gr$type=="exon"] gr_exon = gr_exon[which(gr_exon$transcript_id %in% transcript_name)] gr_list_input = split(gr_exon, gr_exon$transcript_id) isoform_count = length(unique(gr_exon$transcript_id)) print(paste(Sys.time(), ": preprocessing")); flush.console() cov = base::as.vector(GenomicRanges::coverage(gr_exon)[[1]]) extract = which(cov>0) extract_high = which(cov>isoform_count*0.1) chr = unique(seqnames(gr_list_input[[1]])) gr_extract = reduce(GRanges(chr, range=IRanges(start=extract, end=extract))) gr_extract_high = reduce(GRanges(seqnames=chr, range=IRanges(extract_high, extract_high))) gr_base = reduce(gr_exon) gr_subject = gr_extract_high gr_intron = GenomicRanges::setdiff(GRanges(seqname=chr, ranges = IRanges(start(gr_base), end(gr_base))), gr_subject) num_bins = length(gr_intron) + length(gr_subject) gr_exons = gr_subject; gr_exons$type = "exon" gr_introns = gr_intron; if (length(gr_introns) > 0) { gr_introns$type="intron" } gr_tract = c(gr_exons, gr_introns) gr_tract = gr_tract[order(start(gr_tract))] indx = order(cluster) print(paste(Sys.time(), ": plotting")); flush.console() #png(out_png, width = max(800,num_bins*80), height=max(length(indx)*15,600)) par(mar=rep(5,4)) plot(c(0, num_bins), c(0, length(indx)), ty="n", xaxt="n", yaxt="n", ylab="", xlab="", main="Transcript Isoform Visualization") rect(xleft=0:(num_bins-1), xright=1:num_bins, ytop=0, ybot=-1) #, col=(gr_tract$type=="exon")+2) text(x=((0:(length(gr_tract)-1)+1:length(gr_tract))/2), y = -0.5, labels = width(gr_tract)) count=1 widths = NULL for(j in indx){ gr_subject= gr_list_input[[j]] ol = findOverlaps(gr_subject, gr_tract) qh = queryHits(ol); sh = subjectHits(ol) lefts = by(sh, qh, min) rights = by(sh, qh, max) ids = names(lefts) start = (start(gr_subject)[as.numeric(ids)] - start(gr_tract)[lefts])/width(gr_tract)[lefts]+lefts-1 end = rights - ( (end(gr_tract)[rights]) - end(gr_subject)[as.numeric(ids)])/width(gr_tract)[rights] rect(xleft = start, xright=end, ytop = count, ybot = count-1, col = sort(cluster)[count]) count = count+1 widths = c(widths, sum(width(gr_subject))) } par(xpd=NA) nn <- names(gr_list_input) text(x = -(num_bins)/15, y = 0:(length(indx))-0.5, pos=3, cex=0.8, labels= c(strtrim(nn[indx], 10), "Name")) text(x = num_bins*16/15, y = 0:(length(indx)+1)-0.5, pos=4, cex=0.8, labels=c(sum(width(gr_tract)), widths, "Length")) #dev.off() print(paste(Sys.time(), ": done")); flush.console() }
/DailyLogProgress/visualize_cluster_2:20.R
permissive
DennisWangJAX/AlternativeSplicingPacBio
R
false
false
2,717
r
#!/usr/bin/env Rscript argv <- commandArgs(trailingOnly = TRUE) options(stringsAsFactors = FALSE) source('Util.R') if(length(argv) != 6) { q() } ld.idx <- as.integer(argv[1]) # e.g., ld.idx = 133 qtl.file <- argv[2] # e.g., qtl.file = 'cis-eqtl/geuvadis/133_qtl.txt.gz' geno.dir <- argv[3] # e.g., geno.dir = '1KG_EUR' # (eQTL genotype matrix) gammax.input <- as.numeric(argv[4]) # e.g., gammax.input = 1e4 eig.tol <- as.numeric(argv[5]) # e.g., eig.tol = 1e-2 out.hdr <- argv[6] # e.g., out.hdr = 'temp.cammel' dir.create(dirname(out.hdr), recursive = TRUE, showWarnings = FALSE) out.tab.file <- out.hdr %&&% '.mediation.gz' out.null.file <- out.hdr %&&% '.null.gz' .files <- c(out.tab.file, out.null.file) if(all(sapply(.files, file.exists))) { log.msg('all the output files exist: %s\n', paste(.files, collapse = ', ')) q() } if(!file.exists(qtl.file)) { log.msg('QTL file does not exist: %s\n', qtl.file) q() } ################################################################ library(zqtl) library(dplyr) library(readr) library(methods) source('Util-cammel.R') ld.info.file <- 'ldblocks/EUR/fourier_ls-all.bed' ld.info.tab <- read_tsv(ld.info.file) chr.input <- gsub(pattern = 'chr', replacement = '', ld.info.tab[ld.idx, 'chr']) %>% as.integer() ld.lb.input <- ld.info.tab[ld.idx, 'start'] %>% as.integer() ld.ub.input <- ld.info.tab[ld.idx, 'stop'] %>% as.integer() gwas.dir <- './gwas_stat/' mil.ea.gwas.file <- gwas.dir %&&% '/ptsd_mil_ea_' %&&% ld.idx %&&% '.txt.gz' civ.ea.gwas.file <- gwas.dir %&&% '/ptsd_civ_ea_' %&&% ld.idx %&&% '.txt.gz' .files <- c(mil.ea.gwas.file, civ.ea.gwas.file) if(!all(sapply(.files, file.exists))) { log.msg('Missing input files: %s\n', paste(.files, collapse = ', ')) q() } temp.dir <- system('mkdir -p /broad/hptmp/ypp/cammel-ptsd/' %&&% out.hdr %&&% '; mktemp -d /broad/hptmp/ypp/cammel-ptsd/' %&&% out.hdr %&&% '/temp.XXXXXXXX', intern = TRUE, ignore.stderr = TRUE) dir.create(temp.dir, recursive = TRUE, showWarnings = FALSE) if(file.exists(geno.dir %&&% '/chr' %&&% chr.input %&&% '.bed')) { plink.eqtl <- subset.plink(geno.dir %&&% '/chr' %&&% chr.input, chr.input, ld.lb.input, ld.ub.input, temp.dir) } else if (file.exists(geno.dir %&&% '.bed')) { plink.eqtl <- subset.plink(geno.dir, chr.input, ld.lb.input, ld.ub.input, temp.dir) } else { plink.eqtl <- NULL } plink.gwas <- subset.plink('1KG_EUR/chr' %&&% chr.input, chr.input, ld.lb.input, ld.ub.input, temp.dir) ## Read and match two PLINK filesets plink.matched <- match.plink(plink.gwas, plink.eqtl) if(is.null(plink.matched)) { log.msg('No common variants between eQTL and GWAS reference panels') write_tsv(data.frame(), path = out.tab.file) write_tsv(data.frame(), path = out.null.file) q() } plink.gwas <- plink.matched$gwas plink.eqtl <- plink.matched$qtl ################################################################ ## Read QTL statistics and measure basic statistics ## chr rs snp.loc med.id qtl.a1 qtl.a2 qtl.beta qtl.z ## i c i c c c d d qtl.tab <- read_tsv(qtl.file, col_types = 'icicccdd') if(nrow(qtl.tab) == 0) { write_tsv(data.frame(), path = out.tab.file) write_tsv(data.frame(), path = out.null.file) log.msg('Empty QTL file\n') q() } vb.opt <- list(pi.ub = -2, pi.lb = -5, tau = -5, do.hyper = TRUE, tol = 1e-8, gammax = gammax.input, vbiter = 3500, do.stdize = TRUE, eigen.tol = eig.tol, rate = 1e-2, nsample = 10, print.interv = 500, weight = FALSE, do.rescale = TRUE) ################################################################ mil.ea.gwas.tab <- read_tsv(mil.ea.gwas.file) mil.ea.matched <- mil.ea.gwas.tab %>% 
match.allele(plink.obj = plink.eqtl, qtl.tab = qtl.tab) civ.ea.gwas.tab <- read_tsv(civ.ea.gwas.file) civ.ea.matched <- civ.ea.gwas.tab %>% match.allele(plink.obj = plink.eqtl, qtl.tab = qtl.tab) ################################################################ mil.ea.data <- mil.ea.matched %>% make.zqtl.data() mil.ea.effect <- mil.ea.data %>% run.cammel(xx.gwas = plink.gwas$BED, xx.med = plink.eqtl$BED, opt = vb.opt) %>% get.effect.tab(z.data = mil.ea.data, gwas.tab = mil.ea.gwas.tab, qtl.tab = qtl.tab, data.name = 'MIL.EA') mil.ea.null.out <- mil.ea.data %>% run.cammel.null(xx.gwas = plink.gwas$BED, xx.med = plink.eqtl$BED, n.null = 1, opt = vb.opt) %>% mutate(gwas = 'MIL.EA') ################################################################ civ.ea.data <- civ.ea.matched %>% make.zqtl.data() civ.ea.effect <- civ.ea.data %>% run.cammel(xx.gwas = plink.gwas$BED, xx.med = plink.eqtl$BED, opt = vb.opt) %>% get.effect.tab(z.data = civ.ea.data, gwas.tab = civ.ea.gwas.tab, qtl.tab = qtl.tab, data.name = 'CIV.EA') civ.ea.null.out <- civ.ea.data %>% run.cammel.null(xx.gwas = plink.gwas$BED, xx.med = plink.eqtl$BED, n.null = 1, opt = vb.opt) %>% mutate(gwas = 'CIV.EA') ################################################################ out.tab <- list(mil.ea.effect, civ.ea.effect) %>% bind_rows() %>% dplyr::mutate(chr = chr.input, ld.lb = ld.lb.input, ld.ub = ld.ub.input) null.tab <- list(mil.ea.null.out, civ.ea.null.out) %>% bind_rows() write_tsv(out.tab, path = out.tab.file) write_tsv(null.tab, path = out.null.file) system('rm -r ' %&&% temp.dir) log.msg('Successfully finished!\n')
/make.cammel-ptsd_ea.R
no_license
YPARK/cammel-gwas
R
false
false
5,718
r
library(EpiModelHIV) orig <- readRDS("out/est/netest.rds") netstats <- readRDS("out/est/netstats.rds") epistats <- readRDS("out/est/epistats.rds") full_tx_eff <- rep(1, 3) param <- param_msm( netstats = netstats, epistats = epistats, hiv.test.rate = c(0.00385, 0.00385, 0.0069), hiv.test.late.prob = rep(0, 3), tx.init.prob = c(0.1775, 0.19, 0.2521), tt.part.supp = 1 - full_tx_eff, tt.full.supp = full_tx_eff, tt.dur.supp = rep(0, 3), tx.halt.part.prob = c(0.0065, 0.0053, 0.003), tx.halt.full.rr = rep(0.45, 3), tx.halt.dur.rr = rep(0.45, 3), tx.reinit.part.prob = rep(0.00255, 3), tx.reinit.full.rr = rep(1, 3), tx.reinit.dur.rr = rep(1, 3), max.time.off.tx.full.int = 52 * 15, max.time.on.tx.part.int = 52 * 10, max.time.off.tx.part.int = 52 * 10, aids.mr = 1 / 250, trans.scale = c(2.68, 0.4, 0.27), #c(2.21, 0.405, 0.255), acts.scale = 1.00, acts.aids.vl = 5.75, circ.prob = c(0.874, 0.874, 0.918), a.rate = 0.00052, prep.start = (52 * 60) + 1, riskh.start = 52 * 59, prep.adhr.dist = c(0.089, 0.127, 0.784), prep.adhr.hr = c(0.69, 0.19, 0.01), prep.start.prob = 0.71, # 0.00896, prep.discont.rate = 0.02138792, # 1 - (2^(-1/(224.4237/7))) ## prep.tst.int = 90/7, # do I need that? ## prep.risk.int = 182/7, # do I need that? ## prep.sti.screen.int = 182/7, ## prep.sti.prob.tx = 1, prep.risk.reassess.method = "year", prep.require.lnt = TRUE, # FALSE -> start with random PrEP initiation # Injectable PrEP specific prep.la.start = Inf, #(52*60)+1, prepla.discont.rate = 1 - (2^(-1/781)), prep.prob.oral = 1, prep.inj.int = 8, prep.adhr.dist.la = c(0.215, 0.785), # only 2 adherence classes prepla.dlevel.icpt = 3.98, prepla.dlevel.icpt.err = 2, prepla.dlevel.halflife.int = 40, prep.la.hr.beta = -9.0599, prep.la.hr.rel = 1 ) ## must be set by the calling script if (lnt == FALSE) { param$prep.require.lnt = FALSE param$prep.start.prob = 0.00411 } init <- init_msm( prev.ugc = 0, prev.rct = 0, prev.rgc = 0, prev.uct = 0 )
/R/utils-sim_calib_params.R
permissive
EpiModel/injectable-prep
R
false
false
2,056
r