Dataset schema (one row per source file):

| column | dtype | summary |
|---|---|---|
| content | large_string | lengths 0 to 6.46M |
| path | large_string | lengths 3 to 331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5 to 125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0 to 6.46M |

#######################
# Rare Interactive Tool
#######################
#############
# Philippines
#############
#############################
# Managed access and reserves
#############################
######################################################
######################################################
### 0. Preparing the environment and packages
# Clean environment
rm(list = ls())
# Preparing packages
if (!require("pacman")) install.packages("pacman")
# Load packages
pacman::p_load(berryFunctions, dplyr, raster, rgdal, sf, sp, stringr)
######################################################
######################################################
### 1. Setting up directories and loading the required data for analysis
## Make sure a country's files have been copied to the appropriate directories (managed access, coral, habitat quality, etc.)
## NOTE: This should be completed before running this code
## It is a matter of preference whether to keep all countries' data in a single main directory
## or in separate subdirectories
## A single directory will hold many files, while separate directories require
## setting up many directory paths in the code
## 1a. Set the directories where the raw managed access and reserve data are currently stored
managed_access_dir <- "country_projects\\phl\\data\\a_raw_data\\managed_access_areas"
reserve_dir <- "country_projects\\phl\\data\\a_raw_data\\existing_reserve"
## 1b. setting output directories
tool_dir <- "country_projects\\phl\\data\\d_tool_data"
## 1c. inspect the directories
list.files(managed_access_dir)
list.files(reserve_dir)
######################################################
######################################################
### 2. load the data
## 2a. load managed access and reserve data
phl_ma <- st_read(dsn = managed_access_dir, layer = "phl_proposed_ma")
phl_reserves <- st_read(dsn = reserve_dir, layer = "phl_reserves_established")
######################################################
######################################################
### 3. Inspect the data (classes, crs, etc.)
## 3a. Examine the top of the data
head(phl_ma)
head(phl_reserves)
## 3b. Inspect crs and set crs values if needed for later analyses
crs(phl_ma)
crs(phl_reserves)
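## An added sketch (not in the original script): if the two layers' CRS differ,
## reproject one to match the other before any overlay work; using the managed
## access CRS as the target is an assumption
if (!identical(st_crs(phl_ma), st_crs(phl_reserves))) {
  phl_reserves <- st_transform(phl_reserves, st_crs(phl_ma))
}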
######################################################
######################################################
### 4. Cleaning and preparing data
## 4a. managed access areas
ma <- phl_ma %>%
dplyr::mutate(iso3 = "PHL", country = "Philippines") %>%
dplyr::select(iso3, country, MUNNAME, Area_ha, PROVNAME) %>%
dplyr::mutate(MUNNAME = str_to_title(MUNNAME), PROVNAME = str_to_title(PROVNAME)) %>%
dplyr::mutate(MUNNAME = recode(MUNNAME, "City Of Escalante" = "City of Escalante"),
PROVNAME = recode(PROVNAME, "Surigao Del Norte" = "Surigao del Norte")) %>%
dplyr::rename(region = PROVNAME, maa = MUNNAME, maa_area = Area_ha)
## 4b. reserves
reserve <- phl_reserves %>%
dplyr::mutate(iso3 = "PHL") %>%
dplyr::select(MPA_name, Area_ha, iso3) %>%
dplyr::rename(reserve_name = MPA_name, area_ha = Area_ha)
######################################################
######################################################
### 5. Saving the cleaned layers (note: st_write with a .shp path writes shapefiles, not a GeoPackage; see the sketch below)
st_write(obj = ma, dsn = paste0(tool_dir, "/", "managed_access_areas.shp"), append = F)
st_write(obj = reserve, dsn = paste0(tool_dir, "/", "existing_reserves.shp"), append = F)
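## An added sketch for writing a true GeoPackage instead of shapefiles (the
## phl_tool_data.gpkg filename is a hypothetical choice); a .gpkg dsn with a
## layer argument also avoids the 10-character field-name limit of shapefiles
st_write(obj = ma, dsn = file.path(tool_dir, "phl_tool_data.gpkg"),
         layer = "managed_access_areas", append = FALSE)
st_write(obj = reserve, dsn = file.path(tool_dir, "phl_tool_data.gpkg"),
         layer = "existing_reserves", append = FALSE)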
path: /rare_interactive_tool/country_projects/phl/code/rare_tool_phl_8_managed_access_reserves.R | license_type: no_license | repo_name: bpfree/work_sample | language: R | is_vendor: false | is_generated: false | length_bytes: 3,419 | extension: r

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/folded.R
\name{left_join.folded}
\alias{left_join.folded}
\title{Left-join Folded}
\usage{
\method{left_join}{folded}(.data, ..., .dots)
}
\arguments{
\item{.data}{passed to next method}
\item{...}{passed to next method}
\item{.dots}{passed to next method}
}
\value{
folded
}
\description{
Left-joins folded.
}
\keyword{internal}
path: /man/left_join.folded.Rd | license_type: no_license | repo_name: cran/fold | language: R | is_vendor: false | is_generated: true | length_bytes: 432 | extension: rd

# Create plot 2
# '?' marks missing values in this file; reading them as NA keeps the power columns numeric
hpcAll <- read.csv('household_power_consumption.txt',sep=';',na.strings='?',stringsAsFactors=FALSE)
hpc <- hpcAll[hpcAll$Date == '1/2/2007' | hpcAll$Date == '2/2/2007',]
DTs <- strptime(paste(hpc$Date, hpc$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
hpc <- cbind(DTs,hpc)
png('plot2.png', width=480, height=480, bg="transparent")
with(hpc, plot(DTs, Global_active_power, type='l', main='', xlab='',ylab='Global Active Power (kilowatts)'))
dev.off()
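# An added verification sketch (not in the original): the extracted window should
# span exactly the two target days
range(as.POSIXct(DTs))  # expected: 2007-02-01 00:00:00 through 2007-02-02 23:59:00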
path: /plot2.R | license_type: no_license | repo_name: petehinchliffe/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 444 | extension: r

library(data.table)
## 1. Merges the training and the test sets to create one data set
## The data is read and converted into a single data frame
features <- read.csv('./UCI HAR Dataset/features.txt', header = FALSE, sep = ' ')
features <- as.character(features[,2])
train_x <- read.table('./UCI HAR Dataset/train/X_train.txt')
train_y <- read.csv('./UCI HAR Dataset/train/y_train.txt', header = FALSE, sep = ' ')
train_subject <- read.csv('./UCI HAR Dataset/train/subject_train.txt',header = FALSE, sep = ' ')
data_train <- data.frame(train_subject, train_y, train_x)
names(data_train) <- c(c('subject', 'activity'), features)
test_x <- read.table('./UCI HAR Dataset/test/X_test.txt')
test_y <- read.csv('./UCI HAR Dataset/test/y_test.txt', header = FALSE, sep = ' ')
test_subject <- read.csv('./UCI HAR Dataset/test/subject_test.txt', header = FALSE, sep = ' ')
data_test <- data.frame(test_subject, test_y, test_x)
names(data_test) <- c(c('subject', 'activity'), features)
data_all <- rbind(data_train, data_test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement
mean_std <- grep('mean|std', features)
data_sub <- data_all[,c(1,2,mean_std + 2)]
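# An added note: the pattern 'mean|std' above also captures meanFreq() variables.
# If only mean() and std() measurements are wanted (an assumption about intent),
# a stricter pattern would be:
mean_std_strict <- grep('mean\\(\\)|std\\(\\)', features)
length(mean_std_strict)  # fewer variables than length(mean_std)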
# 3. Uses descriptive activity names to name the activities in the data set
act_labels <- read.table('./UCI HAR Dataset/activity_labels.txt', header = FALSE)
act_labels <- as.character(act_labels[,2])
data_sub$activity <- act_labels[data_sub$activity]
data_sub
# 4. Appropriately labels the data set with descriptive variable names
name_new <- names(data_sub)
name_new <- gsub("[(][)]", "", name_new)
name_new <- gsub("^t", "TimeDomain_", name_new)
name_new <- gsub("^f", "FrequencyDomain_", name_new)
name_new <- gsub("Acc", "Accelerometer", name_new)
name_new <- gsub("Gyro", "Gyroscope", name_new)
name_new <- gsub("Mag", "Magnitude", name_new)
name_new <- gsub("-mean-", "_Mean_", name_new)
name_new <- gsub("-std-", "_StandardDeviation_", name_new)
name_new <- gsub("-", "_", name_new)
names(data_sub) <- name_new
data_sub
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
data <- aggregate(data_sub[,3:81], by = list(activity = data_sub$activity, subject = data_sub$subject),FUN = mean)
write.table(x = data, file = "data.txt", row.names = FALSE)
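# An added verification sketch: read the tidy set back and confirm its shape
check <- read.table("data.txt", header = TRUE)
dim(check)  # expected 180 x 81: 30 subjects x 6 activities, 2 id columns + 79 averaged variables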
path: /run_analysis.R | license_type: no_license | repo_name: nvia/cleandata | language: R | is_vendor: false | is_generated: false | length_bytes: 2,355 | extension: r

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/descend.R
\name{getPval}
\alias{getPval}
\title{Grab the likelihood ratio test p-values if the tests are performed from a list of descend objects}
\usage{
getPval(descend.list)
}
\arguments{
\item{descend.list}{a list of descend objects computed from {\code{\link{runDescend}}}}
}
\value{
A matrix of one column. Each row is for a distribution measurement, or for a coefficient if covariates are present.
}
\description{
Grab the likelihood ratio test p-values if the tests are performed from a list of descend objects
}
path: /man/getPval.Rd | license_type: no_license | repo_name: jingshuw/descend | language: R | is_vendor: false | is_generated: true | length_bytes: 596 | extension: rd

################################
### Code for ecological statistics for
### "Divergent extremes but convergent recovery of bacterial and archaeal soil
### communities to an ongoing subterranean coal mine fire"
### by SH Lee, JW Sorensen, KL Grady, TC Tobin and A Shade
### Prepared 12 November 2016
### Author: Ashley Shade, Michigan State University; shade.ashley <at> gmail.com
################################
#
# Before you start
# Make sure you are using the latest version of R (and RStudio)
# The following packages (and their dependencies) are needed to run the whole analysis
# calibrate 1.7.2
# gplots 3.0.1
# ggplot2 2.1.0
# indicspecies 1.7.5
# limma 3.26.9
# MASS 7.3-45 (calibrate dependency)
# outliers 0.14
# reshape2 1.4.1
# vegan 2.4-0
# reldist 1.6-6
# bipartite 2.06.1
# GUniFrac 1.0
# ape 3.5
# phangorn 2.0-2
#
################################
### Plotting soil contextual data
################################
#load R libraries for this section
library(ggplot2)
library(reshape2)
library(outliers)
#read in mapping file with soil data
map=read.table("InputFiles/Centralia_Collapsed_Map_forR.txt", header=TRUE, sep="\t")
#plot chemistry v. temperature (Supporting Figure 3)
#melt data
map.long=melt(map, id.vars=c("SampleID", "SoilTemperature_to10cm", "Classification"), measure.vars=c("NO3N_ppm","NH4N_ppm","pH","SulfateSulfur_ppm","K_ppm","Ca_ppm","Mg_ppm","OrganicMatter_500","Fe_ppm", "As_ppm", "P_ppm", "SoilMoisture_Per"))
#make a gradient color palette, note bias
GnYlOrRd=colorRampPalette(colors=c("green", "yellow", "orange","red"), bias=2)
sfig3=ggplot(map.long, aes(y=as.numeric(SoilTemperature_to10cm), x=value))+
#add points layer
geom_point(aes(y=as.numeric(SoilTemperature_to10cm), x=value, shape=Classification, color=as.numeric(SoilTemperature_to10cm)))+
#set facet with 4 columns, make x-axes appropriate for each variable
facet_wrap(~variable, ncol=4, scales="free_x")+
#set gradient for temperature and add gradient colorbar
scale_color_gradientn(colours=GnYlOrRd(5), guide="colorbar", guide_legend(title="Temperature"))+
#omit the legend for the size of the points
scale_size(guide=FALSE)+
#define the axis labels
labs(y="Temperature (Celsius)", x=" ")+
#set a simple theme
theme_bw(base_size=10)
sfig3
#ggsave("Figures/SFig3.eps", width=178, units="mm")
##Subset contextual data inclusive of soil quantitative variables
env=map[,c("SoilTemperature_to10cm", "NO3N_ppm", "pH", "K_ppm", "Mg_ppm", "OrganicMatter_500", "NH4N_ppm", "SulfateSulfur_ppm", "Ca_ppm", "Fe_ppm", "As_ppm", "P_ppm", "SoilMoisture_Per","Fire_history")]
##Test for outliers; the loop prints each significant outlier and its sampleID - these were not removed from the analysis
for (i in 1:ncol(env)){
x=grubbs.test(env[,i], type=10)
if(x$p.value < 0.05){
print(colnames(env)[i])
print(row.names(env)[env[,i]==max(env[,i])])
}
}
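#An added defensive sketch (not in the original): grubbs.test() expects numeric input,
#so a guarded variant restricted to numeric columns would be
for (i in which(sapply(env, is.numeric))){
  x=grubbs.test(env[,i], type=10)
  if(x$p.value < 0.05) print(colnames(env)[i])
}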
#samples 13 (for pH, Ca) and 10 (for NO3N, NH4N, Fe) are common outliers - both have high temperatures. Sample 3 is also an outlier for Mg and OM; this is a recovered site. Overall, this test indicates considerable variability.
#correlation test between temperature and other soil chemistry
for(i in 1:ncol(env)){
ct=cor.test(env[,"SoilTemperature_to10cm"],env[,i])
if (ct$p.value < 0.05){
print(colnames(env)[i])
print(ct)
}
}
#extract means from recovered and reference soils' pH
mean(env[map[,"Classification"]=="Reference","pH"])
mean(env[map[,"Classification"]== "Recovered","pH"])
#plot cell counts and 16S rRNA qPCR data (Supporting Figure 2)
map.long.counts=melt(map, id.vars=c("SampleID", "Classification"), measure.vars=c("rRNA_gene_copies_per_g_dry_soil","CellCounts_per_g_dry_soil"))
labels=c(rRNA_gene_copies_per_g_dry_soil="rRNA gene copies",CellCounts_per_g_dry_soil="Cell counts")
sfig4 <- ggplot(data=map.long.counts, aes(x=Classification, y=value))+
geom_boxplot() +
geom_jitter(aes(shape=Classification))+
facet_grid(variable~., scales="free_y", labeller=labeller(variable = labels))+
scale_shape(guide=FALSE)+
#scale_color_manual(values=colors)+
scale_x_discrete(name="Fire classification")+
scale_y_continuous(name="value per g dry soil")+
theme_bw(base_size=10)
sfig4
#ggsave("Figures/SFig4.eps", width=86, units="mm")
#Pairwise t-tests for cell counts
t.test(map[map[,"Classification"]=="Recovered","CellCounts_per_g_dry_soil"],map[map[,"Classification"]=="FireAffected","CellCounts_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="Recovered","CellCounts_per_g_dry_soil"],map[map[,"Classification"]=="Reference","CellCounts_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="FireAffected","CellCounts_per_g_dry_soil"],map[map[,"Classification"]=="Reference","CellCounts_per_g_dry_soil"])
#Pairwise t-tests for qPCR
t.test(map[map[,"Classification"]=="Recovered","rRNA_gene_copies_per_g_dry_soil"],map[map[,"Classification"]=="FireAffected","rRNA_gene_copies_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="Recovered","rRNA_gene_copies_per_g_dry_soil"],map[map[,"Classification"]=="Reference","rRNA_gene_copies_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="Reference","rRNA_gene_copies_per_g_dry_soil"],map[map[,"Classification"]=="FireAffected","rRNA_gene_copies_per_g_dry_soil"])
################################
### Preparing OTU and distance tables for analysis
################################
#load R libraries for this section
library(ggplot2)
library(reshape2)
library(vegan)
#read in community OTU table, and transpose (rarefied collapsed MASTER table, output from QIIME)
comm=read.table("InputFiles/MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1, check.names=FALSE, sep="\t")
#remove consensus lineage from otu table
rdp=comm[,"ConsensusLineage"]
comm=comm[,-ncol(comm)]
#How many total QCed sequences?
sum(colSums(comm))
#sort community by colnames (to be in the consistent, consecutive order for all analyses)
comm=comm[,order(colnames(comm))]
#who are the singleton OTUs (observed once, with a total abundance of 1 sequence across the dataset)?
singletonOTUs=row.names(comm)[rowSums(comm)==1]
length(singletonOTUs)
#total 1374 singleton OTUs
g=grep("_dn", singletonOTUs)
length(g)
#1201 de novo OTUs are singletons
#who are the remaining de novo OTUs?
g=grep("_dn_",row.names(comm))
dn=rdp[g]
#keep taxonomy strings for the non-singleton OTUs
rdp.nosigs=rdp[rowSums(comm)>1]
#designate a full dataset (singleton OTUs retained)
comm.sigs=comm
#remove OTUs with an abundance = 1, across the entire dataset (singleton OTUs)
comm=comm[rowSums(comm)>1,]
sum(colSums(comm))
#transpose matrix
comm.t=t(comm)
### Read in resemblance matrices
#read in weighted unifrac table (output from QIIME)
uf=read.table("InputFiles/weighted_unifrac_MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1)
#sort by rows, columns (so they are in consistent, consecutive order)
uf=uf[order(row.names(uf)),order(colnames(uf))]
uf.d=as.dist(uf)
#read in the unweighted unifrac table (output from QIIME)
uwuf=read.table("InputFiles/unweighted_unifrac_MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1)
#sort by rows, columns (so they are in consistent, consecutive order)
uwuf=uwuf[order(row.names(uwuf)),order(colnames(uwuf))]
uwuf.d=as.dist(uwuf)
#read in the normalized weighted unifrac table (output from QIIME)
nwuf=read.table("InputFiles/weighted_normalized_unifrac_MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1)
#sort by rows, columns (so all tables are in consistent, consecutive order)
nwuf=nwuf[order(row.names(nwuf)),order(colnames(nwuf))]
nwuf.d=as.dist(nwuf)
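#An added sanity check (a sketch): the OTU table and all resemblance matrices should
#now share one consistent sample order
stopifnot(identical(colnames(comm), row.names(uf)),
          identical(row.names(uf), row.names(uwuf)),
          identical(row.names(uwuf), row.names(nwuf)))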
#assign fire classification
fireclass=map[,"Classification"]
ref.t=comm.t[map$Classification=="Reference",]
rec.t=comm.t[map$Classification=="Recovered",]
fire.t=comm.t[map$Classification=="FireAffected",]
################################
### Calculate and plot within-sample (alpha) diversity
################################
#read in alpha diversity table (output from QIIME)
div=read.table("InputFiles/MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000_alphadiv.txt", header=TRUE)
#sort by sample ID (so that they are in consecutive order)
div=div[order(row.names(div)),]
#calculate pielou's evenness from OTU table
s=specnumber(comm.t)
h=diversity(comm.t,index="shannon")
pielou=h/log(s)
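#An added illustration of Pielou's J = H'/ln(S): a perfectly even 5-species community returns 1
diversity(rep(10, 5), index="shannon")/log(5)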
#combine alpha diversity data and fire classification (from map file)
div=cbind(row.names(div),div,pielou, map$Classification)
colnames(div)=c("SampleID", "PD", "Richness", "Pielou", "Classification")
#plot (Figure 1)
#reshape the data
div.long=melt(div, id.vars=c("SampleID", "Classification"))
#plot a facet
#comment toggle for color v. bw
colors=c("red", "yellow", "green")
fig1 <- ggplot(data=div.long, aes(x=Classification, y=value))+
geom_boxplot() +
geom_jitter(aes(shape=Classification))+
#geom_jitter(aes(color=Classification, cex=1.5))+
facet_grid(variable~., scales="free_y")+
#scale_shape(guide=FALSE)+
scale_size(guide=FALSE)+
scale_color_manual(values=colors)+
scale_x_discrete(name="Fire classification")+
scale_y_continuous(name="Diversity value")+
theme_bw(base_size=10)
fig1
ggsave("Figures/Fig1.eps", width=86, units="mm")
#ttest
v=c("PD", "Richness", "Pielou")
outdiv=NULL
for(i in 1:length(v)){
#subset the data to test one phylum at a time
active=div[div$Classification=="FireAffected",colnames(div)==v[i]]
recov=div[div$Classification=="Recovered",colnames(div)==v[i]]
ref=div[div$Classification=="Reference",colnames(div)==v[i]]
#perform the test
test1=t.test(active, recov, paired=FALSE, var.equal = FALSE)
test2=t.test(active, ref, paired=FALSE, var.equal = FALSE)
test3=t.test(ref, recov, paired=FALSE, var.equal = FALSE)
test1.out=c(v[i],"ActivevRecov",test1$statistic, test1$parameter, test1$p.value)
test2.out=c(v[i],"ActivevRef",test2$statistic, test2$parameter, test2$p.value)
test3.out=c(v[i],"RefvRecov",test3$statistic, test3$parameter, test3$p.value)
outdiv=rbind(outdiv, test1.out, test2.out, test3.out)
}
outdiv
################################
### Analysis of technical replicates
################################
#Supporting Table 2 - assessing reproducibility among technical replicates
techdiv=read.table("InputFiles/OTU_hdf5_filteredfailedalignments_rdp_rmCM_even53000_alphadiv.txt") #output from QIIME
techdiv.out=NULL
sampleIDs=c("C01", "C02", "C03", "C04", "C05", "C06", "C07", "C08", "C09", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18")
for(i in 1:length(sampleIDs)){
temp=techdiv[grep(sampleIDs[i], row.names(techdiv)),]
temp2=c(mapply(mean,temp), mapply(sd,temp))
techdiv.out=rbind(techdiv.out,temp2)
}
row.names(techdiv.out)=sampleIDs
colnames(techdiv.out)=c("PD_mean", "Richness_mean", "PD_sd", "Richness_sd")
#write.table(techdiv.out, "Results/AlphaDiv_TechnicalReps.txt", quote=FALSE, sep="\t")
#Supporting PCoA (SFig 2)- assessing reproducibility among technical replicates
beta <- read.table("InputFiles/weighted_unifrac_OTU_hdf5_filteredfailedalignments_rdp_rmCM_even53000.txt", sep="\t", stringsAsFactors = FALSE, header = TRUE, row.names=1)
map.f<- read.table("InputFiles/Centralia_Full_Map.txt", sep="\t", stringsAsFactors = FALSE, header = TRUE, row.names=1)
beta <- beta[order(row.names(beta)),order(colnames(beta))]
#Remove Mock
beta <- beta[-55,-55]
library(vegan)
beta.pcoa<- cmdscale(beta, eig=TRUE)
ax1.v.f=beta.pcoa$eig[1]/sum(beta.pcoa$eig)
ax2.v.f=beta.pcoa$eig[2]/sum(beta.pcoa$eig)
coordinates <- as.data.frame(beta.pcoa$points)
Samples <- map$Sample
coordinates$Sample<- map.f$Sample
coordinates_avg_sd <- NULL
for (i in 1:length(Samples)){
Site <- coordinates[coordinates$Sample==Samples[i],]
AX1 <- c(mean(Site[,1]),sd(Site[,1]))
AX2 <- c(mean(Site[,2]),sd(Site[,2]))
coordinates_avg_sd<- rbind(coordinates_avg_sd,c(AX1,AX2))
}
row.names(coordinates_avg_sd)<-Samples
unique(map$Classification)
Class=rep('black',nrow(map))
Class[map$Classification=="FireAffected"]='red'
Class[map$Classification=="Reference"]='green'
Class[map$Classification=="Recovered"]='yellow'
library(calibrate)
#SFig 2
dev.off()
setEPS()
postscript("Figures/SFig2.eps", width = 6, height=6, pointsize=8,paper="special")
plot(coordinates_avg_sd[,1],coordinates_avg_sd[,3] ,cex=1.5,pch=21,bg=Class,main="Averaged Technical Replicates Weighted UniFrac PCoA",xlab= paste("PCoA1: ",100*round(ax1.v.f,3),"% var. explained",sep=""), ylab= paste("PCoA2: ",100* round(ax2.v.f,3),"% var. explained",sep=""))
textxy(X=coordinates_avg_sd[,1], Y=coordinates_avg_sd[,3],labs=map$Sample, cex=1)
arrows(coordinates_avg_sd[,1], coordinates_avg_sd[,3]- coordinates_avg_sd[,4], coordinates_avg_sd[,1], coordinates_avg_sd[,3]+ coordinates_avg_sd[,4], length=0.05, angle=90, code=3)
arrows(coordinates_avg_sd[,1]- coordinates_avg_sd[,2], coordinates_avg_sd[,3], coordinates_avg_sd[,1] + coordinates_avg_sd[,2], coordinates_avg_sd[,3], length=0.05, angle=90, code=3)
dev.off()
################################
### Phylum-level responses to fire
################################
#load R libraries for this section
library(ggplot2)
#read in phylum level OTU table (QIIME output)
comm.phylum=read.table("InputFiles/MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000_L2.txt", sep="\t", header=TRUE, row.names=1) #output from QIIME
##sort by sample ID (so that they are in consecutive order)
comm.phylum=comm.phylum[,order(colnames(comm.phylum))]
#combine phyla that contribute less than 0.01 each
below01=comm.phylum[rowSums(comm.phylum)<0.01,]
below01.cs=colSums(below01)
#remove those below01 phyla from the table
comm.phylum=comm.phylum[rowSums(comm.phylum)>0.01,]
#add the summary from the <0.01
comm.phylum=rbind(comm.phylum,below01.cs)
#rename the last row
row.names(comm.phylum)[nrow(comm.phylum)]="Below_0.01"
#for character string truncation in R : http://stackoverflow.com/questions/10883605/truncating-the-end-of-a-string-in-r-after-a-character-that-can-be-present-zero-o
phylumnames=sub(".*p__", "", row.names(comm.phylum))
row.names(comm.phylum)=phylumnames
#assign fire classifications to samples
fireclass=map[,"Classification"]
p.active=comm.phylum[,fireclass=="FireAffected"]
p.recov=comm.phylum[,fireclass=="Recovered"]
p.ref=comm.phylum[,fireclass=="Reference"]
#Calculate a mean phylum rel. abundance across all of the samples that are within each activity group
m.active=apply(p.active,1,mean)
m.recov=apply(p.recov,1,mean)
m.ref=apply(p.ref,1,mean)
m.summary.p=cbind(m.active, m.recov,m.ref)
colnames(m.summary.p)=c("FireAffected", "Recovered", "Reference")
#sort in decreasing total abundance order
m.summary.p=m.summary.p[order(rowSums(m.summary.p),decreasing=TRUE),]
#plot (Figure 3)
m.summary.p.long=melt(m.summary.p, id.vars=row.names(m.summary.p),measure.vars=c("FireAffected", "Recovered", "Reference"))
colors=c("red", "yellow", "green")
fig3=ggplot(m.summary.p.long, aes(x=Var1, y=value, fill=Var2))+
geom_dotplot(binaxis="y", dotsize = 3)+
facet_grid(Var2~.)+
scale_fill_manual(values=colors, guide=FALSE)+
labs(x="Phylum", y="Mean relative abundance", las=1)+
theme(axis.text.x = element_text(angle = 90, size = 10, face = "italic"))
fig3
ggsave("Figures/Fig3.eps", width=178, units="mm")
#Welch's t-test for all phyla
u=row.names(comm.phylum)
out=NULL
for(i in 1:length(u)){
#subset the data to test one phylum at a time
active=comm.phylum[row.names(comm.phylum)==u[i],fireclass=="FireAffected"]
recov=comm.phylum[row.names(comm.phylum)==u[i],fireclass=="Recovered"]
#perform the test
test=t.test(active, recov, paired=FALSE, var.equal = FALSE)
test.out=c(row.names(comm.phylum)[i],test$statistic, test$parameter, test$p.value)
out=rbind(out,test.out)
}
colnames(out)=c("Phylum", "Tstatistic", "DF", "pvalue")
#all results: Supporting Table 8
out
#extract overrepresented in fire
out[out[,"pvalue"]<0.05 & out[,"Tstatistic"]>0,]
#extracted overrepresented in recovered
out[out[,"pvalue"]<0.05 & out[,"Tstatistic"]<0,]
#write.table(out, "Results/Phylum_ttest.txt",quote=FALSE, sep="\t")
################################
### Comparative (beta) diversity
################################
#load R libraries for this section
library(calibrate)
library(ggplot2)
library(vegan)
# use weighted unifrac
uf.pcoa=cmdscale(uf.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v=uf.pcoa$eig[1]/sum(uf.pcoa$eig)
ax2.v=uf.pcoa$eig[2]/sum(uf.pcoa$eig)
envEF=envfit(uf.pcoa, env)
#Supporting Table 4
envEF
unique(map$Classification)
Class=rep('black',nrow(map))
Class[map$Classification=="FireAffected"]='red'
Class[map$Classification=="Reference"]='green'
Class[map$Classification=="Recovered"]='yellow'
#export figure 2
#textxy is from the calibrate library
dev.off()
setEPS()
postscript("Figures/Fig2.eps", width = 3.385, height=3.385, pointsize=8,paper="special")
plot(uf.pcoa$points[,1],uf.pcoa$points[,2] ,cex=1.5,pch=21,bg=Class,main="Weighted UniFrac PCoA", xlab= paste("PCoA1: ",100*round(ax1.v,3),"% var. explained",sep=""), ylab= paste("PCoA2: ",100*round(ax2.v,3),"%var. explained",sep=""))
textxy(X=uf.pcoa$points[,1], Y=uf.pcoa$points[,2],labs=map$SampleID, cex=0.8)
legend('bottomleft',c('Fire Affected','Recovered','Reference'),pch=21,pt.bg=c("red", "yellow", "green"),lty=0)
plot(envEF, p.max=0.10, col="black", cex=1)
dev.off()
#perform hypothesis testing on fire-affected v. recovered+reference sites
#permanova
Class2=sub("green", "yellow", Class)
a=adonis(uf.d~Class2, distance=TRUE, permutations=1000)
a
#multivariate dispersion with Tukey HSD
b=betadisper(uf.d, group=Class2)
TukeyHSD(b, which = "group", ordered = FALSE,conf.level = 0.95)
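#An added alternative (a sketch): vegan's permutation test for the same dispersion model
permutest(b, permutations=999)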
#mantel w/ spatial distances
space=read.table("InputFiles/spatialdistancematrix.txt", header=TRUE, row.names=1)
space.d=as.dist(space)
mantel(uf.d,space.d)
################################
### Do different resemblances agree in their overarching patterns?
################################
#Supporting Table 3A:
#the variance explained by each distance (taxonomic/phylogenetic and weighted/unweighted)
bc.d=vegdist(t(comm), method="bray")
sor.d=vegdist(t(comm), method="bray",binary=TRUE)
# PCoA using unweighted unifrac (QIIME output - unweighted phylogenetic)
uwuf.pcoa=cmdscale(uwuf.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v.uwuf=uwuf.pcoa$eig[1]/sum(uwuf.pcoa$eig)
ax2.v.uwuf=uwuf.pcoa$eig[2]/sum(uwuf.pcoa$eig)
# PCoA using bray-curtis (vegan output - weighted taxonomic)
bc.pcoa=cmdscale(bc.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v.bc=bc.pcoa$eig[1]/sum(bc.pcoa$eig)
ax2.v.bc=bc.pcoa$eig[2]/sum(bc.pcoa$eig)
#PCoA using sorensen (vegan output - unweighted taxonomic)
sor.pcoa=cmdscale(sor.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v.sor=sor.pcoa$eig[1]/sum(sor.pcoa$eig)
ax2.v.sor=sor.pcoa$eig[2]/sum(sor.pcoa$eig)
#Mantel and PROTEST tests between all resemblances (Supporting Table 3B)
resem=list(uf.d,uwuf.d,nwuf.d,bc.d,sor.d)
#loop over all unique pairs of resemblance matrices
names=c("weighted_UniFrac", "unweighted_UniFrac", "normalized_weighted_UniFrac", "BrayCurtis", "Sorenson")
m.out=NULL
for (i in 1:(length(resem)-1)){
  dist1=resem[[i]]
  print(i)
  for(j in (i+1):length(resem)){
dist2=resem[[j]]
print(j)
#Mantel
m=mantel(dist1,dist2)
#Protest
pr=protest(dist1,dist2)
#results out
m.v=c(names[i], names[j],m$statistic, m$signif, pr$t0, pr$ss, pr$signif)
m.out=rbind(m.out,m.v)
}
}
#Supporting Table 3B
colnames(m.out)=c("Dist1", "Dist2", "Mantel_R", "Mantel_p", "PROTEST_R", "PROTEST_m12", "PROTEST_p")
m.out
#write.table(m.out, "Results/MantelDist.txt", quote=FALSE, sep="\t")
################################
### Comparative diversity of fire-affected samples
################################
#load R libraries for this section
library(vegan)
#reduce uf to fire only
uf.fire=uf[map$Classification=="FireAffected",map$Classification=="FireAffected"]
uf.fire.d=as.dist(uf.fire)
env.fire=env[map$Classification=="FireAffected",]
labels=map[map$Classification=="FireAffected","SampleID"]
#PCoA for fire sites only
uf.fire.pcoa=cmdscale(uf.fire.d, eig=TRUE)
#fit environmental variables
envFIT.fire=envfit(uf.fire.pcoa, env=env.fire)
#print results to screen (Supporting Table 5)
envFIT.fire
#df <- data.frame((envFIT.fire$vectors)$arrows, (envFIT.fire$vectors)$r, (envFIT.fire$vectors)$pvals)
#write.table(df, "Results/ENV_Fire.txt", quote=FALSE, sep="\t")
#calculate %var. explained by each axis
ax1.v.f=uf.fire.pcoa$eig[1]/sum(uf.fire.pcoa$eig)
ax2.v.f=uf.fire.pcoa$eig[2]/sum(uf.fire.pcoa$eig)
#CAP for fire-sites, constrained by temperature
#to determine explanatory value of abiotic factors for fire-affected sites, after temp is accounted for
#make vector of temperature only
temp=env.fire[,"SoilTemperature_to10cm"]
#CAP
cap1=capscale(uf.fire.d~Condition(temp))
#fit environmental variables
c.ef=envfit(cap1, env.fire)
#print results to screen (Supporting Table 6)
c.ef
#df <- data.frame((c.ef$vectors)$arrows, (c.ef$vectors)$r, (c.ef$vectors)$pvals)
#write.table(df, "Results/CAP.txt", quote=FALSE, sep="\t")
#calculate % var. explained by each axis
ax1.v.f.t=cap1$CA$eig[1]/sum(cap1$CA$eig)
ax2.v.f.t=cap1$CA$eig[2]/sum(cap1$CA$eig)
#Plot: supporting Figure 6
setEPS()
postscript("Figures/SFig6AB.eps", width = 6.770, height=3.385, pointsize=8,paper="special")
par(mfrow=c(1,2))
plot(uf.fire.pcoa$points[,1],uf.fire.pcoa$points[,2], main= "(A) Fire-affected soils PCoA", type="n",xlab=paste("PCoA1: ",100*round(ax1.v.f,3),"% var. explained",sep=""), ylab= paste("PCoA2: ",100*round(ax2.v.f,3),"% var. explained",sep=""))
textxy(X=uf.fire.pcoa$points[,1], Y=uf.fire.pcoa$points[,2],labs=labels, offset=0, cex=0.8)
plot(envFIT.fire, p=0.10)
plot(cap1, cex=0.9,main = "(B) Temperature-constrained \nfire-affected soils PCoA", xlab=paste("CAP Ax1: ",100*round(ax1.v.f.t,3),"%var. explained",sep=""), ylab=paste("CAP Ax2: ",100*round(ax2.v.f.t,3),"%var. explained",sep=""))
plot(c.ef, p= 0.10)
dev.off()
################################
### Sloan neutral model
################################
#NOTE: must use full dataset (including singleton OTUs) for this analysis
#Source for model fits is from Burns et al. ISMEJ 2015, downloaded R code from their supporting materials
#Source code requires: minpack.lm, Hmisc, stats4 packages - make sure they are installed (and their dependencies)
source("MiscSourceScripts/sncm.fit_function.r")
#assign variables for function
spp=t(comm.sigs)
taxon=as.vector(rdp)
ref.t.sigs=spp[map$Classification=="Reference",]
rec.t.sigs=spp[map$Classification=="Recovered",]
rec.t.sigs.NZ<- rec.t.sigs[,colSums(rec.t.sigs)>0]
fire.t.sigs=spp[map$Classification=="FireAffected",]
fire.t.sigs.NZ<-fire.t.sigs[,colSums(fire.t.sigs)>0]
#Models for the whole community
obs.np=sncm.fit(spp,taxon=rdp, stats=FALSE, pool=NULL)
sta.np=sncm.fit(spp,taxon=rdp, stats=TRUE, pool=NULL)
#Models for each classification
#fire affected: total - asks the question: do the fire-affected sites, by themselves, follow neutral expectations?
obs.fireT=sncm.fit(fire.t.sigs.NZ,taxon=rdp, stats=FALSE, pool=NULL)
sta.fireT=sncm.fit(fire.t.sigs.NZ,taxon=rdp, stats=TRUE, pool=NULL)
#recovered : total - asks the question: do recovered sites follow neutral expectations?
obs.recT=sncm.fit(rec.t.sigs.NZ,taxon=rdp, stats=FALSE, pool=NULL)
sta.recT=sncm.fit(rec.t.sigs.NZ,taxon=rdp, stats=TRUE, pool=NULL)
results=rbind(sta.np, sta.fireT, sta.recT)
row.names(results)=c("all", "Fire_Affected", "Recovered")
#par(mfrow=c(2,3)) #for plotting in R studio w/out export
l1=list(obs.np, obs.recT, obs.fireT)
l2=list(sta.np, sta.recT, sta.fireT)
names=c("(A) All", "(B) Recovered", "(C) Fire_Affected")
out.sta=NULL
#Plot supporting Fig 7 panels
for(i in 1:length(l1)){
#define data
temp=as.data.frame(l1[i])
sta=as.data.frame(l2[i])
#how many taxa are above their prediction, and below?
above.pred=sum(temp$freq > (temp$pred.upr), na.rm=TRUE)/sta$Richness
below.pred=sum(temp$freq < (temp$pred.lwr), na.rm=TRUE)/sta$Richness
out=c(above.pred, below.pred)
ap= temp$freq > (temp$pred.upr)
bp= temp$freq < (temp$pred.lwr)
#plot figure (SFig7)
setEPS()
if(i == 1){
postscript("Figures/SFig7A.eps", width = 2.33, height=3, pointsize=10,paper="special")
}
if (i == 2){
postscript("Figures/SFig7B.eps", width = 2.33, height=3, pointsize=10,paper="special")
}
if (i ==3){
postscript("Figures/SFig7C.eps", width = 2.33, height=3, pointsize=10,paper="special")
}
plot(x=log(temp$p), y=temp$freq, main=names[i], xlab="Log Abundance", ylab="Occurrence Frequency")
points(x=log(temp$p[ap==TRUE]), y=temp$freq[ap==TRUE], col="red", pch=19)
points(x=log(temp$p[bp==TRUE]), y=temp$freq[bp==TRUE], col="blue", pch=19)
lines(temp$freq.pred~log(temp$p), col="yellow", lty=1, lwd=6)
lines(temp$pred.upr~log(temp$p), col="yellow", lty=1, lwd=3)
lines(temp$pred.lwr~log(temp$p), col="yellow", lty=1, lwd=3)
dev.off()
out.sta=rbind(out.sta, out)
}
colnames(out.sta)=c("%AbovePred", "%BelowPred")
#Supporting Table 7
results=cbind(results, out.sta)
results
#write.table(results, "Results/SloanNeutralModel.txt", quote=FALSE, sep="\t")
################################
### Beta null models
################################
#MODIFIED by als to use our dataset (comm.t) instead of "dune" and to only include the abundance-based model. We also changed the number of patches to be 18 to match the dataset.
#ORIGINAL scripts available in the appendix of the work below, published in Oikos (Appendix oik.02803, also R_analysis/oik-02803-appendix-to-Tucker2016/)
#Note that beta null models with weighted UniFrac require ~75 hours walltime to complete with 4Gb memory and 1 processing node; beta-null models with Bray-Curtis only require ~30 hours
#######################
### Code for example metacommunity simulation and beta-null deviation calculations
### with "Differentiating between niche and neutral assembly in metacommunities using
### null models of beta-diversity"
### Prepared May 14, 2014
### Authors Caroline Tucker, Lauren Shoemaker, Brett Melbourne
#######################
## Load required source files and libraries
library(reldist)
library(vegan)
library(bipartite)
source("oik-02803-appendix-to-Tucker2016/MetacommunityDynamicsFctsOikos.R")
source("oik-02803-appendix-to-Tucker2016/PANullDevFctsOikos.R")
##packages for UniFrac Null Model (weighted) #als add
library(GUniFrac)
library(ape)
library(phangorn)
tree <- read.tree("MASTER_RepSeqs_aligned_clean.tre")
is.rooted(tree)
#https://github.com/joey711/phyloseq/issues/235
#FastUniFrac trees are unrooted; calculation is done using mid-point root.
tree <- midpoint(tree)
is.rooted(tree)
#formatting problem with tree tip labels - for some reason the de novo OTU tips have extra quotes around them, which need to be removed
tree$tip.label=gsub("'","", tree$tip.label)
### Prepare and calculate abundance beta-null deviation metric
## Adjusted from Stegen et al 2012 GEB
bbs.sp.site <- comm.t
patches=nrow(bbs.sp.site)
rand <- 999
#note - two randomization runs in < 8 min on my laptop
null.alphas <- matrix(NA, ncol(comm.t), rand)
null.alpha <- matrix(NA, ncol(comm.t), rand)
expected_beta <- matrix(NA, 1, rand)
null.gamma <- matrix(NA, 1, rand)
null.alpha.comp <- numeric()
bucket_bray_res <- matrix(NA, patches, rand)
bucket_wuf_res <- matrix(NA, patches, rand) #als add
bbs.sp.site = ceiling(bbs.sp.site/max(bbs.sp.site))
mean.alpha = sum(bbs.sp.site)/nrow(bbs.sp.site) #mean.alpha
gamma <- ncol(bbs.sp.site) #gamma
obs_beta <- 1-mean.alpha/gamma
obs_beta_all <- 1-rowSums(bbs.sp.site)/gamma
##Generate null patches
for (randomize in 1:rand) {
null.dist = comm.t
for (species in 1:ncol(null.dist)) {
tot.abund = sum(null.dist[,species])
null.dist[,species] = 0
for (individual in 1:tot.abund) {
sampled.site = sample(c(1:nrow(bbs.sp.site)), 1)
null.dist[sampled.site, species] = null.dist[sampled.site, species] + 1
}
}
##Calculate null deviation for null patches and store
null.alphas[,randomize] <- apply(null.dist, 2, function(x){sum(ifelse(x > 0, 1, 0))})
null.gamma[1, randomize] <- sum(ifelse(rowSums(null.dist)>0, 1, 0))
expected_beta[1, randomize] <- 1 - mean(null.alphas[,randomize]/null.gamma[,randomize])
null.alpha <- mean(null.alphas[,randomize])
null.alpha.comp <- c(null.alpha.comp, null.alpha)
bucket_bray <- as.matrix(vegdist(null.dist, "bray"))
wuf<-(GUniFrac(null.dist, tree, alpha=1)) #als add
#wuf<-(GUniFrac(comm.t, tree, alpha=1)) #als add test that comparable values are calculated as with QIIME
bucket_wuf <- as.matrix(wuf$unifracs[,,"d_1"]) #als add
diag(bucket_bray) <- NA
diag(bucket_wuf) <- NA #als add
bucket_bray_res[,randomize] <- apply(bucket_bray, 2, FUN="mean", na.rm=TRUE)
bucket_wuf_res[,randomize] <- apply(bucket_wuf, 2, FUN="mean", na.rm=TRUE) #als add
} ## end randomize loop
## Calculate beta-diversity for obs metacommunity
beta_comm_abund <- vegdist(comm.t, "bray")
wuf_comm_abund <- GUniFrac(comm.t, tree, alpha=1) #als add
res_beta_comm_abund <- as.matrix(as.dist(beta_comm_abund))
res_wuf_comm_abund <- as.matrix(as.dist(wuf_comm_abund$unifracs[,,"d_1"])) #als add
diag(res_beta_comm_abund) <- NA
diag(res_wuf_comm_abund) <- NA #als add
# output beta diversity (Bray)
beta_div_abund_stoch <- apply(res_beta_comm_abund, 2, FUN="mean", na.rm=TRUE)
wuf_div_abund_stoch <- apply(res_wuf_comm_abund, 2, FUN="mean", na.rm=TRUE) #als add
# output abundance beta-null deviation
bray_abund_null_dev <- beta_div_abund_stoch - mean(bucket_bray_res)
wuf_abund_null_dev <- wuf_div_abund_stoch - mean(bucket_wuf_res) #als add
### Outputs (only the abundance-based metrics are retained in this modified version):
#beta_div_abund_stoch - Bray-Curtis beta-diversity for the metacommunity, average value (of all pairwise comparisons) for each patch
#wuf_div_abund_stoch - weighted UniFrac beta-diversity for the metacommunity, average value (of all pairwise comparisons) for each patch
#bray_abund_null_dev / wuf_abund_null_dev - abundance null deviation values for the metacommunity, average value (of all pairwise comparisons) for each patch
###
#END script by Tucker et al.
#######################
#plotting and statistical tests
betanull.out=data.frame(I(beta_div_abund_stoch),I(bray_abund_null_dev),I(wuf_div_abund_stoch),I(wuf_abund_null_dev),I(map[,"SampleID"]),as.character(map[,"Classification"]), as.numeric(map[,"SoilTemperature_to10cm"]), stringsAsFactors=FALSE)
colnames(betanull.out)=c("BRAY_beta_div_abund_stoch", "BRAY_AbundanceNullDeviation", "WUF_div_abund_stoch","WUF_AbundanceNullDeviation","SampleID","Classification", "SoilTemperature_to10cm")
#write.table(betanull.out, "Results/bnullout_r1.txt", quote=FALSE, sep="\t")
#betanull.out=read.table("Results/bnullout_r1.txt", header=TRUE, sep="\t")
##plottingorder orders samples along a chronosequence and disturbance intensity gradient: 1) reference samples; 2) fire-affected sites ranked from hottest to coolest soil temperatures; and 3) recovered sites ranked from hottest to coolest soil temperatures
plottingorder=c(13,15,12,17,14,9,16,1,6,4,11,8,3,7,5,10,2,18)
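#An added sanity check: plottingorder must be a permutation of the 18 sample indices
stopifnot(sort(plottingorder)==1:18)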
library("reshape2")
bnull.long=melt(betanull.out, id.vars=c("SampleID", "Classification","SoilTemperature_to10cm"), measure.vars=c("BRAY_AbundanceNullDeviation", "WUF_AbundanceNullDeviation"))
GnYlOrRd=colorRampPalette(colors=c("green", "yellow", "orange","red"), bias=2)
fig4A <- ggplot(data=bnull.long, aes(x=Classification, y=as.numeric(value)))+
geom_boxplot()+
geom_jitter(aes(color=as.numeric(SoilTemperature_to10cm), y=as.numeric(value)))+
facet_grid(variable~., scales="free_y")+
scale_size(guide=FALSE)+
scale_color_gradientn(colours=GnYlOrRd(5), guide="colorbar", guide_legend(title="Temp"))+
scale_x_discrete(name="Fire classification", limits=c("Reference", "FireAffected", "Recovered"))+
scale_y_continuous(name="Abundance Null Deviation")+
theme_bw(base_size=10)
fig4A
bnull.long.bray=bnull.long[bnull.long[,"variable"]=="BRAY_AbundanceNullDeviation",]
fig4B <- ggplot(data=bnull.long.bray, aes(x=plottingorder, y=as.numeric(value)))+
geom_point(aes(color=as.numeric(SoilTemperature_to10cm), y=as.numeric(value)))+
scale_size(guide=FALSE)+
scale_color_gradientn(colours=GnYlOrRd(5), guide="colorbar", guide_legend(title="Temperature (Celsius)"))+
scale_x_continuous(name="Disturbance Intensity", breaks=c(1.5,7,15), labels=c("Ref", "FireAffected", "Recovered"))+
scale_y_continuous(name="Abundance Null Deviation")+
geom_vline(xintercept=c(2.5,11.5), col="gray", lty="dashed")+
theme_bw(base_size=10)+
theme(legend.position="none")
fig4B
bnull.long.wuf=bnull.long[bnull.long[,"variable"]=="WUF_AbundanceNullDeviation",]
fig4C <- ggplot(data=bnull.long.wuf, aes(x=plottingorder, y=as.numeric(value)))+
geom_point(aes(color=as.numeric(SoilTemperature_to10cm), y=as.numeric(value)))+
scale_size(guide=FALSE)+
scale_color_gradientn(colours=GnYlOrRd(5), guide="colorbar", guide_legend(title="Temperature (Celsius)"))+
scale_x_continuous(name="Disturbance Intensity", breaks=c(1.5,7,15), labels=c("Ref", "FireAffected", "Recovered"))+
scale_y_continuous(name="Abundance Null Deviation")+
geom_vline(xintercept=c(2.5,11.5), col="gray", lty="dashed")+
theme_bw(base_size=10)+
theme(legend.position="none")
fig4C
#Multiplot script written by Winston Chang
source("MiscSourceScripts/multiplot.R")
dev.off()
setEPS()
postscript("Figures/Fig4ABC.eps", width = 3.385, height=5, pointsize=9,paper="special")
multiplot(fig4A, fig4B, fig4C, cols=1)
dev.off()
#Pairwise t-tests for Bray Beta Null
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","BRAY_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","BRAY_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","BRAY_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="Reference","BRAY_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Reference","BRAY_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","BRAY_AbundanceNullDeviation"])
#recovered and fire-affected are statistically distinct, p < 0.0006, all other comparisons p > 0.05
#Pairwise t-tests for WUF Beta Null
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","WUF_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","WUF_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","WUF_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="Reference","WUF_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Reference","WUF_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","WUF_AbundanceNullDeviation"])
#recovered and fire-affected are distinct, p < 0.04, all other comparisons p > 0.05
#Are the WUF and Bray beta null correlated?
cor.test(bnull.long.wuf[,"value"], bnull.long.bray[,"value"])
#Pearson's R = 0.71, p = 0.001
################################
### Dominant taxa analysis
################################
#Extract cumulative most abundant OTUs from fire-affected soils - for Table 1
fire=t(fire.t)
dim(fire)
fire.ordered=fire[order(rowSums(fire),decreasing=TRUE),]
perc=rowSums(fire.ordered)/sum(rowSums(fire.ordered))
#Analysis of the top 10 most prevalent taxa in fire-affected and recovered soils
#libraries needed for this
library(vegan)
library(gplots)
#Do hot soils have consistent dominant membership?
fire=t(fire.t)
fire.new=fire[rowSums(fire)>0,]
rdp.fire=as.vector(rdp.nosigs[rowSums(fire)>0])
dim(fire.new)
rec=t(rec.t)
rec.new=rec[rowSums(rec)>0,]
rdp.rec=as.vector(rdp.nosigs[rowSums(rec)>0])
dim(rec.new)
#Function to extract the OTU numbers and taxonomic IDs of the top (default=10) most abundant OTUs in each site.
extractdominant.f<-function(data,rdp,top.no=10){
out1=NULL
out2=NULL
for(i in 1:ncol(data)){
s=sort(data[,i], decreasing=TRUE, index.return=TRUE)
otuIDs=names(s$x[1:top.no])
rdp.out=rdp[s$ix[1:top.no]]
sampleID=c(rep(colnames(data)[[i]],top.no))
temp=cbind(sampleID,otuIDs)
out1=rbind(out1,temp)
out2=cbind(out2,rdp.out)
}
colnames(out2)=colnames(data)
#write.table(out2, paste("Results/rdp_",top.no,".txt",sep=""), quote=FALSE, sep="\t")
#who are the top-10 ranked
u=unique(out1[,2])
l=length(unique(out1[,2]))
actual.prop=l/dim(out1)[[1]]
expected.prop=top.no/dim(out1)[[1]]
print("Unique OTU IDs within the most abundant")
print(u)
print("Number of unique OTUs within the most abundant")
print(l)
print("Redundancy index given the number of samples and the top number selected 1.00 means completely nonredundant, every top taxa was observed only 1 time across all samples")
print(actual.prop)
print("Expected redundancy index")
print(expected.prop)
#print("List of top taxa by sample")
#print(out2)
return(out2)
}
fire.out=extractdominant.f(fire.new,rdp.fire,10)
rec.out=extractdominant.f(rec.new,rdp.rec,10)
#scratch values for stepping through subsettop.f interactively; not needed when calling the function below
#data=fire.new; top.no=10; rdp.in=rdp.fire
subsettop.f=function(data, top.no, rdp.in){
otuIDs=NULL
rdpIDs=NULL
for(i in 1:ncol(data)){
s=sort(data[,i], decreasing=TRUE, index.return=TRUE)
otuIDs=c(otuIDs, names(s$x[1:top.no]))
rdpIDs=c(rdpIDs, rdp.in[s$ix[1:top.no]])
}
temp=cbind(otuIDs,rdpIDs)
#print(temp)
u.top=unique(otuIDs)
#temp.u=temp[is.element(temp[,"otuIDs"],u.top),]
#write.table(temp.u, "Results/OTURDP_Top10.txt", sep="\t", quote=FALSE)
top10.otu=NULL
for(j in 1:nrow(data)){
if(is.element(row.names(data)[j],u.top)){
top10.otu=rbind(top10.otu,data[j,])
}
}
row.names(top10.otu)=u.top
colnames(top10.otu)=colnames(data)
return(top10.otu)
}
topfire=subsettop.f(fire.new,10,rdp.fire)
#how many OTUs are de novo?
length(grep("dn",rownames(topfire)))
#create color pallette; see: http://colorbrewer2.org/
hc=colorRampPalette(c("#91bfdb","white","#fc8d59"), interpolate="linear")
topfire.pa=1*(topfire>0)
#how many of the top OTUs occur in 9 of the fire-affected samples?
sum(rowSums(topfire.pa)==9)
toprec=subsettop.f(rec.new,10, rdp.rec)
#how many OTUs are de novo
length(grep("dn",rownames(toprec)))
#Figure 5
dev.off()
setEPS()
postscript("Figures/Fig5A.eps", width = 3.5, height=7, pointsize=10, paper="special")
heatmap.2(topfire,col=hc(100),scale="column",key=TRUE,symkey=FALSE, trace="none", density.info="none",dendrogram="both", margins=c(5,13), srtCol=90)
dev.off()
setEPS()
postscript("Figures/Fig5B.eps", width = 3.5, height=7, pointsize=10, paper="special")
heatmap.2(toprec,col=hc(100),scale="column",key=TRUE,symkey=FALSE, trace="none", density.info="none",dendrogram="both", margins=c(5,13), srtCol=90)
dev.off()
path: /R_analysis/Centralia2014_AmpliconWorkflow.R | license_type: permissive | repo_name: kun-ecology/PAPER_LeeSorensen_ISMEJ_2017 | language: R | is_vendor: false | is_generated: false | length_bytes: 38,831 | extension: r
################################
### Code for ecological statistics for
### "Divergent extremes but convergent recovery of bacterial and archaeal soil
### communities to an ongoing subterranean coal mine fire"
### by SH Lee, JW Sorensen, KL Grady, TC Tobin and A Shade
### Prepared 12 November 2016
### Author: Ashley Shade, Michigan State University; shade.ashley <at> gmail.com
################################
#
# Before you start
# Make sure you are using the latest version of R (and Rstudio)
# The following packages (and their dependencies) are needed to run the whole analysis
# calibrate 1.7.2
# gplots 3.0.1
# ggplot2 2.1.0
# indicspecies 1.7.5
# limma 3.26.9
# mass 7.3-45 (calibrate dependency)
# outliers 0.14
# reshape2 1.4.1
# vegan 2.4-0
# reldist 1.6-6
# bipartite 2.06.1
# GUniFrac 1.0
# ape 3.5
# phangorn 2.0-2
#
################################
### Plotting soil contextual data
################################
#load R libraries for this section
library(ggplot2)
library(reshape2)
library(outliers)
#read in mapping file with soil data
map=read.table("InputFiles/Centralia_Collapsed_Map_forR.txt", header=TRUE, sep="\t")
#plot chemistry v. temperature (Supporting Figure 3)
#melt data
map.long=melt(map, id.vars=c("SampleID", "SoilTemperature_to10cm", "Classification"), measure.vars=c("NO3N_ppm","NH4N_ppm","pH","SulfateSulfur_ppm","K_ppm","Ca_ppm","Mg_ppm","OrganicMatter_500","Fe_ppm", "As_ppm", "P_ppm", "SoilMoisture_Per"))
#make a gradient color palette, note bias
GnYlOrRd=colorRampPalette(colors=c("green", "yellow", "orange","red"), bias=2)
sfig3=ggplot(map.long, aes(y=as.numeric(SoilTemperature_to10cm), x=value))+
#add points layer
geom_point(aes(y=as.numeric(SoilTemperature_to10cm), x=value, shape=Classification, color=as.numeric(SoilTemperature_to10cm)))+
#set facet with 4 columns, make x-axes appropriate for each variable
facet_wrap(~variable, ncol=4, scales="free_x")+
#set gradient for temperature and add gradient colorbar
scale_color_gradientn(colours=GnYlOrRd(5), guide="colorbar", guide_legend(title="Temperature"))+
#omit the legend for the size of the points
scale_size(guide=FALSE)+
#define the axis labels
labs(y="Temperature (Celsius)", x=" ")+
#set a simple theme
theme_bw(base_size=10)
sfig3
#ggsave("Figures/SFig3.eps", width=178, units="mm")
##Subset contextual data inclusive of soil quantitative variables
env=map[,c("SoilTemperature_to10cm", "NO3N_ppm", "pH", "K_ppm", "Mg_ppm", "OrganicMatter_500", "NH4N_ppm", "SulfateSulfur_ppm", "Ca_ppm", "Fe_ppm", "As_ppm", "P_ppm", "SoilMoisture_Per","Fire_history")]
##Test for outliers, loop will print all significant outliers and their sampleID - these were not removed from analysis
for (i in 1:ncol(env)){
x=grubbs.test(env[,i], type=10)
if(x$p.value < 0.05){
print(colnames(env)[i])
print(row.names(env)[env[,i]==max(env[,i])])
}
}
#samples 13 (for pH, Ca) and 10 (for NO3N, NH4N,Fe) are common outliers - both have high temps. Sample 3 is also outlier for Mg and OM; this is a recovered site. Generally this test indicates a lot of variability.
#correlation test between temperature and other soil chemistry
for(i in 1:ncol(env)){
ct=cor.test(env[,"SoilTemperature_to10cm"],env[,i])
if (ct$p.value < 0.05){
print(colnames(env)[i])
print(ct)
}
}
#extract means from recovered and reference soils' pH
mean(env[map[,"Classification"]=="Reference","pH"])
mean(env[map[,"Classification"]== "Recovered","pH"])
#plot cell counts and 16S rRNA qPCR data (Supporting Figure 2)
map.long.counts=melt(map, id.vars=c("SampleID", "Classification"), measure.vars=c("rRNA_gene_copies_per_g_dry_soil","CellCounts_per_g_dry_soil"))
labels=c(rRNA_gene_copies_per_g_dry_soil="rRNA gene copies",CellCounts_per_g_dry_soil="Cell counts")
sfig4 <- ggplot(data=map.long.counts, aes(x=Classification, y=value))+
geom_boxplot() +
geom_jitter(aes(shape=Classification))+
facet_grid(variable~., scales="free_y", labeller=labeller(variable = labels))+
scale_shape(guide=FALSE)+
#scale_color_manual(values=colors)+
scale_x_discrete(name="Fire classification")+
scale_y_continuous(name="value per g dry soil")+
theme_bw(base_size=10)
sfig4
#ggsave("Figures/SFig4.eps", width=86, units="mm")
#Pairwise t-tests for cell counts
t.test(map[map[,"Classification"]=="Recovered","CellCounts_per_g_dry_soil"],map[map[,"Classification"]=="FireAffected","CellCounts_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="Recovered","CellCounts_per_g_dry_soil"],map[map[,"Classification"]=="Reference","CellCounts_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="FireAffected","CellCounts_per_g_dry_soil"],map[map[,"Classification"]=="Reference","CellCounts_per_g_dry_soil"])
#Pairwise t-tests for qPCR
t.test(map[map[,"Classification"]=="Recovered","rRNA_gene_copies_per_g_dry_soil"],map[map[,"Classification"]=="FireAffected","rRNA_gene_copies_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="Recovered","rRNA_gene_copies_per_g_dry_soil"],map[map[,"Classification"]=="Reference","rRNA_gene_copies_per_g_dry_soil"])
t.test(map[map[,"Classification"]=="Reference","rRNA_gene_copies_per_g_dry_soil"],map[map[,"Classification"]=="FireAffected","rRNA_gene_copies_per_g_dry_soil"])
################################
### Preparing OTU and distance tables for analysis
################################
#load R libraries for this section
library(ggplot2)
library(reshape2)
library(vegan)
#read in community OTU table, and transpose (rarefied collapsed MASTER table, output from QIIME)
comm=read.table("InputFiles/MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1, check.names=FALSE, sep="\t")
#save the consensus lineage, then remove it from the otu table
rdp=comm[,"ConsensusLineage"]
comm=comm[,-ncol(comm)]
#How many total QCed sequences?
sum(colSums(comm))
#sort community by colnames (to be in the consistent, consecutive order for all analyses)
comm=comm[,order(colnames(comm))]
#which are the singleton OTUs (total abundance of 1 sequence across the dataset)?
singletonOTUs=row.names(comm)[rowSums(comm)==1]
length(singletonOTUs)
#total 1374 singleton OTUs
g=grep("_dn", singletonOTUs)
length(g)
#1201 de novo OTUs are singletons
#who are the remaining de novo OTUs?
g=grep("_dn_",row.names(comm))
dn=rdp[g]
rdp.nosigs=rdp[rowSums(comm)>1]
#designate a full dataset
comm.sigs=comm
#remove OTUs with an abundance = 1, across the entire dataset (singleton OTUs)
comm=comm[rowSums(comm)>1,]
sum(colSums(comm))
#transpose matrix
comm.t=t(comm)
### Read in resemblance matrices
#read in weighted unifrac table (output from QIIME)
uf=read.table("InputFiles/weighted_unifrac_MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1)
#sort by rows, columns (so they are in the consecutive order)
uf=uf[order(row.names(uf)),order(colnames(uf))]
uf.d=as.dist(uf)
#read in the unweighted unifrac table (output from QIIME)
uwuf=read.table("InputFiles/unweighted_unifrac_MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1)
#sort by rows, columns (so they are in the consecutive order)
uwuf=uwuf[order(row.names(uwuf)),order(colnames(uwuf))]
uwuf.d=as.dist(uwuf)
#read in the normalized weighted unifrac table (output from QIIME)
nwuf=read.table("InputFiles/weighted_normalized_unifrac_MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000.txt", header=TRUE, row.names=1)
#sort by rows, columns (so all tables are in the consecutive order)
nwuf=nwuf[order(row.names(nwuf)),order(colnames(nwuf))]
nwuf.d=as.dist(nwuf)
#assign fire classification
fireclass=map[,"Classification"]
ref.t=comm.t[map$Classification=="Reference",]
rec.t=comm.t[map$Classification=="Recovered",]
fire.t=comm.t[map$Classification=="FireAffected",]
################################
### Calculate and plot within-sample (alpha) diversity
################################
#read in alpha diversity table (output from QIIME)
div=read.table("InputFiles/MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000_alphadiv.txt", header=TRUE)
#sort by sample ID (so that they are in consecutive order)
div=div[order(row.names(div)),]
#calculate pielou's evenness from OTU table
s=specnumber(comm.t)
h=diversity(comm.t,index="shannon")
pielou=h/log(s)
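#quick sanity check on a toy community (illustrative numbers only):
#a perfectly even community has Pielou's J = H/ln(S) = 1
#diversity(c(10,10,10,10), index="shannon")/log(specnumber(c(10,10,10,10))) #returns 1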
#combine alpha diversity data and fire classification (from map file)
div=cbind(row.names(div),div,pielou, map$Classification)
colnames(div)=c("SampleID", "PD", "Richness", "Pielou", "Classification")
#plot (Figure 1)
#reshape the data
div.long=melt(div, id.vars=c("SampleID", "Classification"))
#plot a facet
#comment toggle for color v. bw
colors=c("red", "yellow", "green")
fig1 <- ggplot(data=div.long, aes(x=Classification, y=value))+
geom_boxplot() +
geom_jitter(aes(shape=Classification))+
#geom_jitter(aes(color=Classification, cex=1.5))+
facet_grid(variable~., scales="free_y")+
#scale_shape(guide=FALSE)+
scale_size(guide=FALSE)+
scale_color_manual(values=colors)+
scale_x_discrete(name="Fire classification")+
scale_y_continuous(name="Diversity value")+
theme_bw(base_size=10)
fig1
ggsave("Figures/Fig1.eps", width=86, units="mm")
#ttest
v=c("PD", "Richness", "Pielou")
outdiv=NULL
for(i in 1:length(v)){
#subset the data to test one diversity metric at a time
active=div[div$Classification=="FireAffected",colnames(div)==v[i]]
recov=div[div$Classification=="Recovered",colnames(div)==v[i]]
ref=div[div$Classification=="Reference",colnames(div)==v[i]]
#perform the test
test1=t.test(active, recov, paired=FALSE, var.equal = FALSE)
test2=t.test(active, ref, paired=FALSE, var.equal = FALSE)
test3=t.test(ref, recov, paired=FALSE, var.equal = FALSE)
test1.out=c(v[i],"ActivevRecov",test1$statistic, test1$parameter, test1$p.value)
test2.out=c(v[i],"ActivevRef",test2$statistic, test2$parameter, test2$p.value)
test3.out=c(v[i],"RefvRecov",test3$statistic, test3$parameter, test3$p.value)
outdiv=rbind(outdiv, test1.out, test2.out, test3.out)
}
outdiv
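#the loop makes 9 comparisons (3 metrics x 3 pairs), so a BH correction may be warranted;
#a hedged sketch on the p-value column (column 5) assembled above:
#cbind(outdiv, p.BH=p.adjust(as.numeric(outdiv[,5]), method="BH"))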
################################
### Analysis of technical replicates
################################
#Supporting Table 2 - assessing reproducibility among technical replicates
techdiv=read.table("InputFiles/OTU_hdf5_filteredfailedalignments_rdp_rmCM_even53000_alphadiv.txt") #output from QIIME
techdiv.out=NULL
sampleIDs=c("C01", "C02", "C03", "C04", "C05", "C06", "C07", "C08", "C09", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18")
for(i in 1:length(sampleIDs)){
temp=techdiv[grep(sampleIDs[i], row.names(techdiv)),]
temp2=c(mapply(mean,temp), mapply(sd,temp))
techdiv.out=rbind(techdiv.out,temp2)
}
row.names(techdiv.out)=sampleIDs
colnames(techdiv.out)=c("PD_mean", "Richness_mean", "PD_sd", "Richness_sd")
#write.table(techdiv.out, "Results/AlphaDiv_TechnicalReps.txt", quote=FALSE, sep="\t")
#Supporting PCoA (SFig 2)- assessing reproducibility among technical replicates
beta <- read.table("InputFiles/weighted_unifrac_OTU_hdf5_filteredfailedalignments_rdp_rmCM_even53000.txt", sep="\t", stringsAsFactors = FALSE, header = TRUE, row.names=1)
map.f<- read.table("InputFiles/Centralia_Full_Map.txt", sep="\t", stringsAsFactors = FALSE, header = TRUE, row.names=1)
beta <- beta[order(row.names(beta)),order(colnames(beta))]
#Remove Mock
beta <- beta[-55,-55]
library(vegan)
beta.pcoa<- cmdscale(beta, eig=TRUE)
ax1.v.f=beta.pcoa$eig[1]/sum(beta.pcoa$eig)
ax2.v.f=beta.pcoa$eig[2]/sum(beta.pcoa$eig)
coordinates <- as.data.frame(beta.pcoa$points)
Samples <- map$Sample
coordinates$Sample<- map.f$Sample
coordinates_avg_sd <- NULL
for (i in 1:length(Samples)){
Site <- coordinates[coordinates$Sample==Samples[i],]
AX1 <- c(mean(Site[,1]),sd(Site[,1]))
AX2 <- c(mean(Site[,2]),sd(Site[,2]))
coordinates_avg_sd<- rbind(coordinates_avg_sd,c(AX1,AX2))
}
row.names(coordinates_avg_sd)<-Samples
unique(map$Classification)
Class=rep('black',nrow(map))
Class[map$Classification=="FireAffected"]='red'
Class[map$Classification=="Reference"]='green'
Class[map$Classification=="Recovered"]='yellow'
library(calibrate)
#SFig 2
dev.off()
setEPS()
postscript("Figures/SFig2.eps", width = 6, height=6, pointsize=8,paper="special")
plot(coordinates_avg_sd[,1],coordinates_avg_sd[,3] ,cex=1.5,pch=21,bg=Class,main="Averaged Technical Replicates Weighted UniFrac PCoA",xlab= paste("PCoA1: ",100*round(ax1.v.f,3),"% var. explained",sep=""), ylab= paste("PCoA2: ",100* round(ax2.v.f,3),"% var. explained",sep=""))
textxy(X=coordinates_avg_sd[,1], Y=coordinates_avg_sd[,3],labs=map$Sample, cex=1)
arrows(coordinates_avg_sd[,1], coordinates_avg_sd[,3]- coordinates_avg_sd[,4], coordinates_avg_sd[,1], coordinates_avg_sd[,3]+ coordinates_avg_sd[,4], length=0.05, angle=90, code=3)
arrows(coordinates_avg_sd[,1]- coordinates_avg_sd[,2], coordinates_avg_sd[,3], coordinates_avg_sd[,1] + coordinates_avg_sd[,2], coordinates_avg_sd[,3], length=0.05, angle=90, code=3)
dev.off()
################################
### Phylum-level responses to fire
################################
#load R libraries for this section
library(ggplot2)
#read in phylum level OTU table (QIIME output)
comm.phylum=read.table("InputFiles/MASTER_OTU_hdf5_filteredfailedalignments_rdp_rmCM_collapse_even321000_L2.txt", sep="\t", header=TRUE, row.names=1) #output from QIIME
##sort by sample ID (so that they are in consecutive order)
comm.phylum=comm.phylum[,order(colnames(comm.phylum))]
#combine phyla that contribute less than 0.01 each
below01=comm.phylum[rowSums(comm.phylum)<0.01,]
below01.cs=colSums(below01)
#remove those below01 phyla from the table
comm.phylum=comm.phylum[rowSums(comm.phylum)>0.01,]
#add the summary from the <0.01
comm.phylum=rbind(comm.phylum,below01.cs)
#rename the last row
row.names(comm.phylum)[nrow(comm.phylum)]="Below_0.01"
#for character string truncation in R : http://stackoverflow.com/questions/10883605/truncating-the-end-of-a-string-in-r-after-a-character-that-can-be-present-zero-o
phylumnames=sub(".*p__", "", row.names(comm.phylum))
row.names(comm.phylum)=phylumnames
#assign fire classifications to samples
fireclass=map[,"Classification"]
p.active=comm.phylum[,fireclass=="FireAffected"]
p.recov=comm.phylum[,fireclass=="Recovered"]
p.ref=comm.phylum[,fireclass=="Reference"]
#Calculate a mean phylum rel. abundance across all of the samples that are within each activity group
m.active=apply(p.active,1,mean)
m.recov=apply(p.recov,1,mean)
m.ref=apply(p.ref,1,mean)
m.summary.p=cbind(m.active, m.recov,m.ref)
colnames(m.summary.p)=c("FireAffected", "Recovered", "Reference")
#sort in decreasing total abundance order
m.summary.p=m.summary.p[order(rowSums(m.summary.p),decreasing=TRUE),]
#plot (Figure 3)
m.summary.p.long=melt(m.summary.p) #melting the matrix yields Var1 (phylum), Var2 (classification), value
colors=c("red", "yellow", "green")
fig3=ggplot(m.summary.p.long, aes(x=Var1, y=value, fill=Var2))+
geom_dotplot(binaxis="y", dotsize = 3)+
facet_grid(Var2~.)+
scale_fill_manual(values=colors, guide=FALSE)+
labs(x="Phylum", y="Mean relative abundance", las=1)+
theme(axis.text.x = element_text(angle = 90, size = 10, face = "italic"))
fig3
ggsave("Figures/Fig3.eps", width=178, units="mm")
#Welch's t-test for all phyla
u=row.names(comm.phylum)
out=NULL
for(i in 1:length(u)){
#subset the data to test one phylum at a time
active=comm.phylum[row.names(comm.phylum)==u[i],fireclass=="FireAffected"]
recov=comm.phylum[row.names(comm.phylum)==u[i],fireclass=="Recovered"]
#perform the test
test=t.test(active, recov, paired=FALSE, var.equal = FALSE)
test.out=c(row.names(comm.phylum)[i],test$statistic, test$parameter, test$p.value)
out=rbind(out,test.out)
}
colnames(out)=c("Phylum", "Tstatistic", "DF", "pvalue")
#all results: Supporting Table 8
out
#out is a character matrix, so convert the test columns to numeric before filtering
pv=as.numeric(out[,"pvalue"]); ts=as.numeric(out[,"Tstatistic"])
#extract phyla overrepresented in fire-affected soils
out[pv<0.05 & ts>0,]
#extract phyla overrepresented in recovered soils
out[pv<0.05 & ts<0,]
#write.table(out, "Results/Phylum_ttest.txt",quote=FALSE, sep="\t")
################################
### Comparative (beta) diversity
################################
#load R libraries for this section
library(calibrate)
library(ggplot2)
library(vegan)
# use weighted unifrac
uf.pcoa=cmdscale(uf.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v=uf.pcoa$eig[1]/sum(uf.pcoa$eig)
ax2.v=uf.pcoa$eig[2]/sum(uf.pcoa$eig)
envEF=envfit(uf.pcoa, env)
#Supporting Table 4
envEF
unique(map$Classification)
Class=rep('black',nrow(map))
Class[map$Classification=="FireAffected"]='red'
Class[map$Classification=="Reference"]='green'
Class[map$Classification=="Recovered"]='yellow'
#export figure 2
#textxy is from the calibrate library
dev.off()
setEPS()
postscript("Figures/Fig2.eps", width = 3.385, height=3.385, pointsize=8,paper="special")
plot(uf.pcoa$points[,1],uf.pcoa$points[,2] ,cex=1.5,pch=21,bg=Class,main="Weighted UniFrac PCoA", xlab= paste("PCoA1: ",100*round(ax1.v,3),"% var. explained",sep=""), ylab= paste("PCoA2: ",100*round(ax2.v,3),"%var. explained",sep=""))
textxy(X=uf.pcoa$points[,1], Y=uf.pcoa$points[,2],labs=map$SampleID, cex=0.8)
legend('bottomleft',c('Fire Affected','Recovered','Reference'),pch=21,pt.bg=c("red", "yellow", "green"),lty=0)
plot(envEF, p.max=0.10, col="black", cex=1)
dev.off()
#perform hypothesis testing on fire-affected v. recovered+reference sites
#permanova
Class2=sub("green", "yellow", Class)
a=adonis(uf.d~Class2, distance=TRUE, permutations=1000)
a
#multivariate dispersion with Tukey HSD
b=betadisper(uf.d, group=Class2)
TukeyHSD(b, which = "group", ordered = FALSE,conf.level = 0.95)
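#a permutation-based alternative on the same dispersion object (hedged sketch):
#permutest(b, pairwise=TRUE, permutations=1000)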
#mantel w/ spatial distances
space=read.table("InputFiles/spatialdistancematrix.txt", header=TRUE, row.names=1)
space.d=as.dist(space)
mantel(uf.d,space.d)
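#a hedged follow-up sketch: a partial Mantel test asks whether community distance tracks
#temperature differences after controlling for spatial distance (objects as defined above):
#temp.d=dist(env[,"SoilTemperature_to10cm"])
#mantel.partial(uf.d, temp.d, space.d)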
################################
### Do different resemblances agree in their overarching patterns?
################################
#Supporting Table 3A:
#the variance explained by each distance (taxonomic/phylogenetic and weighted/unweighted)
bc.d=vegdist(t(comm), method="bray")
sor.d=vegdist(t(comm), method="bray",binary=TRUE)
# PCoA using unweighted unifrac (QIIME output - unweighted phylogenetic)
uwuf.pcoa=cmdscale(uwuf.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v.uwuf=uwuf.pcoa$eig[1]/sum(uwuf.pcoa$eig)
ax2.v.uwuf=uwuf.pcoa$eig[2]/sum(uwuf.pcoa$eig)
# PCoA using bray-curtis (vegan output - weighted taxonomic)
bc.pcoa=cmdscale(bc.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v.bc=bc.pcoa$eig[1]/sum(bc.pcoa$eig)
ax2.v.bc=bc.pcoa$eig[2]/sum(bc.pcoa$eig)
#PCoA using sorensen (vegan output - unweighted taxonomic)
sor.pcoa=cmdscale(sor.d, eig=TRUE)
#calculate percent variance explained, then add to plot
ax1.v.sor=sor.pcoa$eig[1]/sum(sor.pcoa$eig)
ax2.v.sor=sor.pcoa$eig[2]/sum(sor.pcoa$eig)
#Mantel and PROTEST tests between all resemblances (Supporting Table 3B)
resem=list(uf.d,uwuf.d,nwuf.d,bc.d,sor.d)
#loop over all unique pairs of resemblance matrices
names=c("weighted_UniFrac", "unweighted_UniFrac", "normalized_weighted_UniFrac", "BrayCurtis", "Sorensen")
m.out=NULL
for (i in 1:(length(resem)-1)){
dist1=resem[[i]]
print(i)
for(j in (i+1):length(resem)){
dist2=resem[[j]]
print(j)
#Mantel
m=mantel(dist1,dist2)
#Protest
pr=protest(dist1,dist2)
#results out
m.v=c(names[i], names[j],m$statistic, m$signif, pr$t0, pr$ss, pr$signif)
m.out=rbind(m.out,m.v)
}
}
#Supporting Table 3B
colnames(m.out)=c("Dist1", "Dist2", "Mantel_R", "Mantel_p", "PROTEST_R", "PROTEST_m12", "PROTEST_p")
m.out
#write.table(m.out, "Results/MantelDist.txt", quote=FALSE, sep="\t")
################################
### Comparative diversity of fire-affected samples
################################
#load R libraries for this section
library(vegan)
#reduce uf to fire only
uf.fire=uf[map$Classification=="FireAffected",map$Classification=="FireAffected"]
uf.fire.d=as.dist(uf.fire)
env.fire=env[map$Classification=="FireAffected",]
labels=map[map$Classification=="FireAffected","SampleID"]
#PCoA for fire sites only
uf.fire.pcoa=cmdscale(uf.fire.d, eig=TRUE)
#fit environmental variables
envFIT.fire=envfit(uf.fire.pcoa, env=env.fire)
#print results to screen (Supporting Table 5)
envFIT.fire
#df <- data.frame((envFIT.fire$vectors)$arrows, (envFIT.fire$vectors)$r, (envFIT.fire$vectors)$pvals)
#write.table(df, "Results/ENV_Fire.txt", quote=FALSE, sep="\t")
#calculate %var. explained by each axis
ax1.v.f=uf.fire.pcoa$eig[1]/sum(uf.fire.pcoa$eig)
ax2.v.f=uf.fire.pcoa$eig[2]/sum(uf.fire.pcoa$eig)
#CAP for fire-sites, constrained by temperature
#to determine explanatory value of abiotic factors for fire-affected sites, after temp is accounted for
#make vector of temperature only
temp=env.fire[,"SoilTemperature_to10cm"]
#CAP
cap1=capscale(uf.fire.d~Condition(temp))
#fit environmental variables
c.ef=envfit(cap1, env.fire)
#print results to screen (Supporting Table 6)
c.ef
#df <- data.frame((c.ef$vectors)$arrows, (c.ef$vectors)$r, (c.ef$vectors)$pvals)
#write.table(df, "Results/CAP.txt", quote=FALSE, sep="\t")
#calculate % var. explained by each axis
ax1.v.f.t=cap1$CA$eig[1]/sum(cap1$CA$eig)
ax2.v.f.t=cap1$CA$eig[2]/sum(cap1$CA$eig)
#Plot: supporting Figure 6
setEPS()
postscript("Figures/SFig6AB.eps", width = 6.770, height=3.385, pointsize=8,paper="special")
par(mfrow=c(1,2))
plot(uf.fire.pcoa$points[,1],uf.fire.pcoa$points[,2], main= "(A) Fire-affected soils PCoA", type="n",xlab=paste("PCoA1: ",100*round(ax1.v.f,3),"% var. explained",sep=""), ylab= paste("PCoA2: ",100*round(ax2.v.f,3),"% var. explained",sep=""))
textxy(X=uf.fire.pcoa$points[,1], Y=uf.fire.pcoa$points[,2],labs=labels, offset=0, cex=0.8)
plot(envFIT.fire, p=0.10)
plot(cap1, cex=0.9,main = "(B) Temperature-constrained \nfire-affected soils PCoA", xlab=paste("CAP Ax1: ",100*round(ax1.v.f.t,3),"%var. explained",sep=""), ylab=paste("CAP Ax2: ",100*round(ax2.v.f.t,3),"%var. explained",sep=""))
plot(c.ef, p= 0.10)
dev.off()
################################
### Sloan neutral model
################################
#NOTE: must use full dataset (including singleton OTUs) for this analysis
#Source for model fits is from Burns et al. ISMEJ 2015, downloaded R code from their supporting materials
#Source code requires: minpack.lm, Hmisc, stats4 packages - make sure they are installed (and their dependencies)
source("MiscSourceScripts/sncm.fit_function.r")
#assign variables for function
spp=t(comm.sigs)
taxon=as.vector(rdp)
ref.t.sigs=spp[map$Classification=="Reference",]
rec.t.sigs=spp[map$Classification=="Recovered",]
rec.t.sigs.NZ<- rec.t.sigs[,colSums(rec.t.sigs)>0]
fire.t.sigs=spp[map$Classification=="FireAffected",]
fire.t.sigs.NZ<-fire.t.sigs[,colSums(fire.t.sigs)>0]
#Models for the whole community
obs.np=sncm.fit(spp,taxon=rdp, stats=FALSE, pool=NULL)
sta.np=sncm.fit(spp,taxon=rdp, stats=TRUE, pool=NULL)
#Models for each classification
#fire affected: total - asks the question: do the fire-affected sites, by themselves, follow neutral expectations?
obs.fireT=sncm.fit(fire.t.sigs.NZ,taxon=rdp, stats=FALSE, pool=NULL)
sta.fireT=sncm.fit(fire.t.sigs.NZ,taxon=rdp, stats=TRUE, pool=NULL)
#recovered : total - asks the question: do recovered sites follow neutral expectations?
obs.recT=sncm.fit(rec.t.sigs.NZ,taxon=rdp, stats=FALSE, pool=NULL)
sta.recT=sncm.fit(rec.t.sigs.NZ,taxon=rdp, stats=TRUE, pool=NULL)
results=rbind(sta.np, sta.fireT, sta.recT)
row.names(results)=c("all", "Fire_Affected", "Recovered")
#par(mfrow=c(2,3)) #for plotting in R studio w/out export
l1=list(obs.np, obs.recT, obs.fireT)
l2=list(sta.np, sta.recT, sta.fireT)
names=c("(A) All", "(B) Recovered", "(C) Fire_Affected")
out.sta=NULL
#Plot supporting Fig 7 panels
for(i in 1:length(l1)){
#define data
temp=as.data.frame(l1[i])
sta=as.data.frame(l2[i])
#how many taxa are above their prediction, and below?
above.pred=sum(temp$freq > (temp$pred.upr), na.rm=TRUE)/sta$Richness
below.pred=sum(temp$freq < (temp$pred.lwr), na.rm=TRUE)/sta$Richness
out=c(above.pred, below.pred)
ap= temp$freq > (temp$pred.upr)
bp= temp$freq < (temp$pred.lwr)
#plot figure (SFig7)
setEPS()
if(i == 1){
postscript("Figures/SFig7A.eps", width = 2.33, height=3, pointsize=10,paper="special")
}
if (i == 2){
postscript("Figures/SFig7B.eps", width = 2.33, height=3, pointsize=10,paper="special")
}
if (i ==3){
postscript("Figures/SFig7C.eps", width = 2.33, height=3, pointsize=10,paper="special")
}
plot(x=log(temp$p), y=temp$freq, main=names[i], xlab="Log Abundance", ylab="Occurrence Frequency")
points(x=log(temp$p[ap==TRUE]), y=temp$freq[ap==TRUE], col="red", pch=19)
points(x=log(temp$p[bp==TRUE]), y=temp$freq[bp==TRUE], col="blue", pch=19)
lines(temp$freq.pred~log(temp$p), col="yellow", lty=1, lwd=6)
lines(temp$pred.upr~log(temp$p), col="yellow", lty=1, lwd=3)
lines(temp$pred.lwr~log(temp$p), col="yellow", lty=1, lwd=3)
dev.off()
out.sta=rbind(out.sta, out)
}
colnames(out.sta)=c("%AbovePred", "%BelowPred")
#Supporting Table 7
results=cbind(results, out.sta)
results
#write.table(results, "Results/SloanNeutralModel.txt", quote=FALSE, sep="\t")
################################
### Beta null models
################################
#MODIFIED by als to use our dataset (comm.t) instead of "dune" and to only include the abundance-based model. We also changed the number of patches to be 18 to match the dataset.
#ORIGINAL scripts available in the appendix of the work below, published in Oikos (Appendix oik.02803, also R_analysis/oik-02803-appendix-to-Tucker2016/)
#Note that beta null models with weighted UniFrac require ~75 hours walltime to complete with 4Gb memory and 1 processing node; beta-null models with Bray-Curtis only require ~30 hours
#######################
### Code for example metacommunity simulation and beta-null deviation calculations
### with "Differentiating between niche and neutral assembly in metacommunities using
### null models of beta-diversity"
### Prepared May 14, 2014
### Authors Caroline Tucker, Lauren Shoemaker, Brett Melbourne
#######################
## Load required source files and libraries
library(reldist)
library(vegan)
library(bipartite)
source("oik-02803-appendix-to-Tucker2016/MetacommunityDynamicsFctsOikos.R")
source("oik-02803-appendix-to-Tucker2016/PANullDevFctsOikos.R")
##packages for UniFrac Null Model (weighted) #als add
library(GUniFrac)
library(ape)
library(phangorn)
tree <- read.tree("MASTER_RepSeqs_aligned_clean.tre")
is.rooted(tree)
#https://github.com/joey711/phyloseq/issues/235
#FastUniFrac trees are unrooted; calculation is done using mid-point root.
tree <- midpoint(tree)
is.rooted(tree)
#formatting problem with tree tip labels - for some reason the de novo OTU tips carry extra quotes that need to be removed
tree$tip.label=gsub("'","", tree$tip.label)
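#a hedged consistency check before the GUniFrac calls below: every OTU in the
#community table should appear among the cleaned tree tip labels
#all(colnames(comm.t) %in% tree$tip.label)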
### Prepare and calculate abundance beta-null deviation metric
## Adjusted from Stegen et al 2012 GEB
bbs.sp.site <- comm.t
patches=nrow(bbs.sp.site)
rand <- 999
#note - two randomization runs in < 8 min on my laptop
null.alphas <- matrix(NA, ncol(comm.t), rand)
null.alpha <- matrix(NA, ncol(comm.t), rand)
expected_beta <- matrix(NA, 1, rand)
null.gamma <- matrix(NA, 1, rand)
null.alpha.comp <- numeric()
bucket_bray_res <- matrix(NA, patches, rand)
bucket_wuf_res <- matrix(NA, patches, rand) #als add
bbs.sp.site = ceiling(bbs.sp.site/max(bbs.sp.site))
mean.alpha = sum(bbs.sp.site)/nrow(bbs.sp.site) #mean.alpha
gamma <- ncol(bbs.sp.site) #gamma
obs_beta <- 1-mean.alpha/gamma
obs_beta_all <- 1-rowSums(bbs.sp.site)/gamma
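#worked example of the additive beta used here (illustrative numbers only): with a
#mean alpha of 500 OTUs per patch and gamma = 5000 OTUs, obs_beta = 1 - 500/5000 = 0.9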
##Generate null patches
for (randomize in 1:rand) {
null.dist = comm.t
for (species in 1:ncol(null.dist)) {
tot.abund = sum(null.dist[,species])
null.dist[,species] = 0
for (individual in 1:tot.abund) {
sampled.site = sample(c(1:nrow(bbs.sp.site)), 1)
null.dist[sampled.site, species] = null.dist[sampled.site, species] + 1
}
}
##Calculate null deviation for null patches and store
null.alphas[,randomize] <- apply(null.dist, 2, function(x){sum(ifelse(x > 0, 1, 0))})
null.gamma[1, randomize] <- sum(ifelse(rowSums(null.dist)>0, 1, 0))
expected_beta[1, randomize] <- 1 - mean(null.alphas[,randomize]/null.gamma[,randomize])
null.alpha <- mean(null.alphas[,randomize])
null.alpha.comp <- c(null.alpha.comp, null.alpha)
bucket_bray <- as.matrix(vegdist(null.dist, "bray"))
wuf<-(GUniFrac(null.dist, tree, alpha=1)) #als add
#wuf<-(GUniFrac(comm.t, tree, alpha=1)) #als add test that comparable values are calculated as with QIIME
bucket_wuf <- as.matrix(wuf$unifracs[,,"d_1"]) #als add
diag(bucket_bray) <- NA
diag(bucket_wuf) <- NA #als add
bucket_bray_res[,randomize] <- apply(bucket_bray, 2, FUN="mean", na.rm=TRUE)
bucket_wuf_res[,randomize] <- apply(bucket_wuf, 2, FUN="mean", na.rm=TRUE) #als add
} ## end randomize loop
## Calculate beta-diversity for obs metacommunity
beta_comm_abund <- vegdist(comm.t, "bray")
wuf_comm_abund <- GUniFrac(comm.t, tree, alpha=1) #als add
res_beta_comm_abund <- as.matrix(as.dist(beta_comm_abund))
res_wuf_comm_abund <- as.matrix(as.dist(wuf_comm_abund$unifracs[,,"d_1"])) #als add
diag(res_beta_comm_abund) <- NA
diag(res_wuf_comm_abund) <- NA #als add
# output beta diversity (Bray)
beta_div_abund_stoch <- apply(res_beta_comm_abund, 2, FUN="mean", na.rm=TRUE)
wuf_div_abund_stoch <- apply(res_wuf_comm_abund, 2, FUN="mean", na.rm=TRUE) #als add
# output abundance beta-null deviation
bray_abund_null_dev <- beta_div_abund_stoch - mean(bucket_bray_res)
wuf_abund_null_dev <- wuf_div_abund_stoch - mean(bucket_wuf_res) #als add
### Outputs (only the abundance-based quantities are computed in this modified script):
#beta_div_abund_stoch / wuf_div_abund_stoch - Bray-Curtis / weighted-UniFrac beta-diversity for the metacommunity, average value (of all pairwise comparisons) for each patch
#bray_abund_null_dev / wuf_abund_null_dev - abundance null deviation values for the metacommunity, average value (of all pairwise comparisons) for each patch
###
#END script by Tucker et al.
#######################
#plotting and statistical tests
betanull.out=data.frame(I(beta_div_abund_stoch),I(bray_abund_null_dev),I(wuf_div_abund_stoch),I(wuf_abund_null_dev),I(map[,"SampleID"]),as.character(map[,"Classification"]), as.numeric(map[,"SoilTemperature_to10cm"]), stringsAsFactors=FALSE)
colnames(betanull.out)=c("BRAY_beta_div_abund_stoch", "BRAY_AbundanceNullDeviation", "WUF_div_abund_stoch","WUF_AbundanceNullDeviation","SampleID","Classification", "SoilTemperature_to10cm")
#write.table(betanull.out, "Results/bnullout_r1.txt", quote=FALSE, sep="\t")
#betanull.out=read.table("Results/bnullout_r1.txt", header=TRUE, sep="\t")
##plottingorder orders samples along a chronosequence and disturbance intensity gradient: 1) reference samples; 2) fire-affected sites, ranked from hottest to coolest soil temperature; and 3) recovered sites, ranked from hottest to coolest soil temperature
plottingorder=c(13,15,12,17,14,9,16,1,6,4,11,8,3,7,5,10,2,18)
library("reshape2")
bnull.long=melt(betanull.out, id.vars=c("SampleID", "Classification","SoilTemperature_to10cm"), measure.vars=c("BRAY_AbundanceNullDeviation", "WUF_AbundanceNullDeviation"))
GnYlOrRd=colorRampPalette(colors=c("green", "yellow", "orange","red"), bias=2)
fig4A <- ggplot(data=bnull.long, aes(x=Classification, y=as.numeric(value)))+
geom_boxplot()+
geom_jitter(aes(color=as.numeric(SoilTemperature_to10cm), y=as.numeric(value)))+
facet_grid(variable~., scales="free_y")+
scale_size(guide=FALSE)+
scale_color_gradientn(colours=GnYlOrRd(5), guide=guide_colorbar(title="Temp"))+
scale_x_discrete(name="Fire classification", limits=c("Reference", "FireAffected", "Recovered"))+
scale_y_continuous(name="Abundance Null Deviation")+
theme_bw(base_size=10)
fig4A
bnull.long.bray=bnull.long[bnull.long[,"variable"]=="BRAY_AbundanceNullDeviation",]
fig4B <- ggplot(data=bnull.long.bray, aes(x=plottingorder, y=as.numeric(value)))+
geom_point(aes(color=as.numeric(SoilTemperature_to10cm), y=as.numeric(value)))+
scale_size(guide=FALSE)+
scale_color_gradientn(colours=GnYlOrRd(5), guide=guide_colorbar(title="Temperature (Celsius)"))+
scale_x_continuous(name="Disturbance Intensity", breaks=c(1.5,7,15), labels=c("Ref", "FireAffected", "Recovered"))+
scale_y_continuous(name="Abundance Null Deviation")+
geom_vline(xintercept=c(2.5,11.5), col="gray", lty="dashed")+
theme_bw(base_size=10)+
theme(legend.position="none")
fig4B
bnull.long.wuf=bnull.long[bnull.long[,"variable"]=="WUF_AbundanceNullDeviation",]
fig4C <- ggplot(data=bnull.long.wuf, aes(x=plottingorder, y=as.numeric(value)))+
geom_point(aes(color=as.numeric(SoilTemperature_to10cm), y=as.numeric(value)))+
scale_size(guide=FALSE)+
scale_color_gradientn(colours=GnYlOrRd(5), guide=guide_colorbar(title="Temperature (Celsius)"))+
scale_x_continuous(name="Disturbance Intensity", breaks=c(1.5,7,15), labels=c("Ref", "FireAffected", "Recovered"))+
scale_y_continuous(name="Abundance Null Deviation")+
geom_vline(xintercept=c(2.5,11.5), col="gray", lty="dashed")+
theme_bw(base_size=10)+
theme(legend.position="none")
fig4C
#Multiplot script written by Winston Chang
source("MiscSourceScripts/multiplot.R")
dev.off()
setEPS()
postscript("Figures/Fig4ABC.eps", width = 3.385, height=5, pointsize=9,paper="special")
multiplot(fig4A, fig4B, fig4C, cols=1)
dev.off()
#Pairwise t-tests for Bray Beta Null
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","BRAY_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","BRAY_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","BRAY_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="Reference","BRAY_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Reference","BRAY_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","BRAY_AbundanceNullDeviation"])
#recovered and fire-affected are statistically distinct, p < 0.0006, all other comparisons p > 0.05
#Pairwise t-tests for WUF Beta Null
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","WUF_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","WUF_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Recovered","WUF_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="Reference","WUF_AbundanceNullDeviation"])
t.test(betanull.out[betanull.out[,"Classification"]=="Reference","WUF_AbundanceNullDeviation"],betanull.out[betanull.out[,"Classification"]=="FireAffected","WUF_AbundanceNullDeviation"])
#recovered and fire-affected are distinct, p < 0.04, all other comparisons p > 0.05
#Are the WUF and Bray beta null correlated?
cor.test(bnull.long.wuf[,"value"], bnull.long.bray[,"value"])
#Pearson's R = 0.71, p = 0.001
################################
### Dominant taxa analysis
################################
#Extract cumulative most abundant OTUs from fire-affected soils - for Table 1
fire=t(fire.t)
dim(fire)
fire.ordered=fire[order(rowSums(fire),decreasing=TRUE),]
perc=rowSums(fire.ordered)/sum(rowSums(fire.ordered))
#Analysis of the top 10 most prevalent taxa in fire-affected and recovered soils
#libraries needed for this
library(vegan)
library(gplots)
#Do hot soils have consistent dominant membership?
fire.new=fire[rowSums(fire)>0,]
rdp.fire=as.vector(rdp.nosigs[rowSums(fire)>0])
dim(fire.new)
rec=t(rec.t)
rec.new=rec[rowSums(rec)>0,]
rdp.rec=as.vector(rdp.nosigs[rowSums(rec)>0])
dim(rec.new)
#Function to report the OTU numbers and taxonomic IDs of the top (default=10) OTUs in each site.
extractdominant.f<-function(data,rdp,top.no=10){
out1=NULL
out2=NULL
for(i in 1:ncol(data)){
s=sort(data[,i], decreasing=TRUE, index.return=TRUE)
otuIDs=names(s$x[1:top.no])
rdp.out=rdp[s$ix[1:top.no]]
sampleID=c(rep(colnames(data)[[i]],top.no))
temp=cbind(sampleID,otuIDs)
out1=rbind(out1,temp)
out2=cbind(out2,rdp.out)
}
colnames(out2)=colnames(data)
#write.table(out2, paste("Results/rdp_",top.no,".txt",sep=""), quote=FALSE, sep="\t")
#who are the top-10 ranked
u=unique(out1[,2])
l=length(unique(out1[,2]))
actual.prop=l/dim(out1)[[1]]
expected.prop=top.no/dim(out1)[[1]]
print("Unique OTU IDs within the most abundant")
print(u)
print("Number of unique OTUs within the most abundant")
print(l)
print("Redundancy index given the number of samples and the top number selected 1.00 means completely nonredundant, every top taxa was observed only 1 time across all samples")
print(actual.prop)
print("Expected redundancy index")
print(expected.prop)
#print("List of top taxa by sample")
#print(out2)
return(out2)
}
fire.out=extractdominant.f(fire.new,rdp.fire,10)
rec.out=extractdominant.f(rec.new,rdp.rec,10)
#(manual values for stepping through the function below; the function is called with explicit arguments afterward)
#data=fire.new
#top.no=10
#rdp.in=rdp.fire
subsettop.f=function(data, top.no, rdp.in){
otuIDs=NULL
rdpIDs=NULL
for(i in 1:ncol(data)){
s=sort(data[,i], decreasing=TRUE, index.return=TRUE)
otuIDs=c(otuIDs, names(s$x[1:top.no]))
rdpIDs=c(rdpIDs, rdp.in[s$ix[1:top.no]])
}
temp=cbind(otuIDs,rdpIDs)
#print(temp)
u.top=unique(otuIDs)
#temp.u=temp[is.element(temp[,"otuIDs"],u.top),]
#write.table(temp.u, "Results/OTURDP_Top10.txt", sep="\t", quote=FALSE)
#subset the full table to the union of top OTUs; direct subsetting keeps row and column names aligned
top10.otu=data[is.element(row.names(data),u.top),]
return(top10.otu)
}
topfire=subsettop.f(fire.new,10,rdp.fire)
#how many OTUs are de novo?
length(grep("dn",rownames(topfire)))
#create color palette; see: http://colorbrewer2.org/
hc=colorRampPalette(c("#91bfdb","white","#fc8d59"), interpolate="linear")
topfire.pa=1*(topfire>0)
#how many of the top OTUs are present in all nine fire-affected sites?
sum(rowSums(topfire.pa)==9)
toprec=subsettop.f(rec.new,10, rdp.rec)
#how many OTUs are de novo
length(grep("dn",rownames(toprec)))
#Figure 5
dev.off()
setEPS()
postscript("Figures/Fig5A.eps", width = 3.5, height=7, pointsize=10, paper="special")
heatmap.2(topfire,col=hc(100),scale="column",key=TRUE,symkey=FALSE, trace="none", density.info="none",dendrogram="both", margins=c(5,13), srtCol=90)
dev.off()
setEPS()
postscript("Figures/Fig5B.eps", width = 3.5, height=7, pointsize=10, paper="special")
heatmap.2(toprec,col=hc(100),scale="column",key=TRUE,symkey=FALSE, trace="none", density.info="none",dendrogram="both", margins=c(5,13), srtCol=90)
dev.off()
#######################################################################
#### File: /res/store-load-11.r (repo: HeitorBRaymundo/861, no license)
#### Contents appear to be a 6502 CPU register trace (expected emulator
#### test output) stored as plain text, not executable R code.
#######################################################################
| pc = 0xc002 | a = 0x00 | x = 0x15 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x00 | x = 0x15 | y = 0x0b | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc006 | a = 0x00 | x = 0x15 | y = 0x0b | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0050] = 0x15 |
| pc = 0xc007 | a = 0x00 | x = 0x16 | y = 0x0b | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc009 | a = 0x00 | x = 0x15 | y = 0x0b | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0050] = 0x15 |
| pc = 0xc00a | a = 0x00 | x = 0x16 | y = 0x0b | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
#######################################################################
#### File: /Script/Week_4_Assignment_ST.R
#### (repo: gge-ucd/r-davis-in-class-Sadie-Trombley, no license)
#######################################################################
# Download once, then read back from the same path (directory name case must match).
download.file(url = "https://ndownloader.figshare.com/files/2292169", destfile = "data/portal_daa_joined.csv")
surveys <- read.csv(file = "data/portal_daa_joined.csv")
head(surveys)
surveys[, -c(2:4)]   # drop columns 2 through 4
surveys[1:400, 1]    # first 400 rows of column 1
surveys[1:400, 1:8]  # first 400 rows of the first 8 columns
col158 <- c(1, 5, 6, 7, 8)
surveys[1:400, col158]
# Keep the subset as a data frame: wrapping it in c() coerces it to a plain list,
# which is what broke the later $-subsetting and hist() attempts.
surveys_subset <- surveys[1:400, col158]
surveys_subset
# which() needs the object named inside the $ call; a bare $hindfoot_length is a syntax error.
surveys_subset[which(surveys_subset$hindfoot_length > 32), ]
# hindfoot_length is already numeric; the NA values (not the class) were the problem.
surveys_long_feet <- subset(surveys_subset, hindfoot_length > 32)  # subset() drops rows where the condition is NA
surveys_long_feet
# hist() requires a numeric vector, so do not convert the column to character first.
hist(surveys_long_feet$hindfoot_length)
#######################################################################
#### File: /inst/extdata/Add_GDSC_1_2_cell_lines.R
#### (repo: CBIIT/rcellminerUtilsCDB, no license)
#######################################################################
#--------------------------------------------------------------------------------------------------
#Adding new data source: GDSC1 and GDSC2
#
#--------------------------------------------------------------------------------------------------
# There are 970 and 969 cell lines
#
# ---------------------------------------------------------
cann1 = read.csv("./inst/extdata/gdsc1_cell_ann_curated.csv", row.names = 1, stringsAsFactors = F, check.names = F)
cann2 = read.csv("./inst/extdata/gdsc2_cell_ann_curated.csv", row.names = 1, stringsAsFactors = F, check.names = F) # includes 7 new cell lines
dim(cann1); dim(cann2)
# 970 - 24 ; 969 - 24
# length(intersect(cann1$Sample.Name, gdsc1Data::drugData@sampleData@samples$Name)) # 970
# length(intersect(cann2$Sample.Name, gdsc2Data::drugData@sampleData@samples$Name)) # 969
ctable=rcellminerUtilsCDB::cellLineMatchTab
dim(ctable) # 2068 - 28
## GDSC1------------------------------------------------------------
ctable$gdsc1 = NA
ind1 = match(cann1$Sample.Name.Old, ctable$gdscDec15)
## notf1 =which(is.na(ind1)) # zero
# kk = which(cann1$Sample.Name!=cann1$Sample.Name.Old)
# View(cann1[kk,])
##
ctable$gdsc1[ind1] = cann1$Sample.Name
## GDSC2 -----------------------------------------------------------
ctable$gdsc2 = NA
ind2 = match(cann2$Sample.Name.Old, ctable$gdscDec15)
notf2 = which(is.na(ind2))
length(notf2) # 7
ctable$gdsc2[ind2[-notf2]] = cann2$Sample.Name[-notf2]
# add the 7 new cell lines (absent from the Dec15 release) by cellosaurus accession
cvcls = cann2$`cellosaurus ID`[which(cann2$`cellosaurus ID`!="")]
newc = cann2$Sample.Name[which(cann2$`cellosaurus ID`!="")]
ind3 = match(cvcls, ctable$cellosaurus_accession)
ctable$gdsc2[ind3] = newc
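# A hedged sanity check: every curated cellosaurus accession is assumed to match ctable
# (an NA in ind3 would make the assignment above fail)
# stopifnot(!any(is.na(ind3)))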
# View(ctable[ind3,])
## OK done for both datasets !!!
length(which(!is.na(ctable$gdsc1))) # 970
length(which(!is.na(ctable$gdsc2))) # 969
length(which(!is.na(ctable$gdsc1) & !is.na(ctable$gdsc2) )) # 961
## ======================
dim(ctable) # 2068 30
cellLineMatchTab <- ctable
save(cellLineMatchTab, file = "data/cellLineMatchTab.RData")
## END
#######################################################################
#### File: /Desktop/Coursera/Getting and Cleaning Data/Getting-Cleaning-Data-Project/run_analysis.R
#### (repo: dctb13/Getting-And-Cleaning-Data, no license)
#######################################################################
# Script for Getting and Cleaning Data Course Project
# You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# Please make sure the plyr and dplyr packages are installed; if not,
# run the following line of code:
# install.packages(c("plyr", "dplyr"))
require(plyr)
require(dplyr)
# 1. Merges the training and the test sets to create one data set.
#set working directory
setwd("C:/Users/DJ/Desktop/Coursera/Getting and Cleaning Data/Course Project/getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset")
# Read all tables into R
actLabels <- read.table("./activity_labels.txt", col.names = c("ActivityID", "Activity"))
features <- read.table("./features.txt", colClasses = "character")
# test data
testX <- read.table("./test/X_test.txt")
testY <- read.table("./test/Y_test.txt")
subjectTest <- read.table("./test/subject_test.txt")
#train data
trainX <- read.table("./train/X_train.txt")
trainY <- read.table("./train/Y_train.txt")
subjectTrain <- read.table("./train/subject_train.txt")
# Combine the two data sets
Test <- cbind(testY,cbind(subjectTest, testX))
Train <- cbind(trainY,cbind(subjectTrain, trainX))
TestAndTrain <- rbind(Test, Train)
# Label Columns
TestAndTrainLabels <- rbind(c(562, "ActivityID"), c(563, "SubjectID"), features)
names(TestAndTrain) <- TestAndTrainLabels[,2]
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
TestAndTrainSubset <- TestAndTrain[,grepl("mean[()]|std[()]|ActivityID|SubjectID", names(TestAndTrain))]
# 3. Uses descriptive activity names to name the activities in the data set
TestAndTrainSubset <- inner_join(TestAndTrainSubset, actLabels, by = "ActivityID")
# 4. Appropriately labels the data set with descriptive variable names
names(TestAndTrainSubset) <- gsub("mean[(][)]", "Mean_Value", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("std[(][)]", "Standard_Deviation", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("[-]", "_", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("^t", "Time_", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("^f", "Frequency_", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("Acc", "Acceleration", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("Mag", "Magnitude", names(TestAndTrainSubset))
names(TestAndTrainSubset) <- gsub("Gyro", "Gyroscopic", names(TestAndTrainSubset))
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
TidyDataSubset <- arrange(ddply(TestAndTrainSubset, c("Activity", "SubjectID"), numcolwise(mean)), ActivityID)
write.table(TidyDataSubset, file = "TidyDataSubset.txt")
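# To read the tidy data set back into R for checking (sketch):
# check <- read.table("TidyDataSubset.txt", header = TRUE)
# str(check)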
% File: /man/raw.Rd (repo: seriph78/COTAN_stable, no license)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{raw}
\alias{raw}
\title{Raw sample dataset}
\format{
A data frame with 2000 genes and 815 cells.
}
\source{
GEO GSM2861514
}
\usage{
raw
}
\description{
A subsample of a real sc-RNAseq dataset
}
\keyword{datasets}
#######################################################################
#### File: /tests/testthat/test-overlapping-areas.R (repo: r-gris/torpor, no license)
#######################################################################
context("overlapping-areas")
library(sf)
p_self <- st_sfc(st_polygon(list(cbind(c(0, 0, 1, -0.1, -0.1, 1.5, 0),
c(0, 1, 1, 0.2, 1.2, 1.2, 0)))))
# plot(p_self) # uncomment to inspect the self-intersecting input polygon
p_valid <- st_sfc(st_polygon(list(cbind(c(0, 0, 1, 0, 0, -0.1, -0.1, 1.5, 0),
c(0, 0.272727272727272, 1, 1, 0.272727272727272, 0.2, 1.2, 1.2, 0)))))
# st_sfc(p_self, p_valid) # combine the two geometries for side-by-side comparison
test_that("the triangulate and rebuild fix works", {
expect_equal(fix_overlapping_area(p_self), p_valid)
})
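# A further hedged check (assumes fix_overlapping_area() returns sf geometry):
# the repaired polygon should pass sf's own validity test
test_that("the repaired polygon is valid", {
  expect_true(all(st_is_valid(fix_overlapping_area(p_self))))
})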
#######################################################################
#### File: /analysis_ChEA.R (repo: aprilflow/KRAS-RIT1-profiling, no license)
#######################################################################
#######################################################################
#### Plot ChEA results
#######################################################################
## Goal: Analyze ChEA results and generate analysis plots
## Data structures:
## - df_DE
#######################################################################
#### SET UP
#######################################################################
source("../scripts/R/load.R")
#######################################################################
#### Set relevant input and output directories
#######################################################################
## Inputs
dataDirs <- c("./")
## Outputs
dataDir <- "../results/AALE_KRAS-RIT1/ChEA"
resultsDir <- "../results/AALE_KRAS-RIT1/ChEA"
figsDir <- "../results/AALE_KRAS-RIT1/Figures"
load("df_DE.RData")
perturbations <- unique(df_DE$Perturbation)
#######################################################################
#### Load ChEA3 output files
#######################################################################
libraries <- c("ENCODE_ChIP-seq",
"ReMap_ChIP-seq",
"Literature_ChIP-seq",
"ARCHS4_Coexpression",
"GTEx_Coexpression",
"Enrichr_Queries")
## Spot-check that each library's result file reads cleanly
## (foo/bar are overwritten on each pass, so only the last pair is kept;
## "lib" avoids masking the base function library())
for (lib in libraries) {
  foo <- fread(paste0(dataDir, "/RIT1-M90I_Renilla_", lib, ".tsv"))
  bar <- fread(paste0(dataDir, "/KRAS-G12V_Renilla_", lib, ".tsv"))
}
#######################################################################
#### Plot top enriched TFs (Enrichr query)
#######################################################################
df_enrichr <- data.frame(Query_Name = character(),
Rank = integer(),
Scaled_Rank = double(),
Set_name = character(),
TF = character(),
Intersect = integer(),
FET_pvalue = double(),
FDR = double(),
Odds_Ratio = double(),
Library = character())
## Load Enrichr Queries results table
for (perturb in perturbations) {
foo <- fread(paste0(dataDir, "/", perturb, "_Renilla_Enrichr_Queries.tsv"))
foo <- as_tibble(foo) %>%
mutate(Perturbation = perturb)
names(foo) <- gsub(" ", "_", names(foo))
names(foo) <- gsub("p-value", "pvalue", names(foo))
print(head(foo))
dim(filter(foo, FDR < 0.05))
df_enrichr <- bind_rows(df_enrichr, foo)
}
df_enrichr <- as_tibble(df_enrichr)
EMT_gene_list_short <- c("HIC1", "TWIST1", "HOXD9", "FOXQ1", "FOSL1")
df_EMT_genes <- data.frame(TF = EMT_gene_list_short, EMT_status = "DB-Confirmed")
EMT_gene_list <- c("PRRX2", "FOXS1", "FOXC2", "BNC1", "FOSB", "HOXC6", "HOXB9", "FOXF1", "SIM2", "FOXL1", "EHF", "EGR2", "ATF3", "ZNF750", "FOXF2", "HOXC8", "JDP2", "HOXA10", "OVOL1", "ELF5", "SOX7", "MSX2", "ASCL2", "RELB", "HEY1")
df <- data.frame(TF = EMT_gene_list, EMT_status = "Literature")
df_EMT_genes <- bind_rows(df_EMT_genes, df)
df <- data.frame(TF = c("SNAI1", "SNAI2"), EMT_status = "SNAIL")
df_EMT_genes <- bind_rows(df_EMT_genes, df)
## Take top 25 enriched TFs by p-value
for (perturb in perturbations) {
df_plot <- df_enrichr %>%
filter(Perturbation == perturb) %>%
arrange(FET_pvalue) %>%
dplyr::slice(1:25) %>%
left_join(df_EMT_genes)
df_plot$EMT_status[is.na(df_plot$EMT_status)] <- "No"
xmax <- max(-log10(df_plot$FET_pvalue))
colors <- c("#de2d26", "#fc9272", "#fee0d2", "#bdbdbd")
names(colors) <- c("SNAIL", "DB-Confirmed", "Literature", "No")
## Plot horizontal bar plot of top 25 TF p-values
g <- ggplot(df_plot) +
geom_col(aes(x = reorder(TF, -FET_pvalue),
y = -log10(FET_pvalue),
fill = EMT_status),
width = .75) +
coord_flip() +
scale_fill_manual(values = colors,
guide = FALSE) +
scale_x_discrete(name = "Transcription Factor") +
scale_y_continuous(name = "-log10(FET p-value)",
limits = c(0, xmax+2),
expand = c(0,0)) +
theme_classic(base_size = 24) +
theme(axis.ticks.y = element_blank(),
axis.text.x = element_text(margin = margin(t = 6, b = 8),
size = 24),
axis.text.y = element_text(margin = margin(r = 4, l = 6),
size = 20))
ggsave(plot = g, file = paste0("/", perturb, "_Enrichr-top25-bars.pdf"),
device = "pdf",
height = 8,
width = 10,
path = resultsDir)
}
#######################################################################
#### Compare KRAS and RIT1 Enrichr query results
#######################################################################
foo <- fread(paste0(dataDir, "/RIT1-M90I_Renilla_Enrichr_Queries.tsv"))
bar <- fread(paste0(dataDir, "/KRAS-G12V_Renilla_Enrichr_Queries.tsv"))
df_enrichr <- inner_join(foo, bar,
by = c("Query Name", "TF", "Library"),
suffix = c("_RIT1-M90I", "_KRAS-G12V"))
df_enrichr %>% filter(TF == "EHF")
df_plot <- df_enrichr
g <- ggplot(df_plot) +
geom_point(aes(x = -log10(`FET p-value_KRAS-G12V`), y = -log10(`FET p-value_RIT1-M90I`)),
alpha = 0.5, size = 2, shape = 16) +
geom_abline(color = "grey") +
scale_x_continuous(limits = c(0, 47)) +
scale_y_continuous(limits = c(0, 35)) +
theme_classic(base_size = 18)
ggsave(g, file = "/Enrichr_pval_scatterplot.pdf",
device = "pdf",
height = 6,
width = 6,
path = resultsDir)
foo1 <- mutate(foo, Perturbation = "RIT1-M90I")
bar1 <- mutate(bar, Perturbation = "KRAS-G12V")
df_plot <- bind_rows(foo1, bar1) %>%
mutate(Perturbation = factor(Perturbation,
levels = c("KRAS-G12V", "RIT1-M90I")))
g <- ggplot(df_plot, aes(x = Perturbation, y = -log10(FET_pvalue), group = TF)) +
geom_point() +
stat_summary(geom="line") +
theme_classic(base_size = 18)
ggsave(g, file = "/Enrichr_difference.pdf",
device = "pdf",
height = 7,
width = 6,
path = resultsDir)
#######################################################################
#### Compare KRAS and RIT1 overall TF enrichment
#######################################################################
## Score := Mean Integrated Rank
## Library := Rank of TF in each library analysis that could be performed
foo <- as_tibble(fread(paste0(dataDir, "/RIT1-M90I_Renilla_Integrated_meanRank.tsv")))
bar <- as_tibble(fread(paste0(dataDir, "/KRAS-G12V_Renilla_Integrated_meanRank.tsv")))
df_chea <- inner_join(foo, bar,
by = c("Query Name", "TF"),
suffix = c("_RIT1_M90I", "_KRAS_G12V"))
df_chea %>% filter(TF == "EHF")
df_plot <- df_chea
g <- ggplot(df_plot) +
geom_point(aes(x = Score_KRAS_G12V, y = Score_RIT1_M90I),
alpha = 0.5, size = 2, shape = 16) +
geom_abline(color = "grey") +
theme_classic(base_size = 18)
ggsave(g, file = "/MeanRank_scatterplot.pdf",
device = "pdf",
height = 6,
width = 6,
path = resultsDir)
df_chea_top <- df_chea %>%
filter(Rank_RIT1_M90I < 50 | Rank_KRAS_G12V < 50) %>%
mutate(score_diff = abs(Score_RIT1_M90I - Score_KRAS_G12V))
# filter(score_diff > 200)
foo1 <- mutate(foo, Perturbation = "RIT1-M90I")
bar1 <- mutate(bar, Perturbation = "KRAS-G12V")
df_plot <- bind_rows(foo1, bar1) %>%
mutate(Perturbation = factor(Perturbation,
levels = c("KRAS-G12V", "RIT1-M90I"))) %>%
filter(TF %in% df_chea_top$TF)
g <- ggplot(df_plot, aes(x = Perturbation, y = Score, group = TF)) +
geom_point() +
stat_summary(geom="line", alpha = 0.5) +
theme_classic(base_size = 18)
ggsave(g, file = "/MeanRank_difference_top.pdf",
device = "pdf",
height = 6,
width = 6,
path = resultsDir)
| /analysis_ChEA.R | no_license | aprilflow/KRAS-RIT1-profiling | R |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat.R
\name{ca.test}
\alias{ca.test}
\alias{ca.test.default}
\alias{ca.test.formula}
\title{Cochran-Armitage test for trend}
\usage{
ca.test(x, ...)
\method{ca.test}{default}(x, g, ..., score = NULL, simulate.p.value = FALSE, B = 2000L)
\method{ca.test}{formula}(formula, data, ...)
}
\arguments{
\item{x}{a factor-like vector giving the (unordered) variable (equivalently
the row variable of a contingency table);
alternatively, \code{x} can be a \code{2 x c} table or matrix with
exactly two rows and at least three ordered columns; \code{x} may also
be a list of the row variable split by the ordered column variable in
which case the list is assumed to be ordered, i.e.,
\code{x[[1]] < x[[2]] < ... < x[[c]]}; see examples}
\item{...}{further arguments to be passed to or from methods}
\item{g}{a factor-like vector giving the \emph{ordered} group for each
corresponding element of \code{x}, ignored with a warning if \code{x} is
a list or table; if \code{g} is not a factor, it will be coerced, and
groups will be ordered as sort(unique(g)); see \code{\link{factor}}}
\item{score}{group score for each column, default is \code{1:ncol}}
\item{simulate.p.value}{logical; if \code{TRUE}, the p-value is computed
by Monte Carlo simulation}
\item{B}{an integer specifying the number of replicates used in the Monte
Carlo test}
\item{formula}{a formula of the form \code{row ~ column} where \code{row}
gives the row variable having two unique values and \code{column} gives
the \emph{ordered} column variable}
\item{data}{an optional matrix or data frame (or similar: see
\code{\link{model.frame}}) containing the variables in \code{formula};
by default the variables are taken from \code{environment(formula)}}
}
\value{
A list with class "\code{htest}" containing the following elements:
\item{\code{statistic}}{the chi-squared test statistic}
\item{\code{parameter}}{the degrees of freedom of the approximate chi-
squared distribution of the test statistic}
\item{\code{p.value}}{the p-value of the test (two-sided)}
\item{\code{method}}{a character string describing the test, and,
optionally, the number of Monte Carlo replications, if applicable}
\item{\code{data.name}}{a character string giving the names of the data}
\item{\code{conf.int}}{optionally (if \code{simulate.p.value = TRUE}),
the 99\% confidence interval of the Monte Carlo p-value}
\item{\code{summary}}{optionally (if \code{simulate.p.value = TRUE}),
a summary of the simulated test statistics}
}
\description{
Performs a Cochran-Armitage chi-squared test for trend in proportions for
a \code{2 x c} contingency table with a nominal row (r == 2) and ordinal
column (c > 2) variable.
}
\examples{
## example from stats::prop.trend.test
smokers <- c(83, 90, 129, 70)
patients <- c(86, 93, 136, 82)
prop.test(smokers, patients)
prop.trend.test(smokers, patients)
# DescTools::CochranArmitageTest(rbind(smokers, patients - smokers))
ca.test(rbind(smokers, patients - smokers))
ca.test(rbind(smokers, patients - smokers), score = c(0, 0, 1, 2))
## equivalent ways to call ca.test
dat <- data.frame(x = mtcars$vs, y = mtcars$gear)
ca.test(dat$x, dat$y)
ca.test(x ~ y, dat)
ca.test(split(dat$x, dat$y))
ca.test(table(dat$x, dat$y))
\dontrun{
## simulate p-value with 1k replicates
set.seed(1)
ca.test(rbind(smokers, patients - smokers), simulate.p.value = TRUE, B = 1000)
}
}
\seealso{
\code{\link{prop.trend.test}}; \code{\link{jt.test}} for doubly-ordered tables;
\code{\link{cuzick.test}}; \code{DescTools::CochranArmitageTest}
}
| /man/ca.test.Rd | no_license | raredd/rawr | R |
library(tidyhydat)
### Name: hy_stn_datum_conv
### Title: Extract station datum conversions from HYDAT database
### Aliases: hy_stn_datum_conv
### ** Examples
## Not run:
##D hy_stn_datum_conv(station_number = c("02JE013","08MF005"))
## End(Not run)
| /data/genthat_extracted_code/tidyhydat/examples/hy_stn_datum_conv.Rd.R | no_license | surayaaramli/typeRrh | R |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tar.R
\name{URLs_WIKITEXT}
\alias{URLs_WIKITEXT}
\title{WIKITEXT dataset}
\usage{
URLs_WIKITEXT(filename = "WIKITEXT", untar = TRUE)
}
\arguments{
\item{filename}{the name of the file}
\item{untar}{logical, whether to untar the '.tgz' file}
}
\value{
None
}
\description{
download WIKITEXT dataset
}
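\examples{
\dontrun{
## Minimal usage sketch; assumes network access. The download location
## and exact side effects are assumptions based on the \usage defaults
## (untar = TRUE), not documented behaviour.
URLs_WIKITEXT()
}
}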
| /man/URLs_WIKITEXT.Rd | permissive | Cdk29/fastai | R |
## Heterosexual model test script
library(EpiModelHIV)
st <- make_nw_het(part.dur = 2013)
est <- netest(st$nw,
formation = st$formation,
target.stats = st$stats,
coef.form = -Inf,
coef.diss = st$coef.diss,
constraints = ~bd(maxout = 3),
set.control.ergm = control.ergm(MCMLE.maxit = 500, MPLE.type = "penalized"))
dx <- netdx(est, nsims = 5, nsteps = 250,
set.control.ergm = control.simulate.ergm(MCMC.burnin = 1e6))
print(dx)
plot(dx)
param <- param_het()
init <- init_het(i.prev.male = 0.25, i.prev.feml = 0.25)
control <- control_het(nsteps = 2600)
sim <- netsim(est, param, init, control)
| /inst/het-test-script.R | no_license | dth2/EpiModelHIV_SHAMP | R |
library(pact)
### Name: KfoldCV
### Title: Split a dataset into k parts for k-fold cross-validation
### Aliases: KfoldCV
### ** Examples
KfoldCV(15,3)
KfoldCV(15,15)
| /data/genthat_extracted_code/pact/examples/KfoldCV.Rd.R | no_license | surayaaramli/typeRrh | R |
# R_code_variability.r
# Similaun glacier
# DAY 1
# libraries
require(raster)
require(RStoolbox)
# Set working directory
setwd("C:/lab/")
# Load sentinel.png
sent <- brick("sentinel.png")
# Plotting...
# NIR = 1, RED = 2, GREEN = 3
plotRGB(sent) # automatic linear stretch
#plotRGB(sent, r = 1, g = 2, b = 3, stretch = "lin")
plotRGB(sent, r = 2, g = 1, b = 3, stretch = "lin")
sent
# class      : RasterBrick
# dimensions : 794, 798, 633612, 4 (nrow, ncol, ncell, nlayers)
# resolution : 1, 1 (x, y)
# extent     : 0, 798, 0, 794 (xmin, xmax, ymin, ymax)
# crs        : NA
# source     : C:/lab/sentinel.png
# names      : sentinel.1, sentinel.2, sentinel.3, sentinel.4
# min values : 0, 0, 0, 0
# max values : 255, 255, 255, 255
# Assign the individual bands to variables so they are easier to reference...
nir <- sent$sentinel.1
red <- sent$sentinel.2
ndvi <- (nir - red) / (nir + red)
plot(ndvi)
# Change the palette...
cl <- colorRampPalette(c('black', 'white', 'red', 'magenta', 'green')) (200)
plot(ndvi, col = cl)
# Now let's compute the variability of the image!
# focal() applies a function over a moving window
ndvisd3 <- focal(ndvi, w = matrix(1/9, nrow = 3, ncol = 3), fun = sd)
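# NB: raster::focal() multiplies the cell values by the weights in w
# before applying fun, so with w = 1/9 each local sd is scaled by 1/9;
# the spatial pattern we plot is unchanged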
# Change the palette...
clsd <- colorRampPalette(c('blue','green','pink','magenta','orange','brown','red','yellow')) (200)
plot(ndvisd3, col = clsd)
# compute the mean of the biomass...
ndvimean3 <- focal(ndvi, w = matrix(1/9, nrow = 3, ncol = 3), fun = mean)
plot(ndvimean3, col = clsd)
# Change the size of the moving window...
# 13 x 13 standard deviation
ndvisd13 <- focal(ndvi, w = matrix(1/169, nrow = 13, ncol = 13), fun = sd)
plot(ndvisd13, col = clsd)
# 5 x 5 standard deviation
ndvisd5 <- focal(ndvi, w = matrix(1/25, nrow = 5, ncol = 5), fun = sd)
plot(ndvisd5, col = clsd)
# Principal component analysis
# PCA
sentpca <- rasterPCA(sent)
plot(sentpca$map)
# To see how much variability each component explains...
summary(sentpca$model)
# Importance of components:
#                            Comp.1     Comp.2      Comp.3
# Standard deviation     77.3362848 53.5145531 5.765599616
# Proportion of Variance  0.6736804  0.3225753 0.003744348 (proportion of variance explained)
# Cumulative Proportion   0.6736804  0.9962557 1.000000000
#                        Comp.4
# Standard deviation          0
# Proportion of Variance      0
# Cumulative Proportion       1
# The first PC explains 67.37% of the original information.
# DAY 2
# Load the libraries again and set the working directory
library(ggplot2)
library(gridExtra)
library(viridis) # used for the colours, to colour ggplot plots automatically!
# sent <- brick("sentinel.png")
# sentpca <- rasterPCA(sent)
# plot(sentpca)
# summary(sentpca)
# sentpca to see all the variability (see day 1)
# PC1 --> carries the most information in the image
# The focal function passes the moving window, computes the standard deviation (the variability of all the original data) and writes it back to the central cell.
# Move the moving window and the process starts again!
pc1 <- sentpca$map$PC1 # select only PC1
# Compute the variability on PC1
# Moving window 3 x 3
pc1sd3 <- focal(pc1, w = matrix(1/9, nrow = 3, ncol = 3), fun = sd)
clsd <- colorRampPalette(c('blue','green','pink','magenta','orange','brown','red','yellow')) (200)
plot(pc1sd3, col = clsd) # how the values change within a single band
# Moving window 5 x 5
pc1sd5 <- focal(pc1, w = matrix(1/25, nrow = 5, ncol = 5), fun = sd)
clsd <- colorRampPalette(c('blue','green','pink','magenta','orange','brown','red','yellow')) (200)
plot(pc1sd5, col = clsd)
# source() test!
source("source_test_lezione.r") # to load and run code from an external file!
# Plot our data with ggplot2
# ggplot() # creates an empty plotting window
ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer))
ggplot() +
  geom_raster(pc1sd3, mapping = aes(x = x, y = y, fill = layer))
# Using viridis!
ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer)) +
  scale_fill_viridis()
ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer)) +
  scale_fill_viridis() +
  ggtitle("Standard deviation of PC1 by viridis colour scale")
ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer)) +
  scale_fill_viridis(option = "magma") +
  ggtitle("Standard deviation of PC1 by magma colour scale")
# grid arrange
# assign each plot to an object...
p1 <- ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer)) +
  scale_fill_viridis() +
  ggtitle("Standard deviation of PC1 by viridis colour scale")
p2 <- ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer)) +
  scale_fill_viridis(option = "magma") +
  ggtitle("Standard deviation of PC1 by magma colour scale")
p3 <- ggplot() +
  geom_raster(pc1sd5, mapping = aes(x = x, y = y, fill = layer)) +
  scale_fill_viridis(option = "turbo") +
  ggtitle("Standard deviation of PC1 by turbo colour scale")
grid.arrange(p1, p2, p3, nrow = 1)
# ----- END -----
| /R_code_variability.r | no_license | chiarasalv23/telerilevamento_2021 | R |
#' Cohort Status Trace Plot
#'
#' @param mapvizieR_obj conforming mapvizieR obj
#' @param studentids vector of studentids
#' @param measurementscale target subject
#' @param match_method do we limit to matched students, and if so, how?
#' no matching = any student record in the studentids.
#' UNIMPLEMENTED METHODS / TODO
#' strict = only kids who appear in all terms
#' strict after imputation = impute first, then use strict method
#' back one = look back one test term, and only include kids who can be matched
#' @param first_and_spring_only show all terms, or only entry & spring?
#' default is TRUE.
#' @param entry_grade_seasons which grade_level_seasons are entry grades?
#' @param collapse_schools treats all students as part of the same 'school' for purposes of plotting, so that one trajectory is shown.
#' default is TRUE. if FALSE will separate lines by school and show a legend.
#' @param retention_strategy how to handle retained students; either 'collapse'
#' (the default, which collapses the cdf by grade so retained students do not
#' show up as extra terms) or 'filter_small' (which drops terms with fewer than
#' small_n_cutoff * max(n_students) students).
#' @param small_n_cutoff proportion of the largest term's n_students below which
#' terms are dropped when retention_strategy = 'filter_small'; default is -1,
#' which keeps all terms.
#' @param plot_labels c('RIT', 'NPR'). 'RIT' is default.
#'
#' @return a ggplot object
#' @export
cohort_status_trace_plot <- function(
mapvizieR_obj,
studentids,
measurementscale,
match_method = 'no matching',
first_and_spring_only = TRUE,
entry_grade_seasons = c(-0.8, 4.2),
collapse_schools = TRUE,
retention_strategy = 'collapse',
small_n_cutoff = -1,
plot_labels = 'RIT'
) {
#opening parameter checks
valid_retention <- c('collapse', 'filter_small')
retention_strategy %>% ensurer::ensure_that(
. %in% valid_retention ~
paste0("retention_strategy should be either one of: ", paste(valid_retention, collapse = ', '))
)
#mv consistency checks
mv_opening_checks(mapvizieR_obj, studentids, 1)
#limit
this_cdf <- mv_limit_cdf(mapvizieR_obj, studentids, measurementscale)
#prep the internal cdf for summary(). zero out map_year_academic and termname to prevent retained students from showing
#as unique terms
if (retention_strategy == 'collapse') {
this_cdf <- cdf_collapse_by_grade(this_cdf)
}
#summary groups by school. if you want transfers in prior years to show as one unit, you want to collapse schools.
if (collapse_schools) {
this_cdf$schoolname <- table(this_cdf$schoolname) %>% sort(decreasing = TRUE) %>% names() %>% magrittr::extract(1)
}
#cdf summary
this_sum <- summary(this_cdf)
if (retention_strategy == 'filter_small') {
this_sum <- this_sum[this_sum$n_students >= small_n_cutoff * max(this_sum$n_students), ]
}
if(plot_labels == 'RIT') {
this_sum$label_text <- this_sum$mean_testritscore %>% round(1)
}
if(plot_labels == 'NPR') {
this_sum$label_text <- this_sum$cohort_status_npr %>% round(1)
}
p <- ggplot(
data = this_sum,
aes(
x = grade_level_season,
y = cohort_status_npr,
label = label_text,
color = schoolname
)
) +
geom_point() +
geom_line() +
geom_text()
p <- p +
theme_bw() +
theme(
panel.grid = element_blank()
) +
scale_y_continuous(
limits = c(0, 100),
breaks = seq(0, 100, 10)
) +
scale_x_continuous(
breaks = this_sum$grade_level_season %>% unique(),
labels = this_sum$grade_level_season %>% unique() %>%
lapply(fall_spring_me) %>% unlist(),
limits = c(
this_sum$grade_level_season %>% unique() %>% min() - .1,
this_sum$grade_level_season %>% unique() %>% max() + .1
)
)
p <- p +
labs(
x = 'Grade & Season',
y = 'Grade/Cohort Status Percentile'
)
if (collapse_schools) {
p <- p + theme(legend.position = 'none')
}
p
}
| /R/cohort_status_trace_plot.R | no_license | charkins24/mapvizieR | R |
# Required packages: foreign (read.spss), dplyr (rename, pipes), ggplot2 (qplot, ggplot)
library(foreign)
library(dplyr)
library(ggplot2)
# Load the data
raw_welfare <- read.spss(file = 'Koweps_hpc10_2015_beta1.sav', to.data.frame = T, reencode='utf-8')
# Make a working copy
Welfare <- raw_welfare
# Inspect the data
head(Welfare)
tail(Welfare)
View(Welfare)
dim(Welfare)
str(Welfare)
summary(Welfare)
# Rename the variables
Welfare <- rename(Welfare,
                  sex = h10_g3,
                  birth = h10_g4,
                  marriage = h10_g10,
                  religion = h10_g11,
                  income = p1002_8aq1,
                  code_job = h10_eco9,
                  code_region = h10_reg7)
# Examine and preprocess the sex variable
class(Welfare$sex)
table(Welfare$sex)
# Check for outliers
table(Welfare$sex)
# Recode outliers to NA
Welfare$sex <- ifelse(Welfare$sex == 9, NA, Welfare$sex) # "don't know" / no answer is coded as 9
# Check for missing values
table(is.na(Welfare$sex))
# Label the sex categories
Welfare$sex <- ifelse(Welfare$sex == 1, "male", "female")
table(Welfare$sex)
qplot(Welfare$sex)
# Examine and preprocess the monthly income variable
class(Welfare$income)
summary(Welfare$income)
qplot(Welfare$income)
qplot(Welfare$income) + xlim(0, 1000) # show only the 0-1000 range
# Check for outliers
summary(Welfare$income)
# Recode outliers to NA
Welfare$income <- ifelse(Welfare$income %in% c(0, 9999), NA, Welfare$income)
# Check for missing values
table(is.na(Welfare$income))
# Analyze the income difference by sex
sex_income <- Welfare %>%
  filter(!is.na(income)) %>%
  group_by(sex) %>%
  summarise(mean_income = mean(income))
sex_income
# Draw the plot
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
| /09-2. 성별에 따른 월급 차이.R | no_license | xoyeon/Doit_R | R |
# Just plotting 5 examples that we have now for the various models.
library(smcsmcTools)
library(data.table)
library(dplyr)
library(ggplot2)
library(GGally)
scenarios = c("backward", "forward", "bidirectional")#, "backward")#, "bidirectional", "realistic")
mplots <- list()
neplots <- list()
i <- 1
migs = c("0.0", "0.1", "0.3", "0.5", "0.7", "0.9")
situations = c(0,2,4)
midpoints <- c("40000", "50000", "60000", "70000")
g = 29
matrices <- list()
m <- list()
ne = list()
j = 1
mat <- matrix(NA_real_, nrow = length(midpoints), ncol = length(migs))
emat <- matrix(NA_real_, nrow = length(midpoints), ncol = length(migs))
for (sit in situations){
i = 1
plots <- list()
eplots <- list()
for (s in scenarios){
j = 1
for(mid in midpoints){
k = 1
for(mig in migs){
emat[j, k] <- avg_migr( file = paste0("~/repos/dirmig/data/spvaryingmig/", s, "_", mid, "_10000_", mig, "_", sit, ".out"), ancient = 100000, modern = 0, g = 29)$integrated[1]
mat[j,k] <- avg_migr( file = paste0("~/repos/dirmig/data/spvaryingmig/", s, "_", mid, "_10000_", mig, "_", sit, ".out"), ancient = 100000, modern = 0, g = 29)$integrated[2]
k = k+1
}
j = j + 1
}
rownames(mat) <- midpoints
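  # axis labels: convert each migration rate m into the proportion of the
  # population replaced per generation, p = 1 - exp(-m)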
colnames(mat) <- factor(round(1-exp(-as.numeric(migs)), 3))
rownames(emat) <- midpoints
colnames(emat) <- factor(round(1-exp(-as.numeric(migs)), 3))
df <- reshape2::melt(t(mat)) %>% as.data.frame() %>% mutate(Var1 = as.character(Var1))
df2 <- reshape2::melt(t(emat)) %>% as.data.frame() %>% mutate(Var1 = as.character(Var1))
plots[[i]] <- ggplot(df, aes(x = factor(Var1), y = Var2, fill = value)) +
geom_tile() +
geom_text(aes(label=round(value, 3)), color="white") +
xlab("Amount of Migration \n(Proportion Replaced per Generation)") +
ylab("Time of Migration \n(years before present)") +
scale_fill_gradient(limits = c(0, 0.6)) +
theme_bw() +
theme(legend.position = "none",
panel.border = element_blank(),
panel.grid = element_blank(),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15))
eplots[[i]] <- ggplot(df2, aes(x = factor(Var1), y = Var2, fill = value)) +
geom_tile() +
geom_text(aes(label=round(value, 3)), color="white") +
xlab("Amount of Migration \n(Proportion Replaced per Generation)") +
ylab("Time of Migration \n(years before present)") +
scale_fill_gradient(limits = c(0, 0.6)) +
theme_bw() +
theme(legend.position = "none",
panel.border = element_blank(),
panel.grid = element_blank(),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15))
i = i + 1
}
p <- ggmatrix(c(plots, eplots), nrow=3, ncol = 2, byrow = F, xlab = "Amount of Migration (Proportion Replaced per Generation)", ylab = "Time of Migration (years before present)", yAxisLabels = c("Backwards", "Forwards", "Bidirectional"), xAxisLabels = c("Inferred Backwards Migration", "Inferred Forwards Migration")) + theme(axis.title.x = element_text(size = 12), axis.title.y = element_text(size = 12))
ggsave(p, file = paste0("~/repos/dirmig/plot/sims/recovered_migration_", sit, ".pdf"), height = 4, width = 10, units = "in")
}
| /r/all_mig_simulation_heatmaps.R | no_license | Chris1221/ancient_african_admixture | R |
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453965e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
| /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615845066-test.R | no_license | akhikolla/updatedatatype-list3 | R |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{sentiment_lexicon_neg_en}
\alias{sentiment_lexicon_neg_en}
\title{Negative Lexicon (en)}
\format{A character vector}
\source{
http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html#datasets
}
\description{
Negative Lexicon (en)
}
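\examples{
## Quick look at the lexicon; per the \format field it is a plain
## character vector, so head() and length() are safe sketches.
head(sentiment_lexicon_neg_en)
length(sentiment_lexicon_neg_en)
}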
\seealso{
Other example datasets: \code{\link{names_female_en}},
\code{\link{names_female_es}},
\code{\link{names_male_en}}, \code{\link{names_male_es}},
\code{\link{senate_tweets}},
\code{\link{senators_profile}}, \code{\link{senators}},
\code{\link{sentiment_lexicon_pos_en}},
\code{\link{warriner_et_al_en}},
\code{\link{warriner_et_al_es}}
Other lexicon datasets: \code{\link{sentiment_lexicon_pos_en}},
\code{\link{warriner_et_al_en}},
\code{\link{warriner_et_al_es}}
}
\concept{example datasets}
\concept{lexicon datasets}
| /man/sentiment_lexicon_neg_en.Rd | no_license | fentonmartin/twitterreport | R |
\name{nonparadom}
\alias{nonparadom}
\title{nonparadom}
\description{Tests for nonparallel dominance, a form of asymmetry in
predictability, between i to j and k to L (Wampold, 1984, 1989, 1992, 1995).}
\usage{
nonparadom(data, i, j, k, L, labels = NULL, lag = 1, adjacent = TRUE,
tailed = 1, permtest = FALSE, nperms = 10)
}
\arguments{
\item{data}{
\code{}A one-column dataframe, or a vector of code sequences, or a square
frequency transition matrix. If data is not a frequency transition matrix,
then data must be either (a) a series of string (non-numeric) code values,
or (b) a series of integer codes with values ranging from "1" to whatever
value the user specifies in the "ncodes" argument. There should be no
code values with zero frequencies. Missing values are not permitted.
}
\item{i}{
\code{}Code value for i.
}
\item{j}{
\code{}Code value for j.
}
\item{k}{
\code{}Code value for k.
}
\item{L}{
\code{}Code value for L.
}
\item{labels}{
\code{}Optional argument for providing labels to the code values. Accepts a
list of string variables. If
unspecified, codes will be labeled "Code1", "Code2", etc.
}
\item{lag}{
\code{}The lag number for the analyses.
}
\item{adjacent}{
\code{}Can adjacent values be coded the same? Options are "TRUE" for yes or "FALSE" for no.
}
\item{tailed}{
\code{}Specify whether significance tests are one-tailed or two-tailed. Options are "1" or "2".
}
\item{permtest}{
\code{}Do you want to run permutation tests of significance?
Options are "FALSE" for no, or "TRUE" for yes. Warning: these computations can be time consuming.
}
\item{nperms}{
\code{}The number of permutations per block.
}
}
\details{
Tests for nonparallel dominance or asymmetry in predictability, which is the
difference in predictability between i to j and k to L, as described by
Wampold (1984, 1989, 1992, 1995).
Parallel dominance (another function in this package) is the difference in
predictability between i to j and j to i.
In parallel dominance the i and j values across the two pairs of codes
are the same. In nonparallel dominance, the i and j values across the
two pairs of codes may vary, i.e., they do not have to be the same.
}
\value{Displays the transitional frequency matrix, expected frequencies,
expected and observed nonparallel dominance frequencies, kappas,
the z values for the kappas, and the significance levels.
Returns a list with the following elements:
\item{freqs}{The transitional frequency matrix}
\item{expfreqs}{The expected frequencies}
\item{npdomfreqs}{The nonparallel dominance frequencies}
\item{expnpdomfreqs}{The expected nonparallel dominance frequencies}
\item{domtypes}{There are 4 sequential dominance case types
described by Wampold (1989). These cases describe the direction
of the effect for \emph{i} on \emph{j} and \emph{j} on \emph{i}.
The four cases are: (1) \emph{i} increases \emph{j}, and \emph{j}
increases \emph{i}, (2) \emph{i} decreases \emph{j}, and \emph{j}
decreases \emph{i}, (3) \emph{i} increases \emph{j}, and \emph{j}
decreases \emph{i}, and (4) \emph{i} decreases \emph{j}, and \emph{j}
increases \emph{i}. Each cell of this matrix indicates the case that
applies to the transition indicated by the cell.}
\item{kappas}{The nonparallel dominance kappas}
\item{z}{The z values for the kappas}
\item{pk}{The p-values for the kappas}
}
\references{
{O'Connor, B. P. (1999). Simple and flexible SAS and SPSS programs for analyzing
lag-sequential categorical data. \emph{Behavior Research Methods,
Instrumentation, and Computers, 31,} 718-726.}
\cr\cr {Wampold, B. E., & Margolin, G. (1982). Nonparametric strategies to test
the independence of behavioral states in sequential data. \emph{Psychological
Bulletin, 92,} 755-765.}
\cr\cr {Wampold, B. E. (1984). Tests of dominance in sequential categorical data.
\emph{Psychological Bulletin, 96,} 424-429.}
\cr\cr {Wampold, B. E. (1989). Kappa as a measure of pattern in sequential data.
\emph{Quality & Quantity, 23,} 171-187.}
\cr\cr {Wampold, B. E. (1992). The intensive examination of social interactions.
In T. Kratochwill & J. Levin (Eds.), \emph{Single-case research design and
analysis: New directions for psychology and education} (pp. 93-131).
Hillsdale, NJ: Erlbaum.}
\cr\cr {Wampold, B. E. (1995). Analysis of behavior sequences in psychotherapy.
In J. Siegfried (Ed.), \emph{Therapeutic and everyday discourse as behavior
change: Towards a micro-analysis in psychotherapy process research}
(pp. 189-214). Norwood, NJ: Ablex.}
}
\author{Zakary A. Draper & Brian P. O'Connor}
\examples{
nonparadom(data_Wampold_1984, i = 6, j = 1, k = 3, L = 4,
labels = c('HPos','HNeu','HNeg','WPos','WNeu','WNeg'),
permtest = TRUE, nperms = 1000)
}
\keyword{ Sequential Analysis }
| /man/nonparadom.Rd | no_license | cran/LagSequential | R |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grid-utils.R
\name{get_grids}
\alias{get_grids}
\title{Import multiple grid data sets}
\usage{
get_grids(file_paths, extent = extent(-180, 180, -90, 90), shp = NULL)
}
\arguments{
\item{file_paths}{Full file paths to grid data.}
\item{extent}{The extent of the grid file that should be loaded.
See \code{\link[raster]{crop}}.}
\item{shp}{If a (polygon) shp file is provided, it will be used to
mask everything outside the specified polygons (can be slow,
depending on the \code{shp} object; see \code{\link[raster]{mask}}).}
}
\description{
Import multiple grid data sets
}
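\examples{
\dontrun{
# A hedged usage sketch: the directory and extent below are placeholders,
# not data shipped with the package.
grids <- get_grids(list.files("data/worldclim", full.names = TRUE),
                   extent = extent(-120, -30, -60, 35))
}
}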
|
/tcruziutils/man/get_grids.Rd
|
permissive
|
jgjuarez/chagas-vector-sdm
|
R
| false | true | 646 |
rd
|
###JAGS script
###running the Correlated Chain-Ladder (CCL) model
#estimate parameters alpha and beta
#predicting logloss, mu[i] and logloss[i]
# run.jags() below comes from the runjags package and gelman.diag() from coda;
# the objects premium, rloss, numw and rdata are assumed to be created by an
# earlier data-preparation script.
library(runjags)
library(coda)
nburn=10000
modelString = "model {
mu[1]<-alpha[w[1]]+beta[d[1]]
logloss[1]~dnorm(mu[1],1/sig2[1])
for (i in 2:length(w)){
mu[i]<-alpha[w[i]]+beta[d[i]]+rho*(logloss[i-1]-mu[i-1])*wne1[i]
logloss[i]~dnorm(mu[i],1/sig2[i])
}
### set up sig2
for (i in 1:length(w)){
sig2[i]<-sigd2[d[i]]
}
for (j in 1:10){
sigd2[j]<-sum(a[j:10])
}
for (k in 1:10){
a[k]~dunif(0.000001,1)
}
#
# specify priors
#
for (i in 1:numlev){
alpha[i]~dnorm(log(premium[i])+logelr,.1)
}
logelr~dnorm(logelr_mean,logelr_sig)
logelr_mean ~ dnorm(0,1)
logelr_sig ~ dexp(1)
#
for (i in 1:9){
beta[i]~dnorm(beta_mu[i],beta_sig[i])
beta_mu[i] ~ dnorm(0,1)
beta_sig[i] ~ dexp(1)
}
beta[10]<-0
rho~dunif(-1,1)
# rho~dunif(-.00001,.00001) # Use for LCL model
}"
###Initialize JAGS model
inits1=list(.RNG.name= "base::Wichmann-Hill",
.RNG.seed= 12341)
inits2=list(.RNG.name= "base::Marsaglia-Multicarry",
.RNG.seed= 12342)
inits3=list(.RNG.name= "base::Super-Duper",
.RNG.seed= 12343)
inits4=list(.RNG.name= "base::Mersenne-Twister",
.RNG.seed= 12344)
data.for.jags=list(premium= premium[1:10],
logloss = log(rloss),
numlev = numw,
w = rdata$w,
wne1 = rdata$wne1,
d = rdata$d)
###run the model
nthin=2
maxpsrf=2
while (maxpsrf>1.05){
nthin=nthin*2
print(paste("nthin =",nthin))
jagout=run.jags(model=modelString,monitor=c("alpha","beta[1:9]","sigd2","rho"),
data=data.for.jags,n.chains=4,method="parallel",
inits=list(inits1,inits2,inits3,inits4),thin=nthin,silent.jags=F,
plots=TRUE,burnin=nburn,sample=2500,psrf.target=1.05)
gelman=gelman.diag(jagout)
maxpsrf=max(gelman$psrf[,1])
print(paste("maxpsrf =",maxpsrf))
}
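# A hedged post-processing sketch (assumes the converged `jagout` object from
# the loop above; `ccl_mcmc` is an illustrative name, not part of the original):
print(jagout)                     # runjags posterior summary table
ccl_mcmc <- as.mcmc.list(jagout)  # coda object for further diagnostics/plots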
|
/scripts/misc_models/stan3.r
|
no_license
|
blakeshurtz/actuarialsci
|
R
| false | false | 1,959 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/large_intestine.csv", header=TRUE, sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.15,family="gaussian",standardize=FALSE)
sink('./Model/EN/Lasso/large_intestine/large_intestine_030.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
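# A hedged follow-up sketch (not in the original pipeline): inspect the CV
# curve and pull the coefficients at the CV-selected penalty.
plot(glm)                                # CV error vs. log(lambda)
coef_min <- coef(glm, s = "lambda.min")  # coefficients at the minimum-MSE lambda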
|
/Model/EN/Lasso/large_intestine/large_intestine_030.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 379 |
r
|
# boot.ci() below comes from the boot package; the uva.xplant, duke.xplant
# and r11.donor data frames are assumed to be loaded by an earlier project script.
library(boot)
# TS Difference
uva.duke.l <- uva.xplant$Liver - duke.xplant$Liver
# TS Plot
png("./ts_diff_uva_duke_l.png", width=900, height=900)
par(mfrow=c(1,1), ps=20)
plot(uva.xplant$Year, uva.duke.l, col = "blue", type = "l",
xlab = "Time",
ylab = "UVa - Duke",
main = "Difference between Kidney Transplants at UVA and Duke")
abline(0,0)
dev.off()
# LM Model
uva.duke.l.lm <- lm(uva.duke.l ~ r11.donor$Liver)
summary(uva.duke.l.lm)
# LM Model Diagnostics
png("./lm_diag_diff_uva_duke_l.png", width=900, height=900)
par(mfrow = c(2,2),ps=20)
plot(uva.duke.l.lm)
par(mfrow = c(1,1))
dev.off()
# ACF/PACF of Residuals
png("./acf_diff_uva_duke_l.png", width=900, height=900)
par(mfcol = c(1,2),ps = 20)
acf(uva.duke.l.lm$residuals)
pacf(uva.duke.l.lm$residuals)
par(mfcol = c(1,1))
dev.off()
# AR Suggestion
(uva.duke.l.ar <- ar(uva.duke.l.lm$residuals))
# AIC for Different AR Lags
png("./aic_ar_diff_uva_duke_l.png", width=900, height=900)
par(mfcol = c(1,1),ps = 20)
plot(uva.duke.l.ar$aic, type = "h")
dev.off()
# Adding the time series model
# AR(1)
uva.duke.l.lm.e1 <- uva.duke.l.lm$resid[1:24] # lag-1 residuals: the AR(1) term for the model below
r11k <- r11.donor$Liver[2:25] # donor series aligned with years 2-25
uva.duke.l.ar1 <- uva.xplant$Liver[2:25] - duke.xplant$Liver[2:25] # UVa - Duke difference for years 2-25
uva.duke.l.dm <- data.frame(uva.duke.l.ar1, r11k, uva.duke.l.lm.e1)
summary(uva.duke.l.dm)
# Linear model with time series component
uva.duke.l.lm2<- lm(uva.duke.l.ar1 ~ ., data = uva.duke.l.dm)
summary(uva.duke.l.lm2)
AIC(uva.duke.l.lm2)
lm.fitted <- fitted(uva.duke.l.lm2)
lm.resid <- residuals(uva.duke.l.lm2)
lm.model <- model.matrix(uva.duke.l.lm2)
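# RTSB() is a project-defined residual time-series bootstrap helper (assumed
# sourced elsewhere in this repo); it is expected to return a `boot` object,
# which is why boot.ci() below can compute percentile/BCa intervals from it.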
lm.boot <- RTSB(uva.duke.l.ar1, r11k, lm.fitted, lm.resid, lm.model, 5000)
lm.boot
boot.ci(lm.boot,0.95,type=c('bca','perc'),index=1)
boot.ci(lm.boot,0.95,type=c('bca','perc'),index=2)
boot.ci(lm.boot,0.95,type=c('bca','perc'),index=3)
# diagnostics
png("./lm-ar1_diag_diff_uva_duke_l.png", width=900, height=900)
par(mfrow = c(2,2),ps=20)
plot(uva.duke.l.lm2)
par(mfrow = c(1,1))
dev.off()
png("./lm-ar1_acf_diff_uva_duke_l.png", width=900, height=900)
par(mfrow =c(1,2),ps=20)
acf(uva.duke.l.lm2$residuals)
pacf(uva.duke.l.lm2$residuals)
par(mfrow =c(1,1))
dev.off()
|
/RCode/models_liv_duke.R
|
no_license
|
demasma/Project3
|
R
| false | false | 2,250 |
r
|
# We are using three matrices:
# x - the matrix that is initialized with makeCacheMatrix()
# y - the matrix whose inverse is stored in m
# m - the inverse matrix of y
# NOTE:
# To check whether the matrix whose inverse is stored in m has changed
# after the inverse was calculated with cacheSolve(), I introduced the
# y matrix. y always contains the matrix whose inverse is stored in m.
# If you reinitialize your object with the setx() function, there is no
# need for this extra check. But if you modify only one element (via
# setelement()) and don't recalculate the inverse, it is necessary to
# check whose inverse is stored in the cache.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
y <- NULL
# modifies the element of the matrix
setelement <- function(a, b, c) {
x[a,b]<-c
x <<- x
}
# updates the x matrix and makes m null
setx <- function(z) {
x <<- z
m <<- NULL
}
# returns x matrix
getx <- function() x
# updates m, which is the inverse of y matrix
setSolve <- function(r) {
m <<- r
}
# returns m that is the inverse of y matrix
getSolve <- function() m
# updates the y matrix
sety <- function (k) {
y <<- k
}
# returns the y matrix
gety <- function () y
list(setelement=setelement, setx=setx, getx=getx,
setSolve=setSolve,
getSolve=getSolve,
sety=sety,
gety=gety)
}
# Calculates and prints the inverse matrix of the initialized matrix.
# Also updates y accordingly.
cacheSolve <- function(j, ...) {
# puts m into t
t <- j$getSolve()
# if m not null and the original matrix is not changed,
# returns with the cached data
if(!is.null(t) && identical(j$gety(), j$getx())) {
message("getting cached data")
return(t)
}
# if we are here, that means that m was null,
# or the original matrix was changed,
# let`s calculate the new inverse matrix and stores the new values
data <- j$getx()
t <- solve(data, ...)
j$setSolve(t)
j$sety(data)
t
}
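# A hedged usage sketch (illustrative, not part of the original assignment):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm)         # computes the inverse and caches it
cacheSolve(cm)         # prints "getting cached data" and reuses the cache
cm$setelement(1, 2, 1) # change one element without calling setx()
cacheSolve(cm)         # detects the change via y and recomputes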
|
/cachematrix.R
|
no_license
|
anikonagy15/ProgrammingAssignment2
|
R
| false | false | 2,320 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sgdata.R
\docType{data}
\name{sgdata}
\alias{sgdata}
\title{Example dataset with repeated measures of depression and rumination}
\format{
A longitudinal dataset in wide format, i.e. one row per person, one column per variable.
\itemize{
\item{id}{: ID variable, unique identifier for each person}
\item{bdi_s0}{: BDI value, baseline assessment}
\item{bdi_s1}{: BDI value, session 1}
\item{bdi_s2}{: BDI value, session 2}
\item{bdi_s3}{: BDI value, session 3}
\item{bdi_s4}{: BDI value, session 4}
\item{bdi_s5}{: BDI value, session 5}
\item{bdi_s6}{: BDI value, session 6}
\item{bdi_s7}{: BDI value, session 7}
\item{bdi_s8}{: BDI value, session 8}
\item{bdi_s9}{: BDI value, session 9}
\item{bdi_s10}{: BDI value, session 10}
\item{bdi_s11}{: BDI value, session 11}
\item{bdi_s12}{: BDI value, session 12}
\item{bdi_fu1}{: BDI value, follow-up measure 1}
\item{bdi_fu2}{: BDI value, follow-up measure 2}
\item{rq_s0}{: RQ value, baseline assessment}
\item{rq_s1}{: RQ value, session 1}
\item{rq_s2}{: RQ value, session 2}
\item{rq_s3}{: RQ value, session 3}
\item{rq_s4}{: RQ value, session 4}
\item{rq_s5}{: RQ value, session 5}
\item{rq_s6}{: RQ value, session 6}
\item{rq_s7}{: RQ value, session 7}
\item{rq_s8}{: RQ value, session 8}
\item{rq_s9}{: RQ value, session 9}
\item{rq_s10}{: RQ value, session 10}
\item{rq_s11}{: RQ value, session 11}
\item{rq_s12}{: RQ value, session 12}
\item{rq_fu1}{: RQ value, follow-up measure 1}
\item{rq_fu2}{: RQ value, follow-up measure 2}
}
}
\usage{
data(sgdata)
}
\description{
Example dataset with a measure of depression symptoms (BDI) and a secondary process measure (RQ; Rumination Questionnaire) to illustrate how the package works.
}
\examples{
# Load data into global environment
data(sgdata)
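# A hedged peek at the structure (sgdata is assumed to load as a data frame):
head(sgdata)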
}
\keyword{dataset}
|
/man/sgdata.Rd
|
permissive
|
milanwiedemann/suddengains
|
R
| false | true | 1,913 |
rd
|
\name{miss_cell}
\alias{miss_cell}
\title{
Missing values cell function
}
\description{
Counting the number of missing values in each cell.
}
\usage{
miss_cell(x, y, z, w, cell_ids, row_ids, col_ids, vnames, vars, n_min,
pct = FALSE, digits = 0, prefix='', suffix='')
}
\arguments{
\item{x}{
The x variable
}
\item{y}{
NOT USED
}
\item{z}{
NOT USED
}
\item{w}{
NOT USED (the number of missing values is not weighted).
}
\item{cell_ids}{
Index vector for selecting values in cell.
}
\item{row_ids}{
NOT USED
}
\item{col_ids}{
NOT USED
}
\item{vnames}{
NOT USED
}
\item{vars}{
NOT USED
}
\item{n_min}{
NOT USED
}
\item{pct}{
Logical indicating whether to report absolute or relative frequencies of missing values.
}
\item{digits}{
Integer indicating the number of decimal places.
}
\item{prefix}{
Free text added in each cell before the results.
}
\item{suffix}{
Free text added in each cell after the results.
}
}
\author{
Andreas Schulz <ades-s@web.de>
}
\examples{
sex <- factor(rbinom(1000, 1, 0.4), labels=c('Men', 'Women'))
height <- rnorm(1000, mean=1.66, sd=0.1)
height[which(sex=='Men')]<-height[which(sex=='Men')]+0.1
weight <- rnorm(1000, mean=70, sd=5)
decades <- rbinom(1000, 3, 0.5)
decades <- factor(decades, labels=c('[35,45)','[45,55)','[55,65)','[65,75)'))
d<-data.frame(sex, decades, height, weight)
d$height[round(runif(250,1,1000))]<- NA
d$weight[round(runif(25 ,1,1000))]<- NA
tabular.ade(x_vars=c('height', 'weight'), xname=c('Height [m]','Weight [kg]'),
cols=c('sex','decades','ALL'), cnames=c('Gender', 'Age decades'),
data=d, FUN=miss_cell, prefix='Miss:')
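# A hedged variant, assuming cell-function arguments pass through
# tabular.ade the way prefix does above: relative frequencies with
# one decimal place instead of counts.
tabular.ade(x_vars=c('height', 'weight'), xname=c('Height [m]','Weight [kg]'),
            cols=c('sex','decades','ALL'), cnames=c('Gender', 'Age decades'),
            data=d, FUN=miss_cell, pct=TRUE, digits=1, suffix='\%')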
}
\keyword{ missings }
|
/man/miss_cell.Rd
|
no_license
|
cran/etable
|
R
| false | false | 1,740 |
rd
|
# Extracts batter names from CPBL play-by-play strings.
# NOTE: this function operates on a global character vector `x` (the raw
# play-by-play lines), which is assumed to be defined by the calling script.
Playerfunction=function(){
EndName<- regexpr("棒", x) # "棒" marks the batting-order slot
Name<- regexpr(":", x)
player<-substr(x,EndName+1,Name-1)
player<-substr(player,(regexpr("[^1-9a-zA-Z]", player)),(regexpr("[^1-9a-zA-Z]", player)+100))
player[(which(regexpr("[\\(][0-9]",player) != -1))]=""
player[(which(regexpr("[統義兄桃][一大弟猿]",player) != -1))]="" # drop team-name lines
# Strip fielding-position codes. "LF", "CF" and "RF" must be removed before
# the bare "C"; otherwise gsub("C") turns "CF" into "F" first and the later
# gsub("CF") no longer matches (a bug in the original ordering).
for (pos in c("1B","2B","3B","SS","LF","CF","RF","DH","C")) {
player <- gsub(pos,replacement="",player,fixed=TRUE)
}
player <- gsub(",",replacement="",player)
player <- gsub("比賽結束",replacement="",player) # "game over"
player <- gsub("比賽開始",replacement="",player) # "game start"
return(player)
}
|
/offensive_db/functions/playerfunction.R
|
no_license
|
wokeketm1/CPBL
|
R
| false | false | 892 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/experiment_class.R
\name{seqlevels,experiment-method}
\alias{seqlevels,experiment-method}
\title{Seqlevels ORFik experiment
Extracted from fasta genome index}
\usage{
\S4method{seqlevels}{experiment}(x)
}
\arguments{
\item{x}{an ORFik \code{\link{experiment}}}
}
\value{
integer vector with names
}
\description{
Seqlevels ORFik experiment
Extracted from fasta genome index
}
|
/man/seqlevels-experiment-method.Rd
|
permissive
|
Roleren/ORFik
|
R
| false | true | 454 |
rd
|
source('getdata.r')
library('nlme')
options(contrasts=c("contr.sum","contr.poly"))
# Make some dataframes to analyze with linear models
reg.dat <- cbind(climData.sub[,c('siteClim','yearClim','maxswe','novSWEmean',
'decSWEmean','janSWEmean','febSWEmean','meltdoy','onsetdoy',
'totaldaysSC','maat','freemat','scovmat',
'jfmMAT','jasMAT','preonsetTair','JASprecip')],
soilTData[,c('jfmTs5mean','jfmTs20mean','jfmTs50mean',
'snowcovTs5mean','snowcovTs20mean',
'snowcovTs50mean','preonsetTs5',
'preonsetTs20','preonsetTs50')],
soilVWCData[,c('jasVWC5mean','jasVWC20mean','jasVWC50mean',
'jfmVWC5mean','jfmVWC20mean','jfmVWC50mean',
'preonsetVWC5','preonsetVWC20',
'preonsetVWC50')])
# List of sites
sites <- unique(reg.dat$siteClim)
# BELOW-SNOW Tsoil - Create a data frame to store regression values
scovTs.lm <- data.frame(site=sites,
nyrs=as.vector(table(reg.dat$siteClim)),
x1Beta=0,x1Pval=0,x2Beta=0,x2Pval=0,
x3Beta=0,x3Pval=0,x4Beta=0,x4Pval=0,
x5Beta=0,x5Pval=0,x6Beta=0,x6Pval=0,
x7Beta=0,x7Pval=0,x8Beta=0,x8Pval=0,
x9Beta=0,x9Pval=0)
wgtZ.lm <- data.frame(site=sites,
nyrs=as.vector(table(reg.dat$siteClim)),
x1Z=0,x1df=0,x2Z=0,x2df=0,
x3Z=0,x3df=0,x4Z=0,x4df=0,
x5Z=0,x5df=0,x6Z=0,x6df=0,
x7Z=0,x7df=0,x8Z=0,x8df=0,
x9Z=0,x9df=0)
for (i in 1:length(sites)) {
# Subset for the site
tmp <- reg.dat[reg.dat$siteClim==sites[i],]
# Assign the independent variable
y <- tmp$snowcovTs50mean
# Create a list of independent variables
# peak SWE, onsetdoy, preonset Tair, nov & dec SWE
xlist <- data.frame(tmp$maxswe,tmp$onsetdoy,tmp$preonsetTair,
tmp$scovmat,tmp$meltdoy,tmp$novSWEmean,tmp$decSWEmean,
tmp$janSWEmean,tmp$febSWEmean)
cols <- seq(3, 19, 2)
for (j in 1:9) {
x <- xlist[,j] # Get the x variable from the list
# Only do the linear model if there are 3 or more non-NA cases
if (length(y)-sum(is.na(y))>3 & length(x)-sum(is.na(x))>3) {
lm1 <- lm(y~x, na.action=na.omit)
scovTs.lm[i,cols[j]] <- lm1$coef[2]
scovTs.lm[i,cols[j]+1] <- summary(lm1)$coef[8]
# Save the degrees of freedom to weight the combined test
wgtZ.lm[i,cols[j]+1] <- lm1$df.residual
# Otherwise set NAs
} else {
scovTs.lm[i,cols[j]] <- NA
scovTs.lm[i,cols[j]+1] <- NA
}
}
}
# Get the mean Betas for the x variables
meandat <- sapply(scovTs.lm, mean, na.rm=TRUE)
# How many of these regressions are significant (5 10 50 indvar)?
sigp <- as.data.frame(scovTs.lm < 0.05)
pdat <- sapply(sigp, sum, na.rm=TRUE)
#Show a matrix of these values
cbind(meandat[seq(4,20,2)], pdat[seq(4,20,2)])
# maxswe
# onsetdoy
# preonsetTair
# scovmat
# meltdoy
# novSWEmean
# decSWEmean
# janSWEmean
# febSWEmean
# Now do the weighted z transform (Whitlock 2005) to test if these
# are significant in a combined sense
for (i in 1:length(sites)) {
# Subset p-value data for the site
tmp <- scovTs.lm[scovTs.lm$site==sites[i],]
# Predictors are the same nine as in the site-level regressions above
cols <- seq(3, 19, 2)
for (j in 1:9) {
# Convert each site-level p-value to a z-score for the weighted-Z test
wgtZ.lm[i,cols[j]] <- qnorm(1 - as.numeric(tmp[[cols[j] + 1]]))
}
}
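# A hedged completion sketch of Whitlock's (2005) weighted-Z method, using the
# residual df saved above as weights: Zw = sum(w*z)/sqrt(sum(w^2)). This
# assumes one-tailed p-values; two-sided p-values would need a sign-aware
# conversion first. `combZ`/`combP` are illustrative names.
zcols <- seq(3, 19, 2)
combZ <- sapply(zcols, function(k) {
z <- wgtZ.lm[, k]
w <- wgtZ.lm[, k + 1]
ok <- is.finite(z) & is.finite(w) & w > 0
sum(w[ok] * z[ok]) / sqrt(sum(w[ok]^2))
})
combP <- pnorm(combZ, lower.tail = FALSE) # combined one-tailed p per predictor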
# Now the nlme analysis
y <- reg.dat$jasVWC50mean #reg.dat$snowcovTs5mean #jfm
# df <- data.frame(reg.dat$maxswe,reg.dat$onsetdoy,
# reg.dat$preonsetTair,reg.dat$scovmat,reg.dat$meltdoy,
# reg.dat$novSWEmean,reg.dat$decSWEmean,reg.dat$janSWEmean,
# reg.dat$febSWEmean)
df <- data.frame(reg.dat$maxswe,reg.dat$meltdoy,reg.dat$jasMAT,
reg.dat$JASprecip,reg.dat$jfmTs5mean)
sitevec <- reg.dat$siteClim
for (i in 1:ncol(df)) { # loop over the 5 predictors actually in df (1:9 overran it)
x <- df[,i] # Get the x variable from the list
mod <- lme(y ~ x, random=~1 | sitevec,
na.action = na.omit)
print(i)
print(anova(mod))
}
# BELOW-SNOW VWC - Create a data frame to store regression values
scovVWC.lm <- data.frame(site=sites,
nyrs=as.vector(table(reg.dat$siteClim)),
x1Beta=0,x1Pval=0,x2Beta=0,x2Pval=0,
x3Beta=0,x3Pval=0,x4Beta=0,x4Pval=0,
x5Beta=0,x5Pval=0,x6Beta=0,x6Pval=0,
x7Beta=0,x7Pval=0,x8Beta=0,x8Pval=0,
x9Beta=0,x9Pval=0)
for (i in 1:length(sites)) {
# Subset for the site
tmp <- reg.dat[reg.dat$siteClim==sites[i],]
# Assign the independent variable
y <- tmp$jfmVWC50mean
# Create a list of independent variables
# peak SWE, onsetdoy, preonset Tair, nov & dec SWE, etc
xlist <- data.frame(tmp$maxswe,tmp$onsetdoy,tmp$preonsetTair,
tmp$scovmat,tmp$meltdoy,
tmp$novSWEmean,tmp$decSWEmean,tmp$janSWEmean,tmp$febSWEmean)
cols <- seq(3, 19, 2)
for (j in 1:9) {
x <- xlist[,j] # Get the x variable from the list
# Only do the linear model if there are 3 or more non-NA cases
if (length(y)-sum(is.na(y))>3 & length(x)-sum(is.na(x))>3) {
lm1 <- lm(y~x, na.action=na.omit)
scovVWC.lm[i,cols[j]] <- lm1$coef[2]
scovVWC.lm[i,cols[j]+1] <- summary(lm1)$coef[8]
# Otherwise set NAs
} else {
scovVWC.lm[i,cols[j]] <- NA
scovVWC.lm[i,cols[j]+1] <- NA
}
}
}
# Get the mean Betas for the x variables
meandat <- sapply(scovVWC.lm, mean, na.rm=TRUE)
# How many of these regressions are significant (5 10 50 indvar)?
sigp <- as.data.frame(scovVWC.lm < 0.05)
pdat <- sapply(sigp, sum, na.rm=TRUE)
#Show a matrix of these values
cbind(meandat[seq(4,20,2)], pdat[seq(4,20,2)])
# maxswe
# onsetdoy
# preonsetTair
# scovmat
# meltdoy
# novSWEmean
# decSWEmean
# janSWEmean
# febSWEmean
# SUMMER (JAS) VWC - Create a data frame to store regression values
jasVWC.lm <- data.frame(site=sites,
nyrs=as.vector(table(reg.dat$siteClim)),
x1Beta=0,x1Pval=0,x2Beta=0,x2Pval=0,
x3Beta=0,x3Pval=0,x4Beta=0,x4Pval=0,
x5Beta=0,x5Pval=0)
for (i in 1:length(sites)) {
# Subset for the site
tmp <- reg.dat[reg.dat$siteClim==sites[i],]
# Assign the independent variable
y <- tmp$jasVWC50mean
# Create a list of independent variables
# peak SWE, meltdoy, JAS Tair, JAS precip, winter Tsoil
xlist <- data.frame(tmp$maxswe,tmp$meltdoy,tmp$jasMAT,
tmp$JASprecip,tmp$jfmTs5mean)
cols <- seq(3, 11, 2)
for (j in 1:5) {
x <- xlist[,j] # Get the x variable from the list
# Only do the linear model if there are 3 or more non-NA cases
if (length(y)-sum(is.na(y))>3 & length(x)-sum(is.na(x))>3) {
lm1 <- lm(y~x, na.action=na.omit)
jasVWC.lm[i,cols[j]] <- lm1$coef[2]
jasVWC.lm[i,cols[j]+1] <- summary(lm1)$coef[8]
# Otherwise set NAs
} else {
jasVWC.lm[i,cols[j]] <- NA
jasVWC.lm[i,cols[j]+1] <- NA
}
}
}
# Get the mean Betas for the x variables
meandat <- sapply(jasVWC.lm, mean, na.rm=TRUE)
# How many of these regressions are significant (5 10 50 indvar)?
sigp <- as.data.frame(jasVWC.lm < 0.05)
pdat <- sapply(sigp, sum, na.rm=TRUE)
#Show a matrix of these values
cbind(meandat[seq(4,12,2)], pdat[seq(4,12,2)])
# maxswe
# meltdoy
# jasTair
# jasPrecip
# jfmTs5mean
|
/R/lm_site.r
|
no_license
|
gremau/SNOTELsoildata
|
R
| false | false | 7,381 |
r
|
#install.packages("devtools")
#devtools::install_github("MRCIEU/TwoSampleMR")
library(TwoSampleMR)
library(data.table)
library(dplyr)
library(gwasglue)
library(gwasvcf)
library(ieugwasr)
library(genetics.binaRies)
set_bcftools()
args <- commandArgs(T)
datadir <- args[1]
resultsdir <- args[2]
vcfdir <- args[3]
out <- args[4]
instr <- args[5]
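# Expected command-line arguments (inferred from their use below):
#   datadir    - folder holding ukb-b-idlist.txt and MR_prep.RData
#   resultsdir - per-phenotype GWAS results (<id>/<split>.statsfile.txt.gz)
#   vcfdir     - per-outcome GWAS-VCF files (<id>/<id>.vcf.gz)
#   out        - id of the outcome phenotype
#   instr      - instrument set; "weak" keeps only weak instruments (F < 10)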
# ======================== Functions ==============================
mr_analysis <- function(exp,d,r) {
# outcome (always the same as r)
of <- paste(resultsdir,"/",out,"/",r,".statsfile.txt.gz",sep="")
opt <- fread(of,header=TRUE)
opt <- as.data.frame(opt)
if ("BETA" %in% colnames(opt)) {
opt1 <- opt[,c("SNP","ALLELE1","ALLELE0","A1FREQ","BETA", "SE",tail(colnames(opt),1))]
colnames(opt1) <- c("SNP",
"effect_allele.outcome",
"other_allele.outcome",
"eaf.outcome",
"beta.outcome",
"se.outcome",
"pval.outcome")
opt2 <- opt1[order(opt1$SNP, opt1$pval.outcome), ]
opt2 <- opt2[ !duplicated(opt2$SNP), ]
opt3 <- cbind(opt2,
"outcome"=rep("outcome",nrow(opt2)),
"mr_keep.outcome"=rep("TRUE", nrow(opt2)),
"pval_origin.outcome"=rep("reported", nrow(opt2)),
"id.outcome"=rep(out, nrow(opt2)),
"data_source.outcome"=rep("textfile", nrow(opt2)))
out_gwas <- opt3
} else {
out_gwas <- "NA"
}
# MR analysis
if (!out_gwas=="NA") {
print(paste("exposure=",exp))
# Read the results of GWAS
df <- paste(resultsdir,"/",exp,"/",d,".statsfile.txt.gz",sep="")
dsc <- fread(df,header=TRUE)
dsc <- as.data.frame(dsc)
rf <- paste(resultsdir,"/",exp,"/",r,".statsfile.txt.gz",sep="")
rpc <- fread(rf,header=TRUE)
rpc <- as.data.frame(rpc)
if ("BETA" %in% colnames(dsc) & "BETA" %in% colnames(rpc)) {
dsc1 <- dsc[,c("SNP","ALLELE1","ALLELE0","A1FREQ","BETA", "SE",tail(colnames(dsc),1))]
colnames(dsc1) <- c("SNP",
"effect_allele.exposure",
"other_allele.exposure",
"eaf.exposure",
"beta.exposure",
"se.exposure",
"pval.exposure")
dsc2 <- dsc1[order(dsc1$SNP, dsc1$pval.exposure), ]
dsc2 <- dsc2[ !duplicated(dsc2$SNP), ]
dsc3 <- cbind(dsc2,
"exposure"=rep("exposure",nrow(dsc2)),
"mr_keep.exposure"=rep("TRUE", nrow(dsc2)),
"pval_origin.exposure"=rep("reported", nrow(dsc2)),
"id.exposure"=rep(exp, nrow(dsc2)),
"data_source.exposure"=rep("textfile", nrow(dsc2)))
rpc1 <- rpc[,c("SNP","ALLELE1","ALLELE0","A1FREQ","BETA", "SE",tail(colnames(rpc),1))]
colnames(rpc1) <- c("SNP",
"effect_allele.exposure",
"other_allele.exposure",
"eaf.exposure",
"beta.exposure",
"se.exposure",
"pval.exposure")
rpc2 <- rpc1[order(rpc1$SNP, rpc1$pval.exposure), ]
rpc2 <- rpc2[ !duplicated(rpc2$SNP), ]
rpc3 <- cbind(rpc2,
"exposure"=rep("exposure",nrow(rpc2)),
"mr_keep.exposure"=rep("TRUE", nrow(rpc2)),
"pval_origin.exposure"=rep("reported", nrow(rpc2)),
"id.exposure"=rep(exp, nrow(rpc2)),
"data_source.exposure"=rep("textfile", nrow(rpc2)))
disc_gwas <- dsc3
repl_gwas <- rpc3
# Filtering SNPs for their presence in the phenotype and for p-val
snp_exp <- as.character(subset(dat, id.exposure == exp)$SNP)
## Forward order
disc_gwas_f <- subset(disc_gwas,SNP %in% snp_exp & pval.exposure < 5e-8)
# The next two commands distinguish plain weak instruments from weak
# instruments with wc; pf(10, 1, 10000, low=F) converts the F = 10
# weak-instrument threshold into a p-value cutoff
if (instr=="weak") {
snplist <- subset(repl_gwas, SNP %in% disc_gwas_f$SNP & pval.exposure >= pf(10, 1, 10000, low=F))$SNP
disc_gwas_f <- subset(disc_gwas_f, SNP %in% snplist)
}
repl_gwas_f <- subset(repl_gwas,SNP %in% disc_gwas_f$SNP)
out_gwas_f <- subset(out_gwas,SNP %in% disc_gwas_f$SNP)
## Reverse order
repl_gwas_s <- subset(repl_gwas,SNP %in% snp_exp & pval.exposure < 5e-8)
# The next two commands distinguish plain weak instruments from weak
# instruments with wc; pf(10, 1, 10000, low=F) converts the F = 10
# weak-instrument threshold into a p-value cutoff
if (instr=="weak") {
snplist <- subset(disc_gwas, SNP %in% repl_gwas_s$SNP & pval.exposure >= pf(10, 1, 10000, low=F))$SNP
repl_gwas_s <- subset(repl_gwas_s, SNP %in% snplist)
}
disc_gwas_s <- subset(disc_gwas,SNP %in% repl_gwas_s$SNP)
out_gwas_s <- subset(out_gwas,SNP %in% repl_gwas_s$SNP)
# DR and D scenarios
if (!nrow(disc_gwas_f)==0) {
# Harmonise the exposure and outcome data
dat_dr <- harmonise_data(repl_gwas_f, out_gwas_f, action=1)
dat_d <- harmonise_data(disc_gwas_f, out_gwas_f, action=1)
# Perform MR on the replication data
res_dr <- mr(dat_dr)
if (nrow(res_dr)==0) {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- mr_heterogeneity(dat_dr)
if (nrow(het_dr)==0) {
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
# Perform MR on the discovery sign data
res_d <- mr(dat_d)
if (nrow(res_d)==0) {
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- mr_heterogeneity(dat_d)
if (nrow(het_d)==0) {
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
} else {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
}
# RD and R scenario
if (!nrow(repl_gwas_s)==0) {
# Harmonise the exposure and outcome data
dat_rd <- harmonise_data(disc_gwas_s, out_gwas_s, action=1)
dat_r <- harmonise_data(repl_gwas_s, out_gwas_s, action=1)
# Perform MR on the discovery data
res_rd <- mr(dat_rd)
if (nrow(res_rd)==0) {
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- mr_heterogeneity(dat_rd)
if (nrow(het_rd)==0) {
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
# Perform MR on the replication sign data
res_r <- mr(dat_r)
if (nrow(res_r)==0) {
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- mr_heterogeneity(dat_r)
if (nrow(het_r)==0) {
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
} else {
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
}
} else {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
}
} else {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
}
mr_res <- rbind(res_dr,res_d,res_rd,res_r)
mr_het <- rbind(het_dr,het_d,het_rd,het_r)
res_list <- list("mr" = mr_res, "het" = mr_het)
return(res_list)
}
# ======================== Run ==============================
# Read all phenotype names and define each phenotype id
phen_all <- fread(paste(datadir,"ukb-b-idlist.txt",sep="/"), header=FALSE)
phen_all <- as.data.frame(phen_all)
# ---------------------- Full MR ----------------------------
# Read instruments for all of the exposures
load(paste(datadir,"MR_prep.RData",sep="/"))
exposure_dat <- mybiglist$exp_dat
chrpos <- mybiglist$chr_pos
# Lookup from one dataset
filename <- paste(vcfdir,"/",out,"/",out,".vcf.gz",sep="")
out1 <- query_gwas(filename, chrompos=chrpos)
out2 <- gwasglue::gwasvcf_to_TwoSampleMR(out1, "outcome")
# Harmonise the exposure and outcome data
dat <- harmonise_data(exposure_dat, out2, action=1)
# testing for one exposure only:
# dat <- dat[dat$id.exposure==exp,]
# Perform full MR
res <- mr(dat)
if (nrow(res)==0) {
# exp is not yet defined here (it is the loop variable below), so use NA
res <- data.frame("id.exposure"=NA,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res <- res %>% mutate(outcome = tolower(outcome))
res$id.outcome <- res$outcome
het <- mr_heterogeneity(dat)
if (nrow(het)==0) {
het <- data.frame("id.exposure"=NA,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het <- het %>% mutate(outcome = tolower(outcome))
het$id.outcome <- het$outcome
# ---------------------- Replication -------------------------
mr_out <- c()
het_out <- c()
for (exp in phen_all[,1])
{
mr_full <- subset(res, id.exposure==exp)
if (nrow(mr_full)==0) {
mr_full <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
mr_full <- cbind(mr_full,"data"=rep("full", nrow(mr_full)))
mr_full <- cbind(mr_full,"dir"="NA")
het_full <- subset(het, id.exposure==exp)
if (nrow(het_full)==0) {
het_full <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_full <- cbind(het_full,"data"=rep("full", nrow(het_full)))
het_full <- cbind(het_full,"dir"="NA")
mr_rep_AB <- mr_analysis(exp,"discovery","replication")
mr_AB <- mr_rep_AB$mr
mr_AB <- cbind(mr_AB,"dir"=rep("AB", nrow(mr_AB)))
het_AB <- mr_rep_AB$het
het_AB <- cbind(het_AB,"dir"=rep("AB", nrow(het_AB)))
mr_rep_BA <- mr_analysis(exp,"replication","discovery")
mr_BA <- mr_rep_BA$mr
mr_BA <- cbind(mr_BA,"dir"=rep("BA", nrow(mr_BA)))
het_BA <- mr_rep_BA$het
het_BA <- cbind(het_BA,"dir"=rep("BA", nrow(het_BA)))
mr_out <- rbind(mr_out,mr_full,mr_AB,mr_BA)
het_out <- rbind(het_out,het_full,het_AB,het_BA)
}
# Save all results
write.table(mr_out, file = paste(resultsdir,"/",out,"/MR_All_vs_All_",instr,".txt",sep=""), append = FALSE, quote = TRUE, sep = " ",
eol = "\n", na = "NA", dec = ".", row.names = FALSE,
col.names = TRUE, qmethod = c("escape", "double"),
fileEncoding = "")
write.table(het_out, file = paste(resultsdir,"/",out,"/MR_Het_All_vs_All_",instr,".txt",sep=""), append = FALSE, quote = TRUE, sep = " ",
eol = "\n", na = "NA", dec = ".", row.names = FALSE,
col.names = TRUE, qmethod = c("escape", "double"),
fileEncoding = "")
|
/mr/scripts/mr_All_vs_All.r
|
no_license
|
isadreev/UKBB_replication
|
R
| false | false | 17,606 |
r
|
#install.packages("devtools")
#devtools::install_github("MRCIEU/TwoSampleMR")
library(TwoSampleMR)
library(data.table)
library(dplyr)
library(gwasglue)
library(gwasvcf)
library(ieugwasr)
library(genetics.binaRies)
set_bcftools()
args <- commandArgs(T)
datadir <- args[1]
resultsdir <- args[2]
vcfdir <- args[3]
out <- args[4]
instr <- args[5]
# ======================== Functions ==============================
mr_analysis <- function(exp,d,r) {
# outcome (always the same as r)
of <- paste(resultsdir,"/",out,"/",r,".statsfile.txt.gz",sep="")
opt <- fread(of,header=TRUE)
opt <- as.data.frame(opt)
if ("BETA" %in% colnames(opt)) {
opt1 <- opt[,c("SNP","ALLELE1","ALLELE0","A1FREQ","BETA", "SE",tail(colnames(opt),1))]
colnames(opt1) <- c("SNP",
"effect_allele.outcome",
"other_allele.outcome",
"eaf.outcome",
"beta.outcome",
"se.outcome",
"pval.outcome")
opt2 <- opt1[order(opt1$SNP, opt1$pval.outcome), ]
opt2 <- opt2[ !duplicated(opt2$SNP), ]
opt3 <- cbind(opt2,
"outcome"=rep("outcome",nrow(opt2)),
"mr_keep.outcome"=rep("TRUE", nrow(opt2)),
"pval_origin.outcome"=rep("reported", nrow(opt2)),
"id.outcome"=rep(out, nrow(opt2)),
"data_source.outcome"=rep("textfile", nrow(opt2)))
out_gwas <- opt3
} else {
out_gwas <- "NA"
}
# MR analysis
if (!out_gwas=="NA") {
print(paste("exposure=",exp))
# Read the results of GWAS
df <- paste(resultsdir,"/",exp,"/",d,".statsfile.txt.gz",sep="")
dsc <- fread(df,header=TRUE)
dsc <- as.data.frame(dsc)
rf <- paste(resultsdir,"/",exp,"/",r,".statsfile.txt.gz",sep="")
rpc <- fread(rf,header=TRUE)
rpc <- as.data.frame(rpc)
if ("BETA" %in% colnames(dsc) & "BETA" %in% colnames(rpc)) {
dsc1 <- dsc[,c("SNP","ALLELE1","ALLELE0","A1FREQ","BETA", "SE",tail(colnames(dsc),1))]
colnames(dsc1) <- c("SNP",
"effect_allele.exposure",
"other_allele.exposure",
"eaf.exposure",
"beta.exposure",
"se.exposure",
"pval.exposure")
dsc2 <- dsc1[order(dsc1$SNP, dsc1$pval.exposure), ]
dsc2 <- dsc2[ !duplicated(dsc2$SNP), ]
dsc3 <- cbind(dsc2,
"exposure"=rep("exposure",nrow(dsc2)),
"mr_keep.exposure"=rep("TRUE", nrow(dsc2)),
"pval_origin.exposure"=rep("reported", nrow(dsc2)),
"id.exposure"=rep(exp, nrow(dsc2)),
"data_source.exposure"=rep("textfile", nrow(dsc2)))
rpc1 <- rpc[,c("SNP","ALLELE1","ALLELE0","A1FREQ","BETA", "SE",tail(colnames(rpc),1))]
colnames(rpc1) <- c("SNP",
"effect_allele.exposure",
"other_allele.exposure",
"eaf.exposure",
"beta.exposure",
"se.exposure",
"pval.exposure")
rpc2 <- rpc1[order(rpc1$SNP, rpc1$pval.exposure), ]
rpc2 <- rpc2[ !duplicated(rpc2$SNP), ]
rpc3 <- cbind(rpc2,
"exposure"=rep("exposure",nrow(rpc2)),
"mr_keep.exposure"=rep("TRUE", nrow(rpc2)),
"pval_origin.exposure"=rep("reported", nrow(rpc2)),
"id.exposure"=rep(exp, nrow(rpc2)),
"data_source.exposure"=rep("textfile", nrow(rpc2)))
disc_gwas <- dsc3
repl_gwas <- rpc3
# Filtering SNPs for their presence in the phenotype and for p-val
snp_exp <- as.character(subset(dat, id.exposure == exp)$SNP)
## Forward order
disc_gwas_f <- subset(disc_gwas,SNP %in% snp_exp & pval.exposure < 5e-8)
# The next two commands distinguish plain weak instruments from weak
# instruments with wc; pf(10, 1, 10000, low=F) converts the F = 10
# weak-instrument threshold into a p-value cutoff
if (instr=="weak") {
snplist <- subset(repl_gwas, SNP %in% disc_gwas_f$SNP & pval.exposure >= pf(10, 1, 10000, low=F))$SNP
disc_gwas_f <- subset(disc_gwas_f, SNP %in% snplist)
}
repl_gwas_f <- subset(repl_gwas,SNP %in% disc_gwas_f$SNP)
out_gwas_f <- subset(out_gwas,SNP %in% disc_gwas_f$SNP)
## Reverse order
repl_gwas_s <- subset(repl_gwas,SNP %in% snp_exp & pval.exposure < 5e-8)
# The next two commands distinguish plain weak instruments from weak
# instruments with wc; pf(10, 1, 10000, low=F) converts the F = 10
# weak-instrument threshold into a p-value cutoff
if (instr=="weak") {
snplist <- subset(disc_gwas, SNP %in% repl_gwas_s$SNP & pval.exposure >= pf(10, 1, 10000, low=F))$SNP
repl_gwas_s <- subset(repl_gwas_s, SNP %in% snplist)
}
disc_gwas_s <- subset(disc_gwas,SNP %in% repl_gwas_s$SNP)
out_gwas_s <- subset(out_gwas,SNP %in% repl_gwas_s$SNP)
# DR and D scenarios
if (!nrow(disc_gwas_f)==0) {
# Harmonise the exposure and outcome data
dat_dr <- harmonise_data(repl_gwas_f, out_gwas_f, action=1)
dat_d <- harmonise_data(disc_gwas_f, out_gwas_f, action=1)
# Perform MR on the replication data
res_dr <- mr(dat_dr)
if (nrow(res_dr)==0) {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- mr_heterogeneity(dat_dr)
if (nrow(het_dr)==0) {
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
# Perform MR on the discovery sign data
res_d <- mr(dat_d)
if (nrow(res_d)==0) {
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- mr_heterogeneity(dat_d)
if (nrow(het_d)==0) {
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
} else {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
}
# RD and R scenario
if (!nrow(repl_gwas_s)==0) {
# Harmonise the exposure and outcome data
dat_rd <- harmonise_data(disc_gwas_s, out_gwas_s, action=1)
dat_r <- harmonise_data(repl_gwas_s, out_gwas_s, action=1)
# Perform MR on the discovery data
res_rd <- mr(dat_rd)
if (nrow(res_rd)==0) {
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- mr_heterogeneity(dat_rd)
if (nrow(het_rd)==0) {
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
# Perform MR on the replication sign data
res_r <- mr(dat_r)
if (nrow(res_r)==0) {
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- mr_heterogeneity(dat_r)
if (nrow(het_r)==0) {
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
} else {
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
}
} else {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
}
} else {
res_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_dr <- cbind(res_dr,"data"=rep("DR", nrow(res_dr)))
het_dr <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_dr <- cbind(het_dr,"data"=rep("DR", nrow(het_dr)))
res_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_d <- cbind(res_d,"data"=rep("D", nrow(res_d)))
het_d <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_d <- cbind(het_d,"data"=rep("D", nrow(het_d)))
res_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_rd <- cbind(res_rd,"data"=rep("RD", nrow(res_rd)))
het_rd <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_rd <- cbind(het_rd,"data"=rep("RD", nrow(het_rd)))
res_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
res_r <- cbind(res_r,"data"=rep("R", nrow(res_r)))
het_r <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
het_r <- cbind(het_r,"data"=rep("R", nrow(het_r)))
}
mr_res <- rbind(res_dr,res_d,res_rd,res_r)
mr_het <- rbind(het_dr,het_d,het_rd,het_r)
res_list <- list("mr" = mr_res, "het" = mr_het)
return(res_list)
}
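# Usage sketch (the exposure id below is a placeholder; the Run section
# further down shows the real calls):
# res_list <- mr_analysis("ukb-b-12345", "discovery", "replication")
# res_list$mr   # MR estimates for the DR, D, RD and R scenarios
# res_list$het  # matching heterogeneity statistics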
# ======================== Run ==============================
# Read all phenotype names and define each phenotype id
phen_all <- fread(paste(datadir,"ukb-b-idlist.txt",sep="/"), header=FALSE)
phen_all <- as.data.frame(phen_all)
# ---------------------- Full MR ----------------------------
# Read instruments for all of the exposures
load(paste(datadir,"MR_prep.RData",sep="/"))
exposure_dat <- mybiglist$exp_dat
chrpos <- mybiglist$chr_pos
# Look up the outcome GWAS from a single VCF dataset
filename <- paste(vcfdir,"/",out,"/",out,".vcf.gz",sep="")
out1 <- query_gwas(filename, chrompos=chrpos)
out2 <- gwasglue::gwasvcf_to_TwoSampleMR(out1, "outcome")
# Harmonise the exposure and outcome data
dat <- harmonise_data(exposure_dat, out2, action=1)
# testing for one exposure only:
# dat <- dat[dat$id.exposure==exp,]
# Perform full MR
res <- mr(dat)
if (nrow(res)==0) {
res <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
res <- res %>% mutate(outcome = tolower(outcome))
res$id.outcome <- res$outcome
het <- mr_heterogeneity(dat)
if (nrow(het)==0) {
het <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het <- het %>% mutate(outcome = tolower(outcome))
het$id.outcome <- het$outcome
# ---------------------- Replication -------------------------
mr_out <- c()
het_out <- c()
for (exp in phen_all[,1])
{
mr_full <- subset(res, id.exposure==exp)
if (nrow(mr_full)==0) {
mr_full <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","nsnp"=NA,"b"=NA,"se"=NA,"pval"=NA)
}
mr_full <- cbind(mr_full,"data"=rep("full", nrow(mr_full)))
mr_full <- cbind(mr_full,"dir"="NA")
het_full <- subset(het, id.exposure==exp)
if (nrow(het_full)==0) {
het_full <- data.frame("id.exposure"=exp,"id.outcome"=out,"outcome"="NA",
"exposure"="NA","method"="NA","Q"=NA,"Q_df"=NA,"Q_pval"=NA)
}
het_full <- cbind(het_full,"data"=rep("full", nrow(het_full)))
het_full <- cbind(het_full,"dir"="NA")
mr_rep_AB <- mr_analysis(exp,"discovery","replication")
mr_AB <- mr_rep_AB$mr
mr_AB <- cbind(mr_AB,"dir"=rep("AB", nrow(mr_AB)))
het_AB <- mr_rep_AB$het
het_AB <- cbind(het_AB,"dir"=rep("AB", nrow(het_AB)))
mr_rep_BA <- mr_analysis(exp,"replication","discovery")
mr_BA <- mr_rep_BA$mr
mr_BA <- cbind(mr_BA,"dir"=rep("BA", nrow(mr_BA)))
het_BA <- mr_rep_BA$het
het_BA <- cbind(het_BA,"dir"=rep("BA", nrow(het_BA)))
mr_out <- rbind(mr_out,mr_full,mr_AB,mr_BA)
het_out <- rbind(het_out,het_full,het_AB,het_BA)
}
# Save all results
write.table(mr_out, file = paste(resultsdir,"/",out,"/MR_All_vs_All_",instr,".txt",sep=""), append = FALSE, quote = TRUE, sep = " ",
eol = "\n", na = "NA", dec = ".", row.names = FALSE,
col.names = TRUE, qmethod = c("escape", "double"),
fileEncoding = "")
write.table(het_out, file = paste(resultsdir,"/",out,"/MR_Het_All_vs_All_",instr,".txt",sep=""), append = FALSE, quote = TRUE, sep = " ",
eol = "\n", na = "NA", dec = ".", row.names = FALSE,
col.names = TRUE, qmethod = c("escape", "double"),
fileEncoding = "")
|
# Page No. 180
# Ionization of water: mH_mOH is the ion product mH * mOH, and
# MH_MOH = m_2 / m_1 gives the ratio mH / mOH, so mH = sqrt(mH_mOH * MH_MOH)
m_1 <- 1.75 * 10^-5
m_2 <- 1.772 * 10^-4
mH_mOH <- 1.008 * 10^-14
MH_MOH <- m_2 / m_1
mH <- sqrt(mH_mOH * MH_MOH)
print(mH)
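# Cross-check (added illustration, not part of the textbook solution):
# the approximate pH implied by mH
print(-log10(mH)) # ~ 6.5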
|
/Modern_Physical_Chemistry_A_Molecular_Approach_by_George_H_Duffey/CH8/EX8.6/Ex8_6.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false | false | 152 |
|
\name{phylo.toBackbone}
\alias{phylo.toBackbone}
\alias{backbone.toPhylo}
\title{Converts tree to backbone or vice versa}
\usage{
phylo.toBackbone(x, trans, ...)
backbone.toPhylo(x)
}
\arguments{
\item{x}{an object of class \code{"phylo"} (for \code{phylo.toBackbone}); or an object of class \code{"backbonePhylo"} (for \code{backbone.toPhylo}).}
\item{trans}{data frame containing the attributes necessary to translate a backbone tree to an object of class \code{"backbonePhylo"}. The data frame should contain the following variables: \code{tip.label}: the tip labels in the input tree (not all need be included); \code{clade.label}: labels for the unobserved subtrees; \code{N}: number of species in each subtree; and \code{depth}: desired depth of each subtree. \code{depth} for each terminal taxon in \code{x} cannot be greater than the terminal edge length for that taxon.}
\item{...}{optional arguments.}
}
\description{
Converts between \code{"phylo"} and \code{"backbonePhylo"}.
}
\value{
Either an object of class \code{"phylo"} or an object of class \code{"backbonePhylo"}, depending on the method.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{plot.backbonePhylo}}
}
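\examples{
\dontrun{
# Illustrative sketch; the tip labels, clade labels, counts and depths
# below are hypothetical and not taken from the phytools sources
tree <- ape::rtree(n = 4)                      # random tree with tips t1..t4
trans <- data.frame(tip.label = c("t1", "t2"), # tips to expand into subtrees
    clade.label = c("cladeA", "cladeB"),       # labels for unobserved subtrees
    N = c(10, 5),                              # species per subtree
    depth = c(0.01, 0.01))                     # must not exceed terminal edges
tt <- phylo.toBackbone(tree, trans)            # "phylo" -> "backbonePhylo"
backbone.toPhylo(tt)                           # and back to "phylo"
}
}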
\keyword{phylogenetics}
\keyword{plotting}
|
/man/phylo.toBackbone.Rd
|
no_license
|
ianengelbrecht/phytools
|
R
| false | false | 1,421 |
|
set.seed(2014)
args=commandArgs(TRUE)
iBKfamF=args[1]
gBKfamF=args[2]
ifamF=args[3]
gfamF=args[4]
# example inputs (commented out):
#iBKfamF="LBKcdiChip.fam"
#gBKfamF="LBKcdgChip.fam"
#ifamF="LcdiChip.fam"
#gfamF="LcdgChip.fam"
iBKfam=read.table(iBKfamF, as.is=T)
gBKfam=read.table(gBKfamF, as.is=T)
TrT=sample(nrow(iBKfam), nrow(iBKfam)) # random permutation of individuals
iTmat=matrix(1, nrow=nrow(iBKfam), ncol=8) # cols 1-3: FID, IID, phenotype; cols 4-8: one column per CV fold
iTmat[,1] = iBKfam[,1]
iTmat[,2] = iBKfam[,2]
iTmat[,3] = iBKfam[,6]
TrT1=sample(nrow(gBKfam), nrow(gBKfam))
gTmat=matrix(1, nrow=nrow(gBKfam), ncol=8)
gTmat[,1] = gBKfam[,1]
gTmat[,2] = gBKfam[,2]
gTmat[,3] = gBKfam[,6]
len=ceiling(nrow(iBKfam)/5)
for(i in 1:4)
{
iTmat[TrT[(1+(i-1)*len):(i*len)],3+i]="0" # mask fold i (set phenotype to missing) in column 3+i
}
iTmat[TrT[(1+i*len):length(TrT)],8]="0" # i is 4 after the loop, so this masks the 5th fold in column 8
len1=ceiling(nrow(gBKfam)/5)
for(i in 1:4)
{
gTmat[TrT1[(1+(i-1)*len1):(i*len1)],3+i]="0"
}
gTmat[TrT1[(1+i*len1):length(TrT1)],8]="0"
iBKfamOutF=sub("fam$", "trt", iBKfamF)
gBKfamOutF=sub("fam$", "trt", gBKfamF)
ifamOutF=sub("fam$", "trt", ifamF)
gfamOutF=sub("fam$", "trt", gfamF)
write.table(iTmat, iBKfamOutF, row.names=F, col.names=F, quote=F)
write.table(gTmat, gBKfamOutF, row.names=F, col.names=F, quote=F)
write.table(iTmat, ifamOutF, row.names=F, col.names=F, quote=F)
write.table(gTmat, gfamOutF, row.names=F, col.names=F, quote=F)
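# Usage (with the commented example file names above):
#   Rscript 5FoldCVSplit.R LBKcdiChip.fam LBKcdgChip.fam LcdiChip.fam LcdgChip.fam
# Each "*.fam" input yields a matching "*.trt" output with five fold-masked
# phenotype columns (columns 4-8), one fold set to missing ("0") per column.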
|
/StG/GHG/5FoldCVSplit.R
|
no_license
|
gc5k/Notes
|
R
| false | false | 1,261 |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatgrid.R
\name{grid2spts}
\alias{grid2spts}
\title{grid2spts function}
\usage{
grid2spts(xgrid, ygrid, proj4string = CRS(as.character(NA)))
}
\arguments{
\item{xgrid}{vector of x centroids (equally spaced)}
\item{ygrid}{vector of y centroids (equally spaced)}
\item{proj4string}{an optional proj4string, projection string for the grid, set using the function CRS}
}
\value{
a SpatialPoints object
}
\description{
A function to convert a regular (x,y) grid of centroids into a SpatialPoints object
}
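\examples{
# Illustrative sketch (not taken from the package sources)
xg <- seq(0, 10, by = 1)   # equally spaced x centroids
yg <- seq(0, 20, by = 2)   # equally spaced y centroids
pts <- grid2spts(xg, yg)   # SpatialPoints covering the full 11 x 11 grid
}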
|
/man/grid2spts.Rd
|
no_license
|
bentaylor1/spatsurv
|
R
| false | true | 582 |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gene_ann.R
\name{get.largest.interval}
\alias{get.largest.interval}
\title{Get the largest interval for each gene, given multiple TSS and TTS annotations}
\usage{
get.largest.interval(bed = NULL)
}
\arguments{
\item{bed}{A bed6 frame with comprehensive gene annotations, defaults to NULL}
}
\value{
A bed6 frame with the largest annotation for each gene
}
\description{
The input bed6 file can be derived from a gencode annotation file,
as described in the vignette
}
\examples{
# dat0 is a bed6 frame of comprehensive gene annotations (see the vignette)
# get the interval spanning the furthest TSS and TTS for each gene
bed.long = get.largest.interval(bed=dat0)
}
|
/primaryTranscriptAnnotation/man/get.largest.interval.Rd
|
no_license
|
WarrenDavidAnderson/genomicsRpackage
|
R
| false | true | 655 |
|
#' @examples
#' \dontrun{
#' dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
#' res <- dfp_getPlacementsByStatement(dat)
#' }
|
/examples/examples-dfp_getPlacementsByStatement.R
|
no_license
|
StevenMMortimer/rdfp
|
R
| false | false | 148 |
|
library(shiny)
# Define UI for dataset viewer application
shinyUI(pageWithSidebar(
# Application title
headerPanel("Learning by doing stats (t-test tutorial)"),
# Sidebar
sidebarPanel(
p(strong("Group A:")),
sliderInput("nx", " Sample size (n)",
min =1, max = 500, 30),
numericInput("mx", " Mean", 60.00),
numericInput("sdx", " SD", 10.00),
p(br()),
p(strong("Group B:")),
sliderInput("ny", " Sample size (n)",
min =1, max = 500, 30),
numericInput("my", " Mean", 50.00),
numericInput("sdy", " SD", 10.00),
p(br()),
strong('Option:'),
checkboxInput("varequal", "t-test with equal variances assumed", TRUE)
),
mainPanel(
tabsetPanel(
tabPanel("Main",
h3("Checking the data"),
tableOutput("values"),
br(),
h3("Histogram of Group A"),
plotOutput("distPlot"),
br(),
h3("Overlayed histograms of Group A and Group B"),
plotOutput("overPlot"),
br(),
br(),
h3("Group A と Group B の n, M, SD (variance) から t 値を算出"),
a(img(src="http://mizumot.com/files/t-value.png"), target="_blank", href="http://mizumot.com/files/t-value.png"),
h3("t-test"),
verbatimTextOutput("ttest.out"),
verbatimTextOutput("vartest.out"),
verbatimTextOutput("difference.out"),
br(),
h3("t distribution"),
p('If the red line (the t value) falls outside the black dotted lines on either side, then p < .05'),
plotOutput("t.distPlot", width="80%"),
br(),
h3("Plot of means and mean of the differences [95% CI]"),
plotOutput("ciPlot", width="80%"),
h3("Effect size indices"),
verbatimTextOutput("es.out"),
br()
),
tabPanel("About",
strong('Note'),
p('This web application is developed with',
a("Shiny.", href="http://www.rstudio.com/shiny/", target="_blank"),
''),
br(),
strong('List of Packages Used'), br(),
code('library(shiny)'),br(),
code('library(compute.es)'),br(),
code('library(car)'),br(),
br(),
strong('Code'),
p('Source code for this application is based on',
a('"The handbook of Research in Foreign Language Learning and Teaching" (Takeuchi & Mizumoto, 2012).', href='http://mizumot.com/handbook/', target="_blank")),
p('The code for this web application is available at',
a('GitHub.', href='https://github.com/mizumot/tut', target="_blank")),
p('If you want to run this code on your computer (in a local R session), run the code below:',
br(),
code('library(shiny)'),br(),
code('runGitHub("tut","mizumot")')
),
br(),
strong('Citation in Publications'),
p('Mizumoto, A. (2015). Langtest (Version 1.0) [Web application]. Retrieved from http://langtest.jp'),
br(),
strong('Article'),
p('Mizumoto, A., & Plonsky, L. (2015).', a("R as a lingua franca: Advantages of using R for quantitative research in applied linguistics.", href='http://applij.oxfordjournals.org/content/early/2015/06/24/applin.amv025.abstract', target="_blank"), em('Applied Linguistics,'), 'Advance online publication. doi:10.1093/applin/amv025'),
br(),
strong('Recommended'),
p('To learn more about R, I suggest this excellent and free e-book (pdf),',
a("A Guide to Doing Statistics in Second Language Research Using R,", href="http://cw.routledge.com/textbooks/9780805861853/guide-to-R.asp", target="_blank"),
'written by Dr. Jenifer Larson-Hall.'),
p('Also, if you are a cool Mac user and want to use R with GUI,',
a("MacR", href="http://www.urano-ken.com/blog/2013/02/25/installing-and-using-macr/", target="_blank"),
'is definitely the way to go!'),
br(),
strong('Author'),
p(a("Atsushi MIZUMOTO,", href="http://mizumot.com", target="_blank"),' Ph.D.',br(),
'Associate Professor of Applied Linguistics',br(),
'Faculty of Foreign Language Studies /',br(),
'Graduate School of Foreign Language Education and Research,',br(),
'Kansai University, Osaka, Japan'),
br(),
a(img(src="http://i.creativecommons.org/p/mark/1.0/80x15.png"), target="_blank", href="http://creativecommons.org/publicdomain/mark/1.0/")
)
)
)
))
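# A minimal matching server.R sketch (hypothetical, not part of this app's
# released source) showing how the outputs referenced above could be wired:
# library(shiny)
# shinyServer(function(input, output) {
#   dataA <- reactive(rnorm(input$nx, mean = input$mx, sd = input$sdx))
#   dataB <- reactive(rnorm(input$ny, mean = input$my, sd = input$sdy))
#   output$distPlot <- renderPlot(hist(dataA(), main = "Group A"))
#   output$ttest.out <- renderPrint(
#     t.test(dataA(), dataB(), var.equal = input$varequal)
#   )
# })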
|
/ui.R
|
permissive
|
mizumot/tut
|
R
| false | false | 4,308 |
|
# some list
l <- mapply(list, x=1:12, i=rep(1:3, 4), j=rep(1:4, each=3), SIMPLIFY = FALSE)
# create a list array from a list
la <- list_array(l, 3, 4)
# a list array behaves like a matrix when subsetted
la[1,1]
la[1,1] <- 999
la
la[2,1] <- list(1,2,3) # random list
la
la[2,4] <- mtcars # data frames are also lists
la
# get and change dimensions
dim(la)
nrow(la)
ncol(la)
dim(la) <- c(4,3)
la
# more behavior
# list_array will recycle the list passed to it
# to fill the whole r x c array
la <- list_array(list(a=1, b=2), 3, 4)
# objects with any class other than list are wrapped
# into a list and then recycled
la <- list_array(mtcars, 3, 4)
|
/inst/examples/listarray-example.R
|
no_license
|
markheckmann/list-array
|
R
| false | false | 659 |
|
## ---- warning=FALSE, message=FALSE---------------------------------------
library(tibbletime)
library(dplyr)
library(lubridate)
series <- create_series('2013' ~ '2017', 'daily', class = "Date") %>%
mutate(var = rnorm(1826))
series
series %>%
mutate(year = year(date), month = month(date)) %>%
group_by(year, month) %>%
summarise(mean_var = mean(var))
## ------------------------------------------------------------------------
series %>%
collapse_by("monthly") %>%
group_by(date) %>%
summarise(mean_var = mean(var))
## ------------------------------------------------------------------------
second_series <- create_series('2013' ~ '2015', '5 second')
second_series %>%
mutate(var = rnorm(nrow(second_series))) %>%
collapse_by("hourly") %>%
group_by(date) %>%
summarise(mean_var = mean(var))
## ------------------------------------------------------------------------
set.seed(123)
# Create price series of hourly movements for apple and facebook stock.
apple <- create_series('2014' ~ '2016', period = '1 hour') %>%
mutate(price = 100 + cumsum(rnorm(26304, mean = 0, sd = .5)))
facebook <- create_series('2014' ~ '2016', period = '1 hour') %>%
mutate(price = 150 + cumsum(rnorm(26304, mean = 0, sd = .5)))
# Bind them together and create a symbol column to group on
price_series <- bind_rows(list(apple = apple, facebook = facebook), .id = "symbol") %>%
as_tbl_time(date) %>%
group_by(symbol)
# Collapse to daily and transform to OHLC (Open, High, Low, Close), a
# common financial transformation
price_series %>%
collapse_by("daily") %>%
group_by(date, add = TRUE) %>%
summarise(
open = first(price),
high = max(price),
low = min(price),
close = last(price)
) %>%
slice(1:5)
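## A possible extension (sketch; the `side` argument is assumed to exist in
## the installed tibbletime version and is not part of the original vignette):
# price_series %>%
#   collapse_by("daily", side = "start") %>%  # stamp rows with period starts
#   group_by(date, add = TRUE) %>%
#   summarise(mean_price = mean(price))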
|
/revdep/library.noindex/tibbletime/old/tibbletime/doc/TT-04-use-with-dplyr.R
|
no_license
|
sstoeckl/tibbletime
|
R
| false | false | 1,752 |
|
\name{makeGrid.death.r1}
\alias{makeGrid.death.r1}
\title{Evaluates expected deaths ODE over 2D grid of arguments}
\usage{
makeGrid.death.r1(time, dt, s1.seq, s2.seq, lam, v, mu)
}
\arguments{
\item{time}{A number corresponding to the desired
evaluation time of ODEs}
\item{dt}{A number giving the increment length used in
solving the ODE}
\item{s1.seq}{A vector of complex numbers; initial values
of the ODE G}
\item{s2.seq}{A vector of complex numbers; the second set of
initial values for the ODE}
\item{lam}{Birth rate}
\item{v}{Shift rate}
\item{mu}{Death rate}
}
\value{
A matrix of dimension length(s1.seq) by length(s2.seq) of
the function values
}
\description{
Applies the function \code{\link{solve.death}} to a grid
of inputs s1, s2 at one fixed time and r=1
}
\examples{
time = 5; dt = 5; lam = .5; v = .2; mu = .4
gridLength = 32
s1.seq <- exp(2*pi*1i*seq(from = 0, to = (gridLength-1))/gridLength)
s2.seq <- exp(2*pi*1i*seq(from = 0, to = (gridLength-1))/gridLength)
makeGrid.death.r1(time,dt,s1.seq,s2.seq,lam,v,mu)
}
|
/man/makeGrid.death.r1.Rd
|
no_license
|
shubhampachori12110095/bdsem
|
R
| false | false | 1,048 |
|
heatmap.3 <- function(x,
Rowv = TRUE, Colv = if (symm) "Rowv" else TRUE,
distfun = dist,
hclustfun = hclust,
dendrogram = c("both","row", "column", "none"),
symm = FALSE,
scale = c("none","row", "column"),
na.rm = TRUE,
revC = identical(Colv,"Rowv"),
add.expr,
breaks,
symbreaks = max(x < 0, na.rm = TRUE) || scale != "none",
col = "heat.colors",
colsep,
rowsep,
sepcolor = "white",
sepwidth = c(0.05, 0.05),
cellnote,
notecex = 1,
notecol = "cyan",
na.color = par("bg"),
trace = c("none", "column","row", "both"),
tracecol = "cyan",
hline = median(breaks),
vline = median(breaks),
linecol = tracecol,
margins = c(5,5),
ColSideColors,
RowSideColors,
side.height.fraction=0.3,
cexRow = 0.2 + 1/log10(nr),
cexCol = 0.2 + 1/log10(nc),
labRow = NULL,
labCol = NULL,
key = TRUE,
keysize = 1.5,
density.info = c("none", "histogram", "density"),
denscol = tracecol,
symkey = max(x < 0, na.rm = TRUE) || symbreaks,
densadj = 0.25,
main = NULL,
xlab = NULL,
ylab = NULL,
lmat = NULL,
lhei = NULL,
lwid = NULL,
ColSideColorsSize = 1,
RowSideColorsSize = 1,
KeyValueName="Value",...){
invalid <- function (x) {
if (missing(x) || is.null(x) || length(x) == 0)
return(TRUE)
if (is.list(x))
return(all(sapply(x, invalid)))
else if (is.vector(x))
return(all(is.na(x)))
else return(FALSE)
}
x <- as.matrix(x)
scale01 <- function(x, low = min(x), high = max(x)) {
x <- (x - low)/(high - low)
x
}
retval <- list()
scale <- if (symm && missing(scale))
"none"
else match.arg(scale)
dendrogram <- match.arg(dendrogram)
trace <- match.arg(trace)
density.info <- match.arg(density.info)
if (length(col) == 1 && is.character(col))
col <- get(col, mode = "function")
if (!missing(breaks) && (scale != "none"))
warning("Using scale=\"row\" or scale=\"column\" when breaks are",
"specified can produce unpredictable results.", "Please consider using only one or the other.")
if (is.null(Rowv) || is.na(Rowv))
Rowv <- FALSE
if (is.null(Colv) || is.na(Colv))
Colv <- FALSE
else if (Colv == "Rowv" && !isTRUE(Rowv))
Colv <- FALSE
if (length(di <- dim(x)) != 2 || !is.numeric(x))
stop("`x' must be a numeric matrix")
nr <- di[1]
nc <- di[2]
if (nr <= 1 || nc <= 1)
stop("`x' must have at least 2 rows and 2 columns")
if (!is.numeric(margins) || length(margins) != 2)
stop("`margins' must be a numeric vector of length 2")
if (missing(cellnote))
cellnote <- matrix("", ncol = ncol(x), nrow = nrow(x))
if (!inherits(Rowv, "dendrogram")) {
if (((!isTRUE(Rowv)) || (is.null(Rowv))) && (dendrogram %in%
c("both", "row"))) {
if (is.logical(Colv) && (Colv))
dendrogram <- "column"
else dendrogram <- "none"
warning("Discrepancy: Rowv is FALSE, while dendrogram is `",
dendrogram, "'. Omitting row dendogram.")
}
}
if (!inherits(Colv, "dendrogram")) {
if (((!isTRUE(Colv)) || (is.null(Colv))) && (dendrogram %in%
c("both", "column"))) {
if (is.logical(Rowv) && (Rowv))
dendrogram <- "row"
else dendrogram <- "none"
warning("Discrepancy: Colv is FALSE, while dendrogram is `",
dendrogram, "'. Omitting column dendogram.")
}
}
if (inherits(Rowv, "dendrogram")) {
ddr <- Rowv
rowInd <- order.dendrogram(ddr)
}
else if (is.integer(Rowv)) {
hcr <- hclustfun(distfun(x))
ddr <- as.dendrogram(hcr)
ddr <- reorder(ddr, Rowv)
rowInd <- order.dendrogram(ddr)
if (nr != length(rowInd))
stop("row dendrogram ordering gave index of wrong length")
}
else if (isTRUE(Rowv)) {
Rowv <- rowMeans(x, na.rm = na.rm)
hcr <- hclustfun(distfun(x))
ddr <- as.dendrogram(hcr)
ddr <- reorder(ddr, Rowv)
rowInd <- order.dendrogram(ddr)
if (nr != length(rowInd))
stop("row dendrogram ordering gave index of wrong length")
}
else {
rowInd <- nr:1
}
if (inherits(Colv, "dendrogram")) {
ddc <- Colv
colInd <- order.dendrogram(ddc)
}
else if (identical(Colv, "Rowv")) {
if (nr != nc)
stop("Colv = \"Rowv\" but nrow(x) != ncol(x)")
if (exists("ddr")) {
ddc <- ddr
colInd <- order.dendrogram(ddc)
}
else colInd <- rowInd
}
else if (is.integer(Colv)) {
hcc <- hclustfun(distfun(if (symm)
x
else t(x)))
ddc <- as.dendrogram(hcc)
ddc <- reorder(ddc, Colv)
colInd <- order.dendrogram(ddc)
if (nc != length(colInd))
stop("column dendrogram ordering gave index of wrong length")
}
else if (isTRUE(Colv)) {
Colv <- colMeans(x, na.rm = na.rm)
hcc <- hclustfun(distfun(if (symm)
x
else t(x)))
ddc <- as.dendrogram(hcc)
ddc <- reorder(ddc, Colv)
colInd <- order.dendrogram(ddc)
if (nc != length(colInd))
stop("column dendrogram ordering gave index of wrong length")
}
else {
colInd <- 1:nc
}
retval$rowInd <- rowInd
retval$colInd <- colInd
retval$call <- match.call()
x <- x[rowInd, colInd]
x.unscaled <- x
cellnote <- cellnote[rowInd, colInd]
if (is.null(labRow))
labRow <- if (is.null(rownames(x)))
(1:nr)[rowInd]
else rownames(x)
else labRow <- labRow[rowInd]
if (is.null(labCol))
labCol <- if (is.null(colnames(x)))
(1:nc)[colInd]
else colnames(x)
else labCol <- labCol[colInd]
if (scale == "row") {
retval$rowMeans <- rm <- rowMeans(x, na.rm = na.rm)
x <- sweep(x, 1, rm)
retval$rowSDs <- sx <- apply(x, 1, sd, na.rm = na.rm)
x <- sweep(x, 1, sx, "/")
}
else if (scale == "column") {
retval$colMeans <- rm <- colMeans(x, na.rm = na.rm)
x <- sweep(x, 2, rm)
retval$colSDs <- sx <- apply(x, 2, sd, na.rm = na.rm)
x <- sweep(x, 2, sx, "/")
}
if (missing(breaks) || is.null(breaks) || length(breaks) < 1) {
if (missing(col) || is.function(col))
breaks <- 16
else breaks <- length(col) + 1
}
if (length(breaks) == 1) {
if (!symbreaks)
breaks <- seq(min(x, na.rm = na.rm), max(x, na.rm = na.rm),
length = breaks)
else {
extreme <- max(abs(x), na.rm = TRUE)
breaks <- seq(-extreme, extreme, length = breaks)
}
}
nbr <- length(breaks)
ncol <- length(breaks) - 1
if (is.function(col))
col <- col(ncol)
min.breaks <- min(breaks)
max.breaks <- max(breaks)
x[x < min.breaks] <- min.breaks
x[x > max.breaks] <- max.breaks
if (missing(lhei) || is.null(lhei))
lhei <- c(keysize, 4)
if (missing(lwid) || is.null(lwid))
lwid <- c(keysize, 4)
if (missing(lmat) || is.null(lmat)) {
lmat <- rbind(4:3, 2:1)
if (!missing(ColSideColors)) {
#if (!is.matrix(ColSideColors))
#stop("'ColSideColors' must be a matrix")
if (!is.character(ColSideColors) || nrow(ColSideColors) != nc)
stop("'ColSideColors' must be a character matrix with ncol(x) rows")
lmat <- rbind(lmat[1, ] + 1, c(NA, 1), lmat[2, ] + 1)
#lhei <- c(lhei[1], 0.2, lhei[2])
lhei=c(lhei[1], side.height.fraction*ColSideColorsSize/2, lhei[2])
}
if (!missing(RowSideColors)) {
#if (!is.matrix(RowSideColors))
#stop("'RowSideColors' must be a matrix")
if (!is.character(RowSideColors) || ncol(RowSideColors) != nr)
stop("'RowSideColors' must be a character matrix with nrow(x) columns")
lmat <- cbind(lmat[, 1] + 1, c(rep(NA, nrow(lmat) - 1), 1), lmat[,2] + 1)
#lwid <- c(lwid[1], 0.2, lwid[2])
lwid <- c(lwid[1], side.height.fraction*RowSideColorsSize/2, lwid[2])
}
lmat[is.na(lmat)] <- 0
}
if (length(lhei) != nrow(lmat))
stop("lhei must have length = nrow(lmat) = ", nrow(lmat))
if (length(lwid) != ncol(lmat))
stop("lwid must have length = ncol(lmat) =", ncol(lmat))
op <- par(no.readonly = TRUE)
on.exit(par(op))
layout(lmat, widths = lwid, heights = lhei, respect = FALSE)
if (!missing(RowSideColors)) {
if (!is.matrix(RowSideColors)){
par(mar = c(margins[1], 0, 0, 0.5))
image(rbind(1:nr), col = RowSideColors[rowInd], axes = FALSE)
} else {
par(mar = c(margins[1], 0, 0, 0.5))
rsc = t(RowSideColors[,rowInd, drop=F])
rsc.colors = matrix()
rsc.names = names(table(rsc))
rsc.i = 1
for (rsc.name in rsc.names) {
rsc.colors[rsc.i] = rsc.name
rsc[rsc == rsc.name] = rsc.i
rsc.i = rsc.i + 1
}
rsc = matrix(as.numeric(rsc), nrow = dim(rsc)[1])
image(t(rsc), col = as.vector(rsc.colors), axes = FALSE)
if (length(rownames(RowSideColors)) > 0) {
axis(1, 0:(dim(rsc)[2] - 1)/max(1,(dim(rsc)[2] - 1)), rownames(RowSideColors), las = 2, tick = FALSE)
}
}
}
if (!missing(ColSideColors)) {
if (!is.matrix(ColSideColors)){
par(mar = c(0.5, 0, 0, margins[2]))
image(cbind(1:nc), col = ColSideColors[colInd], axes = FALSE)
} else {
par(mar = c(0.5, 0, 0, margins[2]))
csc = ColSideColors[colInd, , drop=F]
csc.colors = matrix()
csc.names = names(table(csc))
csc.i = 1
for (csc.name in csc.names) {
csc.colors[csc.i] = csc.name
csc[csc == csc.name] = csc.i
csc.i = csc.i + 1
}
csc = matrix(as.numeric(csc), nrow = dim(csc)[1])
image(csc, col = as.vector(csc.colors), axes = FALSE)
if (length(colnames(ColSideColors)) > 0) {
axis(2, 0:(dim(csc)[2] - 1)/max(1,(dim(csc)[2] - 1)), colnames(ColSideColors), las = 2, tick = FALSE)
}
}
}
par(mar = c(margins[1], 0, 0, margins[2]))
x <- t(x)
cellnote <- t(cellnote)
if (revC) {
iy <- nr:1
if (exists("ddr"))
ddr <- rev(ddr)
x <- x[, iy]
cellnote <- cellnote[, iy]
}
else iy <- 1:nr
image(1:nc, 1:nr, x, xlim = 0.5 + c(0, nc), ylim = 0.5 + c(0, nr), axes = FALSE, xlab = "", ylab = "", col = col, breaks = breaks, ...)
retval$carpet <- x
if (exists("ddr"))
retval$rowDendrogram <- ddr
if (exists("ddc"))
retval$colDendrogram <- ddc
retval$breaks <- breaks
retval$col <- col
if (!invalid(na.color) & any(is.na(x))) {
mmat <- ifelse(is.na(x), 1, NA)
image(1:nc, 1:nr, mmat, axes = FALSE, xlab = "", ylab = "",
col = na.color, add = TRUE)
}
axis(1, 1:nc, labels = labCol, las = 2, line = -0.5, tick = 0,
cex.axis = cexCol)
if (!is.null(xlab))
mtext(xlab, side = 1, line = margins[1] - 1.25)
axis(4, iy, labels = labRow, las = 2, line = -0.5, tick = 0,
cex.axis = cexRow)
if (!is.null(ylab))
mtext(ylab, side = 4, line = margins[2] - 1.25)
if (!missing(add.expr))
eval(substitute(add.expr))
if (!missing(colsep))
for (csep in colsep) rect(xleft = csep + 0.5, ybottom = rep(0, length(csep)), xright = csep + 0.5 + sepwidth[1], ytop = rep(ncol(x) + 1, csep), lty = 1, lwd = 1, col = sepcolor, border = sepcolor)
if (!missing(rowsep))
for (rsep in rowsep) rect(xleft = 0, ybottom = (ncol(x) + 1 - rsep) - 0.5, xright = nrow(x) + 1, ytop = (ncol(x) + 1 - rsep) - 0.5 - sepwidth[2], lty = 1, lwd = 1, col = sepcolor, border = sepcolor)
min.scale <- min(breaks)
max.scale <- max(breaks)
x.scaled <- scale01(t(x), min.scale, max.scale)
if (trace %in% c("both", "column")) {
retval$vline <- vline
vline.vals <- scale01(vline, min.scale, max.scale)
for (i in colInd) {
if (!is.null(vline)) {
abline(v = i - 0.5 + vline.vals, col = linecol,
lty = 2)
}
xv <- rep(i, nrow(x.scaled)) + x.scaled[, i] - 0.5
xv <- c(xv[1], xv)
yv <- 1:length(xv) - 0.5
lines(x = xv, y = yv, lwd = 1, col = tracecol, type = "s")
}
}
if (trace %in% c("both", "row")) {
retval$hline <- hline
hline.vals <- scale01(hline, min.scale, max.scale)
for (i in rowInd) {
if (!is.null(hline)) {
abline(h = i + hline, col = linecol, lty = 2)
}
yv <- rep(i, ncol(x.scaled)) + x.scaled[i, ] - 0.5
yv <- rev(c(yv[1], yv))
xv <- length(yv):1 - 0.5
lines(x = xv, y = yv, lwd = 1, col = tracecol, type = "s")
}
}
if (!missing(cellnote))
text(x = c(row(cellnote)), y = c(col(cellnote)), labels = c(cellnote),
col = notecol, cex = notecex)
par(mar = c(margins[1], 0, 0, 0))
if (dendrogram %in% c("both", "row")) {
plot(ddr, horiz = TRUE, axes = FALSE, yaxs = "i", leaflab = "none")
}
else plot.new()
par(mar = c(0, 0, if (!is.null(main)) 5 else 0, margins[2]))
if (dendrogram %in% c("both", "column")) {
plot(ddc, axes = FALSE, xaxs = "i", leaflab = "none")
}
else plot.new()
if (!is.null(main))
title(main, cex.main = 1.5 * op[["cex.main"]])
if (key) {
par(mar = c(5, 4, 2, 1), cex = 0.75)
tmpbreaks <- breaks
if (symkey) {
max.raw <- max(abs(c(x, breaks)), na.rm = TRUE)
min.raw <- -max.raw
tmpbreaks[1] <- -max(abs(x), na.rm = TRUE)
tmpbreaks[length(tmpbreaks)] <- max(abs(x), na.rm = TRUE)
}
else {
min.raw <- min(x, na.rm = TRUE)
max.raw <- max(x, na.rm = TRUE)
}
z <- seq(min.raw, max.raw, length = length(col))
image(z = matrix(z, ncol = 1), col = col, breaks = tmpbreaks,
xaxt = "n", yaxt = "n")
par(usr = c(0, 1, 0, 1))
lv <- pretty(breaks)
xv <- scale01(as.numeric(lv), min.raw, max.raw)
axis(1, at = xv, labels = lv)
if (scale == "row")
mtext(side = 1, "Row Z-Score", line = 2)
else if (scale == "column")
mtext(side = 1, "Column Z-Score", line = 2)
else mtext(side = 1, KeyValueName, line = 2)
if (density.info == "density") {
dens <- density(x, adjust = densadj, na.rm = TRUE)
omit <- dens$x < min(breaks) | dens$x > max(breaks)
dens$x <- dens$x[!omit]  # logical mask, so use !omit (not -omit) to drop points
dens$y <- dens$y[!omit]
dens$x <- scale01(dens$x, min.raw, max.raw)
lines(dens$x, dens$y/max(dens$y) * 0.95, col = denscol,
lwd = 1)
axis(2, at = pretty(dens$y)/max(dens$y) * 0.95, pretty(dens$y))
title("Color Key\nand Density Plot")
par(cex = 0.5)
mtext(side = 2, "Density", line = 2)
}
else if (density.info == "histogram") {
h <- hist(x, plot = FALSE, breaks = breaks)
hx <- scale01(breaks, min.raw, max.raw)
hy <- c(h$counts, h$counts[length(h$counts)])
lines(hx, hy/max(hy) * 0.95, lwd = 1, type = "s",
col = denscol)
axis(2, at = pretty(hy)/max(hy) * 0.95, pretty(hy))
title("Color Key\nand Histogram")
par(cex = 0.5)
mtext(side = 2, "Count", line = 2)
}
else title("Color Key")
}
else plot.new()
retval$colorTable <- data.frame(low = retval$breaks[-length(retval$breaks)],
high = retval$breaks[-1], color = retval$col)
invisible(retval)
}
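# Minimal usage sketch for heatmap.3 on a toy matrix (illustrative only;
# kept commented out so that sourcing this script is unaffected):
# m <- matrix(rnorm(100), nrow = 10,
#             dimnames = list(paste0("g", 1:10), paste0("s", 1:10)))
# heatmap.3(m, trace = "none", density.info = "histogram")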
# Breast Normal Data Preparation
meldat <- read.csv("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/Processed Matrices/GSE75688_BreastNor_RAW_CDF.csv", header = TRUE, stringsAsFactors = FALSE)
sampleinfo <- read.csv("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/Processed Matrices/GSE75688_Breast_NorAnnotation.csv", header = TRUE, stringsAsFactors = F)
sampleinfo <- sampleinfo[ order(sampleinfo[,1]), ]
# Bcell=1 ; Tcell=2 ; Myeloid=3 ; Stromal=4
sampleinfo$Cell.Type <- sub("Bcell", 1, sampleinfo$Cell.Type)
sampleinfo$Cell.Type <- sub("Tcell", 2, sampleinfo$Cell.Type)
sampleinfo$Cell.Type <- sub("Myeloid", 3, sampleinfo$Cell.Type)
sampleinfo$Cell.Type <- sub("Stromal", 4, sampleinfo$Cell.Type)
meldat <- meldat[!duplicated(meldat[ , 1]), ]
rownames(meldat) <- meldat$Gene
meldat <- meldat[,-1]
meldat <- rbind(as.numeric(sub("BC", "", sampleinfo$Patient)), as.numeric(sampleinfo$Cell.Type), meldat)
rownames(meldat)[1:2] <- c("Patient_ID", "Cell_Type")
meldat <- meldat[ ,order(meldat[2,], meldat[1,])] # order columns by cell type, then by tumor/patient ID
meldat <- meldat[apply(meldat, 1, function(x) sum(is.na(x)) < (ncol(meldat)*(0.7))), ] # keep genes with < 70% missing values
### Colors Customization
library(RColorBrewer)
grcol <- colorRampPalette(c("green","red"))(64) #heat maps color keys
library(cluster) #General color Idex
colors = c("#e6194B", "#3cb44b", "#ffe119", "#4363d8", "#f58231", "#911eb4", "#42d4f4", "#f032e6", "#bfef45", "#fabebe"
, "#469990", "#e6beff", "#9A6324", "#800000", "#aaffc3", "#808000", "#ffd8b1", "#000075", "#a9a9a9", "#000000")
### Tumor Identity Colors labeling
tumors_colors=rep(colors[1], 198)
for(i in 2:length(table(as.numeric(meldat[1,])))){
tumors_colors[meldat[1, ]==names(table(as.numeric(meldat[1,])))[i]]=colors[i]
}
tumors_colors <- as.matrix(tumors_colors) # Legend Attached Form
colorss = c("#ffe119", "#f58231", "#42d4f4", "#469990", "#800000", "#aaffc3")
Cell_type <- rep(colorss[1], 198)
for(i in 2:length(table(as.numeric(meldat[2,])))){
Cell_type[meldat[2, ]==names(table(as.numeric(meldat[2,])))[i]]=colorss[i]
}
Cell_type <- as.matrix(Cell_type) # Legend Attached Form
clab <- cbind(Cell_type, tumors_colors)
colnames(clab)=c("Cell_Type", "Tumor_ID") # Legend Label Name
### Duplicate input and convert NAs to "0" for "Zero version"
AA <- meldat[3:nrow(meldat), ]
AA[is.na(AA)]=0 # "AA" ready for Kmeans
CC <- meldat[3:nrow(meldat), ]
### Duplicate input and convert NAs to "0" for "Mean version"
AA <- meldat[3:nrow(meldat), ]
for (i in 1:nrow(AA)) {
AA[ i, is.na(AA[i, ])] <- mean(na.omit(as.numeric(AA[i, ])))
}
CC <- meldat[3:nrow(meldat), ]
### TCGA Methods: ConsensusClusterPlus
# try http:// if https:// URLs are not supported
# source("https://bioconductor.org/biocLite.R")
# biocLite("ConsensusClusterPlus")
library(ConsensusClusterPlus)
title=paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/ConsensusClusterPlus.Figures", sep = "")
p_time <- proc.time()
resultss = ConsensusClusterPlus(t(AA), maxK=12, reps=100, pItem=0.8, pFeature=1,
title=title, clusterAlg="km", distance="euclidean", plot="png")
icl = calcICL(resultss, title=title, plot="png")
#icl[["clusterConsensus"]]
#icl[["itemConsensus"]][1:5,]
t_time <- proc.time()-p_time
print(t_time)
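# Structure note: each element of `resultss` (one per k) is a list whose
# fields used below are consensusClass (item cluster assignments) and
# clrs (cluster colors), as documented by ConsensusClusterPlus.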
p_time <- proc.time()
for(k in 2:12){
BB <- CC[order(resultss[[k]][["consensusClass"]]), ]
main_title=paste("GSE75688_Breast_CCP_k=", k, sep = "")
par(cex.main=0.5)
CCP_colors <- resultss[[k]]$clrs[[3]]
plot(1:k, col=CCP_colors, pch=16, cex=6)
Cluster_Colors=rep(CCP_colors[1], 5957) # 5957 = number of gene rows in CC
for(j in 2:k){
Cluster_Colors[sort(resultss[[k]][["consensusClass"]])==j]=CCP_colors[j]
}
plot(1:length(Cluster_Colors), col=Cluster_Colors, pch=16, cex=3)
Cluster_Colors <- as.matrix(t(Cluster_Colors))
rownames(Cluster_Colors)=c("Clusters")
tiff(paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/K_Heatmaps/", main_title, ".tiff", sep=""), width=2200, height=1600, compression="lzw", res=300)
heatmap.3(BB, na.rm = TRUE, scale="none", dendrogram="none", margins=c(6,12), RowSideColors=Cluster_Colors,
Rowv=FALSE, Colv=FALSE, ColSideColors=clab, symbreaks=FALSE, key=TRUE, symkey=FALSE,
density.info="none", trace="none", main=main_title, labCol=FALSE, labRow=FALSE, cexRow=1, col=grcol,
ColSideColorsSize=2, RowSideColorsSize=1)
par(xpd=T)
legend("bottomleft",legend=c(paste("ConsenClus", 1:k, sep = "")), fill=CCP_colors[1:k], border=FALSE, bty="n", y.intersp = 1, cex=0.7)
legend("topright",legend=c("Bcell", "Tcell", "Myeloid", "Stromal", "", names(table(as.numeric(meldat[1,])))), fill=c(colorss[1:4], "white", colors[1:length(names(table(as.character(meldat[1,]))))]), border=FALSE, bty="n", y.intersp = 1, cex=0.7)
dev.off()
for(y in 1:as.numeric(k)){
k <- as.numeric(k)
y <- as.numeric(y)
CGset <- rownames(CC)[resultss[[k]][["consensusClass"]]==y]
if(as.numeric(k) < 10){ # zero-pad k for the output file name
k <- as.numeric(k)
k <- paste("0", k, sep = "")
}
if(as.numeric(y) < 10){
y <- paste("0", y, sep = "")
}
write.table(CGset, file = paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/ClusterGeneLists/GSE75688_Breast_ClusGenes_K", k, "C", y, ".csv", sep = ""), quote = F, row.names = F, sep = ",")
}
}
t_time <- proc.time()-p_time
print(t_time)
# Extract clusters colors information
for(i in 2:12){
g1 <- resultss[[i]][[3]]
dfg1 <- data.frame(Gene = names(g1), Group = as.numeric(g1), stringsAsFactors = FALSE)
col1 <- unlist(resultss[[i]][[5]][1])
dfg1$Col <- col1
if(i < 10){
i = paste("0",i,sep ="")
}
file1 <- paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/GeneColorsLists/GSE75688_Breast_CPP_K=", i, ".csv",sep = "")
write.csv(dfg1, file1, row.names = FALSE)
}
|
/CaseSpecific/GSE75688_Breast_NorMean_optiK.R
|
no_license
|
YuWei-Lin/scRNA
|
R
| false | false | 22,260 |
r
|
heatmap.3 <- function(x,
Rowv = TRUE, Colv = if (symm) "Rowv" else TRUE,
distfun = dist,
hclustfun = hclust,
dendrogram = c("both","row", "column", "none"),
symm = FALSE,
scale = c("none","row", "column"),
na.rm = TRUE,
revC = identical(Colv,"Rowv"),
add.expr,
breaks,
symbreaks = max(x < 0, na.rm = TRUE) || scale != "none",
col = "heat.colors",
colsep,
rowsep,
sepcolor = "white",
sepwidth = c(0.05, 0.05),
cellnote,
notecex = 1,
notecol = "cyan",
na.color = par("bg"),
trace = c("none", "column","row", "both"),
tracecol = "cyan",
hline = median(breaks),
vline = median(breaks),
linecol = tracecol,
margins = c(5,5),
ColSideColors,
RowSideColors,
side.height.fraction=0.3,
cexRow = 0.2 + 1/log10(nr),
cexCol = 0.2 + 1/log10(nc),
labRow = NULL,
labCol = NULL,
key = TRUE,
keysize = 1.5,
density.info = c("none", "histogram", "density"),
denscol = tracecol,
symkey = max(x < 0, na.rm = TRUE) || symbreaks,
densadj = 0.25,
main = NULL,
xlab = NULL,
ylab = NULL,
lmat = NULL,
lhei = NULL,
lwid = NULL,
ColSideColorsSize = 1,
RowSideColorsSize = 1,
KeyValueName="Value",...){
invalid <- function (x) {
if (missing(x) || is.null(x) || length(x) == 0)
return(TRUE)
if (is.list(x))
return(all(sapply(x, invalid)))
else if (is.vector(x))
return(all(is.na(x)))
else return(FALSE)
}
x <- as.matrix(x)
scale01 <- function(x, low = min(x), high = max(x)) {
x <- (x - low)/(high - low)
x
}
retval <- list()
scale <- if (symm && missing(scale))
"none"
else match.arg(scale)
dendrogram <- match.arg(dendrogram)
trace <- match.arg(trace)
density.info <- match.arg(density.info)
if (length(col) == 1 && is.character(col))
col <- get(col, mode = "function")
if (!missing(breaks) && (scale != "none"))
warning("Using scale=\"row\" or scale=\"column\" when breaks are",
"specified can produce unpredictable results.", "Please consider using only one or the other.")
if (is.null(Rowv) || is.na(Rowv))
Rowv <- FALSE
if (is.null(Colv) || is.na(Colv))
Colv <- FALSE
else if (Colv == "Rowv" && !isTRUE(Rowv))
Colv <- FALSE
if (length(di <- dim(x)) != 2 || !is.numeric(x))
stop("`x' must be a numeric matrix")
nr <- di[1]
nc <- di[2]
if (nr <= 1 || nc <= 1)
stop("`x' must have at least 2 rows and 2 columns")
if (!is.numeric(margins) || length(margins) != 2)
stop("`margins' must be a numeric vector of length 2")
if (missing(cellnote))
cellnote <- matrix("", ncol = ncol(x), nrow = nrow(x))
if (!inherits(Rowv, "dendrogram")) {
if (((!isTRUE(Rowv)) || (is.null(Rowv))) && (dendrogram %in%
c("both", "row"))) {
if (is.logical(Colv) && (Colv))
dendrogram <- "column"
else dedrogram <- "none"
warning("Discrepancy: Rowv is FALSE, while dendrogram is `",
dendrogram, "'. Omitting row dendogram.")
}
}
if (!inherits(Colv, "dendrogram")) {
if (((!isTRUE(Colv)) || (is.null(Colv))) && (dendrogram %in%
c("both", "column"))) {
if (is.logical(Rowv) && (Rowv))
dendrogram <- "row"
else dendrogram <- "none"
warning("Discrepancy: Colv is FALSE, while dendrogram is `",
dendrogram, "'. Omitting column dendogram.")
}
}
if (inherits(Rowv, "dendrogram")) {
ddr <- Rowv
rowInd <- order.dendrogram(ddr)
}
else if (is.integer(Rowv)) {
hcr <- hclustfun(distfun(x))
ddr <- as.dendrogram(hcr)
ddr <- reorder(ddr, Rowv)
rowInd <- order.dendrogram(ddr)
if (nr != length(rowInd))
stop("row dendrogram ordering gave index of wrong length")
}
else if (isTRUE(Rowv)) {
Rowv <- rowMeans(x, na.rm = na.rm)
hcr <- hclustfun(distfun(x))
ddr <- as.dendrogram(hcr)
ddr <- reorder(ddr, Rowv)
rowInd <- order.dendrogram(ddr)
if (nr != length(rowInd))
stop("row dendrogram ordering gave index of wrong length")
}
else {
rowInd <- nr:1
}
if (inherits(Colv, "dendrogram")) {
ddc <- Colv
colInd <- order.dendrogram(ddc)
}
else if (identical(Colv, "Rowv")) {
if (nr != nc)
stop("Colv = \"Rowv\" but nrow(x) != ncol(x)")
if (exists("ddr")) {
ddc <- ddr
colInd <- order.dendrogram(ddc)
}
else colInd <- rowInd
}
else if (is.integer(Colv)) {
hcc <- hclustfun(distfun(if (symm)
x
else t(x)))
ddc <- as.dendrogram(hcc)
ddc <- reorder(ddc, Colv)
colInd <- order.dendrogram(ddc)
if (nc != length(colInd))
stop("column dendrogram ordering gave index of wrong length")
}
else if (isTRUE(Colv)) {
Colv <- colMeans(x, na.rm = na.rm)
hcc <- hclustfun(distfun(if (symm)
x
else t(x)))
ddc <- as.dendrogram(hcc)
ddc <- reorder(ddc, Colv)
colInd <- order.dendrogram(ddc)
if (nc != length(colInd))
stop("column dendrogram ordering gave index of wrong length")
}
else {
colInd <- 1:nc
}
retval$rowInd <- rowInd
retval$colInd <- colInd
retval$call <- match.call()
x <- x[rowInd, colInd]
x.unscaled <- x
cellnote <- cellnote[rowInd, colInd]
if (is.null(labRow))
labRow <- if (is.null(rownames(x)))
(1:nr)[rowInd]
else rownames(x)
else labRow <- labRow[rowInd]
if (is.null(labCol))
labCol <- if (is.null(colnames(x)))
(1:nc)[colInd]
else colnames(x)
else labCol <- labCol[colInd]
if (scale == "row") {
retval$rowMeans <- rm <- rowMeans(x, na.rm = na.rm)
x <- sweep(x, 1, rm)
retval$rowSDs <- sx <- apply(x, 1, sd, na.rm = na.rm)
x <- sweep(x, 1, sx, "/")
}
else if (scale == "column") {
retval$colMeans <- rm <- colMeans(x, na.rm = na.rm)
x <- sweep(x, 2, rm)
retval$colSDs <- sx <- apply(x, 2, sd, na.rm = na.rm)
x <- sweep(x, 2, sx, "/")
}
if (missing(breaks) || is.null(breaks) || length(breaks) < 1) {
if (missing(col) || is.function(col))
breaks <- 16
else breaks <- length(col) + 1
}
if (length(breaks) == 1) {
if (!symbreaks)
breaks <- seq(min(x, na.rm = na.rm), max(x, na.rm = na.rm),
length = breaks)
else {
extreme <- max(abs(x), na.rm = TRUE)
breaks <- seq(-extreme, extreme, length = breaks)
}
}
nbr <- length(breaks)
ncol <- length(breaks) - 1
    if (is.function(col))
col <- col(ncol)
min.breaks <- min(breaks)
max.breaks <- max(breaks)
x[x < min.breaks] <- min.breaks
x[x > max.breaks] <- max.breaks
if (missing(lhei) || is.null(lhei))
lhei <- c(keysize, 4)
if (missing(lwid) || is.null(lwid))
lwid <- c(keysize, 4)
if (missing(lmat) || is.null(lmat)) {
lmat <- rbind(4:3, 2:1)
if (!missing(ColSideColors)) {
#if (!is.matrix(ColSideColors))
#stop("'ColSideColors' must be a matrix")
            if (!is.character(ColSideColors) || nrow(ColSideColors) != nc)
                stop("'ColSideColors' must be a character matrix with ncol(x) rows")
lmat <- rbind(lmat[1, ] + 1, c(NA, 1), lmat[2, ] + 1)
#lhei <- c(lhei[1], 0.2, lhei[2])
lhei=c(lhei[1], side.height.fraction*ColSideColorsSize/2, lhei[2])
}
if (!missing(RowSideColors)) {
#if (!is.matrix(RowSideColors))
#stop("'RowSideColors' must be a matrix")
            if (!is.character(RowSideColors) || ncol(RowSideColors) != nr)
                stop("'RowSideColors' must be a character matrix with nrow(x) columns")
lmat <- cbind(lmat[, 1] + 1, c(rep(NA, nrow(lmat) - 1), 1), lmat[,2] + 1)
#lwid <- c(lwid[1], 0.2, lwid[2])
lwid <- c(lwid[1], side.height.fraction*RowSideColorsSize/2, lwid[2])
}
lmat[is.na(lmat)] <- 0
}
if (length(lhei) != nrow(lmat))
stop("lhei must have length = nrow(lmat) = ", nrow(lmat))
if (length(lwid) != ncol(lmat))
stop("lwid must have length = ncol(lmat) =", ncol(lmat))
op <- par(no.readonly = TRUE)
on.exit(par(op))
layout(lmat, widths = lwid, heights = lhei, respect = FALSE)
if (!missing(RowSideColors)) {
if (!is.matrix(RowSideColors)){
par(mar = c(margins[1], 0, 0, 0.5))
image(rbind(1:nr), col = RowSideColors[rowInd], axes = FALSE)
} else {
par(mar = c(margins[1], 0, 0, 0.5))
rsc = t(RowSideColors[,rowInd, drop=F])
rsc.colors = matrix()
rsc.names = names(table(rsc))
rsc.i = 1
for (rsc.name in rsc.names) {
rsc.colors[rsc.i] = rsc.name
rsc[rsc == rsc.name] = rsc.i
rsc.i = rsc.i + 1
}
rsc = matrix(as.numeric(rsc), nrow = dim(rsc)[1])
image(t(rsc), col = as.vector(rsc.colors), axes = FALSE)
if (length(rownames(RowSideColors)) > 0) {
axis(1, 0:(dim(rsc)[2] - 1)/max(1,(dim(rsc)[2] - 1)), rownames(RowSideColors), las = 2, tick = FALSE)
}
}
}
if (!missing(ColSideColors)) {
if (!is.matrix(ColSideColors)){
par(mar = c(0.5, 0, 0, margins[2]))
image(cbind(1:nc), col = ColSideColors[colInd], axes = FALSE)
} else {
par(mar = c(0.5, 0, 0, margins[2]))
csc = ColSideColors[colInd, , drop=F]
csc.colors = matrix()
csc.names = names(table(csc))
csc.i = 1
for (csc.name in csc.names) {
csc.colors[csc.i] = csc.name
csc[csc == csc.name] = csc.i
csc.i = csc.i + 1
}
csc = matrix(as.numeric(csc), nrow = dim(csc)[1])
image(csc, col = as.vector(csc.colors), axes = FALSE)
if (length(colnames(ColSideColors)) > 0) {
axis(2, 0:(dim(csc)[2] - 1)/max(1,(dim(csc)[2] - 1)), colnames(ColSideColors), las = 2, tick = FALSE)
}
}
}
par(mar = c(margins[1], 0, 0, margins[2]))
x <- t(x)
cellnote <- t(cellnote)
if (revC) {
iy <- nr:1
if (exists("ddr"))
ddr <- rev(ddr)
x <- x[, iy]
cellnote <- cellnote[, iy]
}
else iy <- 1:nr
image(1:nc, 1:nr, x, xlim = 0.5 + c(0, nc), ylim = 0.5 + c(0, nr), axes = FALSE, xlab = "", ylab = "", col = col, breaks = breaks, ...)
retval$carpet <- x
if (exists("ddr"))
retval$rowDendrogram <- ddr
if (exists("ddc"))
retval$colDendrogram <- ddc
retval$breaks <- breaks
retval$col <- col
    if (!invalid(na.color) && any(is.na(x))) { # invalid() comes from gplots
mmat <- ifelse(is.na(x), 1, NA)
image(1:nc, 1:nr, mmat, axes = FALSE, xlab = "", ylab = "",
col = na.color, add = TRUE)
}
axis(1, 1:nc, labels = labCol, las = 2, line = -0.5, tick = 0,
cex.axis = cexCol)
if (!is.null(xlab))
mtext(xlab, side = 1, line = margins[1] - 1.25)
axis(4, iy, labels = labRow, las = 2, line = -0.5, tick = 0,
cex.axis = cexRow)
if (!is.null(ylab))
mtext(ylab, side = 4, line = margins[2] - 1.25)
if (!missing(add.expr))
eval(substitute(add.expr))
if (!missing(colsep))
        for (csep in colsep) rect(xleft = csep + 0.5, ybottom = rep(0, length(csep)), xright = csep + 0.5 + sepwidth[1], ytop = rep(ncol(x) + 1, length(csep)), lty = 1, lwd = 1, col = sepcolor, border = sepcolor)
if (!missing(rowsep))
for (rsep in rowsep) rect(xleft = 0, ybottom = (ncol(x) + 1 - rsep) - 0.5, xright = nrow(x) + 1, ytop = (ncol(x) + 1 - rsep) - 0.5 - sepwidth[2], lty = 1, lwd = 1, col = sepcolor, border = sepcolor)
min.scale <- min(breaks)
max.scale <- max(breaks)
x.scaled <- scale01(t(x), min.scale, max.scale)
if (trace %in% c("both", "column")) {
retval$vline <- vline
vline.vals <- scale01(vline, min.scale, max.scale)
for (i in colInd) {
if (!is.null(vline)) {
abline(v = i - 0.5 + vline.vals, col = linecol,
lty = 2)
}
xv <- rep(i, nrow(x.scaled)) + x.scaled[, i] - 0.5
xv <- c(xv[1], xv)
yv <- 1:length(xv) - 0.5
lines(x = xv, y = yv, lwd = 1, col = tracecol, type = "s")
}
}
if (trace %in% c("both", "row")) {
retval$hline <- hline
hline.vals <- scale01(hline, min.scale, max.scale)
for (i in rowInd) {
if (!is.null(hline)) {
                abline(h = i - 0.5 + hline.vals, col = linecol, lty = 2)
}
yv <- rep(i, ncol(x.scaled)) + x.scaled[i, ] - 0.5
yv <- rev(c(yv[1], yv))
xv <- length(yv):1 - 0.5
lines(x = xv, y = yv, lwd = 1, col = tracecol, type = "s")
}
}
if (!missing(cellnote))
text(x = c(row(cellnote)), y = c(col(cellnote)), labels = c(cellnote),
col = notecol, cex = notecex)
par(mar = c(margins[1], 0, 0, 0))
if (dendrogram %in% c("both", "row")) {
plot(ddr, horiz = TRUE, axes = FALSE, yaxs = "i", leaflab = "none")
}
else plot.new()
par(mar = c(0, 0, if (!is.null(main)) 5 else 0, margins[2]))
if (dendrogram %in% c("both", "column")) {
plot(ddc, axes = FALSE, xaxs = "i", leaflab = "none")
}
else plot.new()
if (!is.null(main))
title(main, cex.main = 1.5 * op[["cex.main"]])
if (key) {
par(mar = c(5, 4, 2, 1), cex = 0.75)
tmpbreaks <- breaks
if (symkey) {
max.raw <- max(abs(c(x, breaks)), na.rm = TRUE)
min.raw <- -max.raw
tmpbreaks[1] <- -max(abs(x), na.rm = TRUE)
tmpbreaks[length(tmpbreaks)] <- max(abs(x), na.rm = TRUE)
}
else {
min.raw <- min(x, na.rm = TRUE)
max.raw <- max(x, na.rm = TRUE)
}
z <- seq(min.raw, max.raw, length = length(col))
image(z = matrix(z, ncol = 1), col = col, breaks = tmpbreaks,
xaxt = "n", yaxt = "n")
par(usr = c(0, 1, 0, 1))
lv <- pretty(breaks)
xv <- scale01(as.numeric(lv), min.raw, max.raw)
axis(1, at = xv, labels = lv)
if (scale == "row")
mtext(side = 1, "Row Z-Score", line = 2)
else if (scale == "column")
mtext(side = 1, "Column Z-Score", line = 2)
else mtext(side = 1, KeyValueName, line = 2)
if (density.info == "density") {
dens <- density(x, adjust = densadj, na.rm = TRUE)
omit <- dens$x < min(breaks) | dens$x > max(breaks)
            dens$x <- dens$x[!omit]
            dens$y <- dens$y[!omit]
dens$x <- scale01(dens$x, min.raw, max.raw)
lines(dens$x, dens$y/max(dens$y) * 0.95, col = denscol,
lwd = 1)
axis(2, at = pretty(dens$y)/max(dens$y) * 0.95, pretty(dens$y))
title("Color Key\nand Density Plot")
par(cex = 0.5)
mtext(side = 2, "Density", line = 2)
}
else if (density.info == "histogram") {
h <- hist(x, plot = FALSE, breaks = breaks)
hx <- scale01(breaks, min.raw, max.raw)
hy <- c(h$counts, h$counts[length(h$counts)])
lines(hx, hy/max(hy) * 0.95, lwd = 1, type = "s",
col = denscol)
axis(2, at = pretty(hy)/max(hy) * 0.95, pretty(hy))
title("Color Key\nand Histogram")
par(cex = 0.5)
mtext(side = 2, "Count", line = 2)
}
else title("Color Key")
}
else plot.new()
retval$colorTable <- data.frame(low = retval$breaks[-length(retval$breaks)],
high = retval$breaks[-1], color = retval$col)
invisible(retval)
}
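## Minimal usage sketch for the heatmap.3() defined above, on hypothetical data.
## Assumptions: the argument defaults mirror gplots::heatmap.2, gplots is loaded
## for invalid(), and scale01() is defined in the earlier part of this file.
# library(gplots)
# m <- matrix(rnorm(200), nrow = 20,
#             dimnames = list(paste0("gene", 1:20), paste0("sample", 1:10)))
# heatmap.3(m, scale = "row", dendrogram = "both", trace = "none",
#           margins = c(6, 8), key = TRUE)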
# Breast Normal Data Preparation
meldat <- read.csv("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/Processed Matrices/GSE75688_BreastNor_RAW_CDF.csv", header = TRUE, stringsAsFactors = FALSE)
sampleinfo <- read.csv("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/Processed Matrices/GSE75688_Breast_NorAnnotation.csv", header = TRUE, stringsAsFactors = F)
sampleinfo <- sampleinfo[ order(sampleinfo[,1]), ]
# Bcell=1 ; Tcell=2 ; Myeloid=3 ; Stromal=4
sampleinfo$Cell.Type <- sub("Bcell", 1, sampleinfo$Cell.Type)
sampleinfo$Cell.Type <- sub("Tcell", 2, sampleinfo$Cell.Type)
sampleinfo$Cell.Type <- sub("Myeloid", 3, sampleinfo$Cell.Type)
sampleinfo$Cell.Type <- sub("Stromal", 4, sampleinfo$Cell.Type)
meldat <- meldat[!duplicated(meldat[ , 1]), ]
rownames(meldat) <- meldat$Gene
meldat <- meldat[,-1]
meldat <- rbind(as.numeric(sub("BC", "", sampleinfo$Patient)), as.numeric(sampleinfo$Cell.Type), meldat)
rownames(meldat)[1:2] <- c("Patient_ID", "Cell_Type")
meldat <- meldat[ ,order(meldat[2,], meldat[1,])] # order cells (columns) by cell type, then by patient ID
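# keep genes (rows) with fewer than 70% missing values across cells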
meldat <- meldat[apply(meldat, 1, function(x) sum(is.na(x)) < (ncol(meldat)*(0.7))), ]
### Colors Customization
library(RColorBrewer)
grcol <- colorRampPalette(c("green","red"))(64) #heat maps color keys
library(cluster) # general color index (palette below)
colors = c("#e6194B", "#3cb44b", "#ffe119", "#4363d8", "#f58231", "#911eb4", "#42d4f4", "#f032e6", "#bfef45", "#fabebe"
, "#469990", "#e6beff", "#9A6324", "#800000", "#aaffc3", "#808000", "#ffd8b1", "#000075", "#a9a9a9", "#000000")
### Tumor Identity Colors labeling
tumors_colors=rep(colors[1], ncol(meldat)) # one entry per cell (198 columns here)
for(i in 2:length(table(as.numeric(meldat[1,])))){
tumors_colors[meldat[1, ]==names(table(as.numeric(meldat[1,])))[i]]=colors[i]
}
tumors_colors <- as.matrix(tumors_colors) # Legend Attached Form
colorss = c("#ffe119", "#f58231", "#42d4f4", "#469990", "#800000", "#aaffc3")
Cell_type <- rep(colorss[1], ncol(meldat)) # one entry per cell (198 columns here)
for(i in 2:length(table(as.numeric(meldat[2,])))){
Cell_type[meldat[2, ]==names(table(as.numeric(meldat[2,])))[i]]=colorss[i]
}
Cell_type <- as.matrix(Cell_type) # Legend Attached Form
clab <- cbind(Cell_type, tumors_colors)
colnames(clab)=c("Cell_Type", "Tumor_ID") # Legend Label Name
### Duplicate input and convert NAs to "0" for "Zero version"
AA <- meldat[3:nrow(meldat), ]
AA[is.na(AA)]=0 # "AA" ready for Kmeans
CC <- meldat[3:nrow(meldat), ]
### Duplicate input and fill NAs with row means for "Mean version"
AA <- meldat[3:nrow(meldat), ]
for (i in 1:nrow(AA)) {
AA[ i, is.na(AA[i, ])] <- mean(na.omit(as.numeric(AA[i, ])))
}
CC <- meldat[3:nrow(meldat), ]
### TCGA Methods: ConsensusClusterPlus
# try http:// if https:// URLs are not supported
# source("https://bioconductor.org/biocLite.R")
# biocLite("ConsensusClusterPlus")
library(ConsensusClusterPlus)
title=paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/ConsensusClusterPlus.Figures", sep = "")
p_time <- proc.time()
resultss = ConsensusClusterPlus(t(AA), maxK=12, reps=100, pItem=0.8, pFeature=1,
title=title, clusterAlg="km", distance="euclidean", plot="png")
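# resultss[[k]] holds, for each k: consensusMatrix, consensusTree,
# consensusClass (the per-gene cluster assignment used below), ml and clrs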
icl = calcICL(resultss, title=title, plot="png")
#icl[["clusterConsensus"]]
#icl[["itemConsensus"]][1:5,]
t_time <- proc.time()-p_time
print(t_time)
p_time <- proc.time()
for(k in 2:12){
BB <- CC[order(resultss[[k]][["consensusClass"]]), ]
main_title=paste("GSE75688_Breast_CCP_k=", k, sep = "")
par(cex.main=0.5)
CCP_colors <- resultss[[k]]$clrs[[3]]
plot(1:k, col=CCP_colors, pch=16, cex=6)
Cluster_Colors=rep(CCP_colors[1], nrow(CC)) # one entry per gene (5957 rows here)
for(j in 2:k){
Cluster_Colors[sort(resultss[[k]][["consensusClass"]])==j]=CCP_colors[j]
}
plot(1:length(Cluster_Colors), col=Cluster_Colors, pch=16, cex=3)
Cluster_Colors <- as.matrix(t(Cluster_Colors))
rownames(Cluster_Colors)=c("Clusters")
tiff(paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/K_Heatmaps/", main_title, ".tiff", sep=""), width=2200, height=1600, compression="lzw", res=300)
heatmap.3(BB, na.rm = TRUE, scale="none", dendrogram="none", margins=c(6,12), RowSideColors=Cluster_Colors,
Rowv=FALSE, Colv=FALSE, ColSideColors=clab, symbreaks=FALSE, key=TRUE, symkey=FALSE,
density.info="none", trace="none", main=main_title, labCol=FALSE, labRow=FALSE, cexRow=1, col=grcol,
ColSideColorsSize=2, RowSideColorsSize=1)
par(xpd=T)
legend("bottomleft",legend=c(paste("ConsenClus", 1:k, sep = "")), fill=CCP_colors[1:k], border=FALSE, bty="n", y.intersp = 1, cex=0.7)
legend("topright",legend=c("Bcell", "Tcell", "Myeloid", "Stromal", "", names(table(as.numeric(meldat[1,])))), fill=c(colorss[1:4], "white", colors[1:length(names(table(as.character(meldat[1,]))))]), border=FALSE, bty="n", y.intersp = 1, cex=0.7)
dev.off()
for(y in 1:as.numeric(k)){
k <- as.numeric(k)
y <- as.numeric(y)
CGset <- rownames(CC)[resultss[[k]][["consensusClass"]]==y]
if(as.numeric(k) < 10){
k <- as.numeric(k)
k <- paste("0", k, sep = "")
}
if(as.numeric(y) < 10){
y <- paste("0", y, sep = "")
}
write.table(CGset, file = paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/ClusterGeneLists/GSE75688_Breast_ClusGenes_K", k, "C", y, ".csv", sep = ""), quote = F, row.names = F, sep = ",")
}
}
t_time <- proc.time()-p_time
print(t_time)
# Extract clusters colors information
for(i in 2:12){
g1 <- resultss[[i]][[3]]
dfg1 <- data.frame(Gene = names(g1), Group = as.numeric(g1), stringsAsFactors = FALSE)
col1 <- unlist(resultss[[i]][[5]][1])
dfg1$Col <- col1
if(i < 10){
i = paste("0",i,sep ="")
}
file1 <- paste("D:/SC Cases Completed/GSE75688_Breast_DATA/Normal/K.Means/Filled with Mean/GeneColorsLists/GSE75688_Breast_CPP_K=", i, ".csv",sep = "")
write.csv(dfg1, file1, row.names = FALSE)
}
#!/usr/bin/Rscript
#
# Bhishan Poudel
# Jan 5, 2016
################################################################################
# Function to set the current directory as the working directory
set_default_wd <- function(wd = getwd(), overwrite = FALSE) {
text <- paste0(
'local({ setwd("', wd, '") })')
##
if (Sys.info()["sysname"] == "Windows") {
write(
text,
file = paste0(Sys.getenv("HOME"), "\\.Rprofile"),
append = !overwrite)
} else {
write(
text,
file = paste0(Sys.getenv("HOME"), "/.Rprofile"),
append = !overwrite)
}
}
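# Example usage: persist a default working directory via ~/.Rprofile.
# Note that overwrite = TRUE replaces the entire .Rprofile rather than appending to it.
# set_default_wd() # register the current directory
# set_default_wd("~/projects/analysis", overwrite = TRUE)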
################################################################################
# for plotly account
Sys.setenv("plotly_username"="bhishanpdl")
Sys.setenv("plotly_api_key"="amq1tpxuig")
################################################################################
# for rstudio addins
#' Insert texts.
#'
#' Call this function as an addin to insert at the cursor position.
#'
#' @export
shebang <- function() {
rstudioapi::insertText("#!/usr/bin/Rscript \n")
rstudioapi::insertText("# Bhishan Poudel \n")
rstudioapi::insertText("# \n")
rstudioapi::insertText("\n\n")
rstudioapi::insertText("# setting working directory \n")
rstudioapi::insertText("set_default_wd() \n")
}
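# Note: to expose shebang() as an RStudio addin, the package also needs a matching
# entry (Name/Description/Binding/Interactive) in inst/rstudio/addins.dcf.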
################################################################################
# End of /Programming_tips/R/RprofileUbuntu.txt (repo: bpPrg/Tips, no license)
# print() and cat() functions
print(100)
print(pi)
data <- "abc"
print(data)
print(data, quote=FALSE)
v1 <- c("apple", "banana", "grape")
print(v1)
print(v1, print.gap=10)
cat(100)
cat(100,200)
cat(100,200,"\n")
cat("aaa", "bbb", "ccc", "ddd", "\n")
cat(v1, "\n")
cat(v1, sep="-", "\n")
print(paste("R", "is a language", "for statistical analysis."))
cat("R", "is a language", "for statistical analysis.", "\n")
#control statements
#if else
randomNum <-sample(1:10,1)
if(randomNum>5){
cat(randomNum,": greater than 5","\n")
}else{
cat(randomNum,": less than or equal to 5","\n")
}
if(randomNum%%2 == 1){
cat(randomNum,"; odd\n")
}else{
cat(randomNum,"; even","\n")
}
if(randomNum%%2 == 1){
cat(randomNum,"; odd","\n")
cat("done")
}else{
cat(randomNum,"; even","\n")
cat("done")
}
if(randomNum%%2 == 1){
cat(randomNum,"; odd")
cat("done")
}else{
cat(randomNum,"; even")
cat("done")
}
score <- sample(0:100, 1) # draw one random number between 0 and 100
if (score >=90){
cat(score,"is grade A","\n")
}else if (score >=80){
cat(score,"is grade B","\n")
}else if (score >=70){
cat(score,"is grade C","\n")
}else if (score >=60){
cat(score,"is grade D","\n")
}else {
cat(score,"is grade F","\n")
}
#for statement
#for practice
for(data in month.name)
print(data)
for(data in month.name)print(data);print("abc")
for(data in month.name){print(data);print("abc")}
for(n in 1:5)
cat("hello?","\n")
for(i in 1:5){
for(j in 1:5){
cat("i=",i,"j=",j,"\n")
}
}
# multiplication table (gugudan)
for(dan in 1:9){
for(num in 1:9){
cat(dan,"x",num,"=",dan*num,"\t") # \n : newline character, \t : tab character
}
cat("\n")
}
bb <- F
for(i in 1:9){
for(j in 1:9){
if(i*j>30){
bb<-T
break
}
cat(i,"*",j,"=",i*j,"\t")
}
cat("\n")
if(bb) # if bb is TRUE
break
}
for(i in 1:9){
for(j in 1:9){
if(i*j>30){
break
}
cat(i,"*",j,"=",i*j,"\t")
}
cat("\n")
}
#while statement
i<-1
while(i <= 10){
cat(i,"\n")
i <- i+1
}
cat("종료 후 :",i,"\n")
i<-1
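# note: i is never incremented in the loop below, so it prints forever (interrupt to stop)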
while (i<=10) {
cat(i,"\n")
}
i<-1
while (i<=10) {
cat(i,"\n")
i<-i+2
}
i<-1
while (i<=10) {
cat(i,"\n")
i<-i+1
}
#switch(): a function used in place of a switch statement
month <- sample(1:12,1)
month <- paste(month,"month",sep="") # "3month" vs "3 month"
result <- switch(EXPR=month,
"12month"=,"1month"=,"2month"="winter",
"3month"=,"4month"=,"5month"="spring",
"6month"=,"7month"=,"8month"="summer",
"fall")
cat(month," is ",result,"\n",sep="")
num <- sample(1:10,1)
num
switch(EXPR = num,"A","B","C","D")
for(num in 1:10){
cat(num,":",switch(EXPR = num,"A","B","C","D"),"\n")
}
for(num in 1:10){
num <- as.character(num)
cat(num,":",switch(EXPR = num,
"7"="A","8"="B","9"="C","10"="D","other"),"\n")
}
for(data in month.name)
print(data)
for(data in month.name)
cat(data)
sum <- 0
for(i in 5:15){
if(i%%10==0){
break
}
sum <- sum + i
print(paste(i,":",sum))
}
sum <- 0
for(i in 5:15){
if(i%%10==0){
break
}
sum <- sum + i
cat(i,":",sum,"\n")
}
sum <-0
for(i in 5:15){
if(i%%10==0){
next; #continue
}
sum <- sum + i
print(paste(i,":",sum))
}
sumNumber <- 0
while(sumNumber <= 20) {
i <- sample(1:5, 1)
sumNumber <-sumNumber+i;
cat(sumNumber,"\n")
}
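# note: this repeat block has no break condition, so it prints forever (interrupt to stop)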
repeat {
cat("ha ha ha\n")
}
sumNumber <- 0
repeat {
i <- sample(1:5, 1)
sumNumber <-sumNumber+i;
cat(sumNumber,"\n")
if(sumNumber > 20)
break;
}
# file input
ls()
length(ls())
save(list=ls(),file="all.rda") # saves all workspace variables to "all.rda" in the working directory
rm(list=ls())
ls()
load("all.rda")
ls()
#read file data
nums <- scan("data/sample_num.txt")
word_ansi <- scan("data/sample_ansi.txt",what="")
words_utf8 <- scan("data/sample_utf8.txt", what="",encoding="UTF-8")
words_utf8_new <- scan("data/sample_utf8.txt", what="")
lines_ansi <- readLines("data/sample_ansi.txt")
lines_utf8 <- readLines("data/sample_utf8.txt",encoding="UTF-8")
df2 <- read.table("data/product_click.log", stringsAsFactors = T)
str(df2)
head(df2)
summary(df2$V2)
# End of /R-lecture/01_syntax/day04.R (repo: yeonjooyou/learn-R, no license)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hanlp.R
\docType{data}
\name{hanlp.naiveBayesClassify}
\alias{hanlp.naiveBayesClassify}
\title{Naive Bayes classifier.}
\format{\code{\link{R6Class}} object.}
\usage{
hanlp.naiveBayesClassify
}
\description{
An R6 class implementing a naive Bayes classifier.
}
\details{
An R6 class implementing a naive Bayes classifier.
}
\section{Usage}{
For usage details see \bold{Methods, Arguments and Examples} sections.
\preformatted{
naiveBayes = hanlp.naiveBayesClassify$new()
naiveBayes$train(file_folder)
naiveBayes$predict(text)
naiveBayes$test(test_data)
naiveBayes$getModel()
}
}
\section{Methods}{
\describe{
\item{\code{$new()}}{Constructor for the naive Bayes classifier.}
\item{\code{$train(file_folder)}}{Train the naive Bayes classifier; details at https://github.com/hankcs/HanLP/wiki.}
\item{\code{$predict(text)}}{Predict the category of \code{text}.}
\item{\code{$test(test_data)}}{Predict categories for a batch of texts; \code{test_data} is a character vector.}
\item{\code{$getModel()}}{Output some information about the naive Bayes model.}
}
}
\examples{
\dontrun{
naiveBayes = hanlp.naiveBayesClassify$new()
naiveBayes$train(file_folder)
naiveBayes$predict(text)
naiveBayes$test(test_data)
naiveBayes$getModel()
}
}
\author{
qxde01
}
\keyword{datasets}
% End of /man/hanlp.naiveBayesClassify.Rd (repo: SimmsJeason/RHanLP, no license)
#' Example dataset of WFD data interchange format
#'
#' A dataset containing ecology and chemistry data
#'
#' @format A data frame with 8477 rows and 20 variables:
#' \describe{
#' \item{location_id}{location_id}
#' \item{location_description}{location_description}
#' \item{easting}{easting}
#' \item{northing}{northing}
#' \item{latitude}{latitude}
#' \item{longitude}{longitude}
#' \item{date_taken}{date_taken}
#' \item{sample_id}{sample_id}
#' \item{analysis_name}{analysis_name}
#' \item{analysis_repname}{analysis_repname}
#' \item{question}{question}
#' \item{response}{response}
#' \item{units}{units}
#' \item{taxon}{taxon}
#' \item{taxon_id}{taxon_id}
#' \item{mean_alkalinity}{mean_alkalinity}
#' \item{result_id}{result_id}
#' \item{grid_reference}{grid_reference}
#' \item{standard}{standard}
#' \item{quality_element}{quality_element}
#' }
#' @source Agency sampling data
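#' @examples
#' # inspect the bundled demo data
#' str(demo_input)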
"demo_input"
# End of /R/demo-input.R (repo: cmbenn/hera, no license)
#' Interactive user interface for treemap
#'
#' This function is an interactive user interface for creating treemaps. Interaction is provided for the four main input arguments of \code{\link{treemap}} besides the data.frame itself, namely \code{index}, \code{vSize}, \code{vColor} and \code{type}. Zooming in and out is possible. Command-line output is written to the console.
#'
#' @param dtf a data.frame (\code{\link{treemap}}) If not provided, then the first data.frame in the global workspace is loaded.
#' @param index index variables (up to four). See \code{\link{treemap}}.
#' @param vSize name of the variable that determines the rectangle sizes.
#' @param vColor name of the variable that determines the rectangle colors. See \code{\link{treemap}}.
#' @param type treemap type. See \code{\link{treemap}}.
#' @param height height of the plotted treemap in pixels. Tip: decrease this number if the treemap doesn't fit conveniently.
#' @param command.line.output if \code{TRUE}, the command-line output for the generated treemaps is printed to the console.
#' @examples
#' \dontrun{
#' data(business)
#' itreemap(business)
#' }
#' @note This interface will no longer be maintained (except for small bugs), since there is a better interactive interface available: \url{https://github.com/timelyportfolio/d3treeR}.
#' @import data.table
#' @import grid
#' @import gridBase
#' @import shiny
#' @export
itreemap <- function(dtf=NULL, index=NULL, vSize=NULL, vColor=NULL, type=NULL, height=700, command.line.output=TRUE) {
# get data.frame(s)
obs <- ls(envir=.GlobalEnv)
if (!length(obs)) stop("No data.frames loaded")
dfs <- obs[sapply(obs, function(x)inherits(get(x, envir=.GlobalEnv), "data.frame"))]
if (!length(dfs)) stop("No data.frames loaded")
# get variable names
dfvars <- lapply(dfs, function(x)names(get(x, envir=.GlobalEnv)))
names(dfvars) <- dfs
dfiscat <- lapply(dfs, function(x)sapply(get(x, envir=.GlobalEnv),function(y)is.factor(y)||is.logical(y)||is.character(y)))
names(dfiscat) <- dfs
dfcat <- lapply(dfiscat, function(x)if (any(x)) names(x[x]) else "<NA>")
dfnum <- lapply(dfiscat, function(x)if (any(!x)) names(x[!x]) else "<NA>")
## check input parameters
if (missing(dtf)) {
dtfname <- dfs[1]
} else {
dtfname <- deparse(substitute(dtf))
if (!dtfname %in% dfs) stop(paste(dtfname, "is not a data.frame"))
}
if (missing(index)) {
indexNames <- c(dfcat[[dtfname]][1], "<NA>", "<NA>", "<NA>")
} else {
if (!(all(index %in% dfcat[[dtfname]]))) stop("index variable(s) is(are) not categorical")
indexNames <- if (length(index) < 4) c(index, rep.int("<NA>", 4-length(index))) else index[1:4]
}
if (missing(vSize)) {
vSize <- dfnum[[dtfname]][1]
} else {
if (!(vSize %in% dfnum[[dtfname]])) stop("vSize is not numeric")
}
if (missing(type)) {
typeName <- "index"
} else {
if (!(type %in% c("value", "categorical", "comp", "dens", "index", "depth"))) stop("Invalid type")
typeName <- type
}
if (missing(vColor)) {
if (typeName %in% c("value", "comp", "dens")) vColor <- dfnum[[dtfname]][1]
if (typeName == "categorical") vColor <- dfcat[[dtfname]][1]
} else {
if (typeName %in% c("value", "comp", "dens") && (!(vColor %in% dfnum[[dtfname]]))) stop("vColor is not numeric")
if (typeName == "categorical" && (!(vColor %in% dfcat[[dtfname]]))) stop("vColor is not categorical")
}
if (typeName %in% c("index", "depth")) vColor <- "<not needed>"
## administration is kept in this environment (maybe not the most elegant solution)
e <- environment()
back <- 0
#filters <- NULL
#hcl <- list(tmSetHCLoptions())
x <- 0
y <- 0
count <- 0
size <- ""
color <- ""
type <- ""
index <- rep("", 4)
runApp(list(
ui = pageWithSidebar(
headerPanel("", windowTitle="Interactive Treemap"),
sidebarPanel(
uiOutput("df"),
#uiOutput("filter"),
uiOutput("ind1"),
uiOutput("ind2"),
uiOutput("ind3"),
uiOutput("ind4"),
uiOutput("size"),
uiOutput("type"),
uiOutput("color"),
checkboxInput("fixscales", "Fix scales", value = TRUE),
checkboxInput("fixasp", "Fix aspect ratio", value = TRUE),
actionButton("back", "Zoom out")
),
mainPanel(
tabsetPanel(
tabPanel("Treemap", plotOutput("plot", hover="hover", click="click", height=paste(height, "px", sep="")),
tableOutput("summary")),
tabPanel("Data", dataTableOutput("data")),
tabPanel("Microdata", dataTableOutput("microdata"))))
),
server = function(input, output, session){
values <- reactiveValues()
values$update <- FALSE
dataset <- reactive({
assign("filters", NULL, envir=e)
assign("hcl", list(tmSetHCLoptions()), envir=e)
assign("asp", NULL, envir=e)
assign("range", NA, envir=e)
assign("tm", NULL, envir=e)
            if (is.null(input$df)) dfs[1] else input$df
})
getHoverID <- reactive({
p <- dataset()
x <- input$hover$x
y <- input$hover$y
if (!is.null(tm)) {
x <- (x - tm$vpCoorX[1]) / (tm$vpCoorX[2] - tm$vpCoorX[1])
y <- (y - tm$vpCoorY[1]) / (tm$vpCoorY[2] - tm$vpCoorY[1])
l <- tmLocate(list(x=x, y=y), tm)
if (is.na(l[1,1])) {
return(NULL)
} else return(as.list(l[1,]))
} else {
return(NULL)
}
})
getClickID <- reactive({
p <- dataset()
x.new <- input$click$x
y.new <- input$click$y
if (is.null(x.new) || is.null(y.new)) return(NULL)
if (x.new==x && y.new==y) return(NULL)
assign("x", x.new, envir=e)
assign("y", y.new, envir=e)
if (!is.null(tm)) {
x <- (x - tm$vpCoorX[1]) / (tm$vpCoorX[2] - tm$vpCoorX[1])
y <- (y - tm$vpCoorY[1]) / (tm$vpCoorY[2] - tm$vpCoorY[1])
l <- tmLocate(list(x=x, y=y), tm)
if (is.na(l[1,1])) {
return(NULL)
} else return(as.list(l[1,]))
} else {
return(NULL)
}
})
getFilter <- reactive({
p <- dataset()
back.new <- input$back
l <- getClickID()
if (back.new == back) {
                if (!is.null(l)) if (!(l$x0==0 && l$y0==0 && l$w==1 && l$h==1)) {
# mouse click on treemap
filter <- as.character(l[[1]])
proceed <- is.null(filters)
if (!proceed) proceed <- (!length(filters)) || (filter != filters[length(filters)])
# select all rectangles inside clicked rectangle
if (proceed) {
sel <- tm$tm[[1]] == filter
#browser()
# create hcl options
cols <- tm$tm$color[sel]
cols <- substr(cols, 1L, 7L)
cols <- hex2RGB(cols)
cols <- as(cols, "polarLUV")
hues <- cols@coords[,3]
hcl.last <- hcl[[length(hcl)]]
hcl.last$hue_start <- min(hues)
hcl.last$hue_end <- max(hues)
notDeeper <- all(is.na(tm$tm[sel, 2]))
if (length(l)>10 && !notDeeper) {
hcl.last$chroma <- hcl.last$chroma + hcl.last$chroma_slope
hcl.last$luminance <- hcl.last$luminance + hcl.last$luminance_slope
}
assign("hcl", c(hcl, list(hcl.last)), envir=e)
# set aspect ratio
x0 <- tm$tm$x0[sel]
x1 <- x0 + tm$tm$w[sel]
y0 <- tm$tm$y0[sel]
y1 <- y0 + tm$tm$h[sel]
w <- max(x1) - min(x0)
h <- max(y1) - min(y0)
asp.new <- tm$aspRatio
assign("asp", if (is.null(asp)) c(asp.new, asp.new*(w/h)) else c(asp, asp.new*(w/h)), envir=e)
# get range
assign("range", tm$range, envir=e)
# add filter
assign("filters", c(filters, filter), envir=e)
}
}
} else {
if (!is.null(filters)) if (length(filters)) {
# click on zoom out button
assign("filters", filters[-(length(filters))], envir=e)
assign("hcl", hcl[-(length(hcl))], envir=e)
assign("asp", asp[-(length(asp))], envir=e)
assign("range", tm$range, envir=e)
}
assign("back", back.new, envir=e)
}
filters
})
getSummary <- reactive({
l <- getHoverID()
if (!is.null(l)) {
# create summary line on hover
sizeID <- which(names(l)=="vSize")
id <- switch(type,
comp=sizeID+2,
dens=sizeID+2,
value=sizeID+1,
index=sizeID,
categorical=sizeID+1,
depth=sizeID,
color=sizeID)
l <- l[1:id]
names(l)[sizeID] <- size
if (!(type %in% c("index", "depth", "color"))) names(l)[sizeID+1] <- color
if (type=="comp") {
names(l)[sizeID+2] <- paste("compared to", color, "(in %)")
} else if (type=="dens") {
names(l)[sizeID+2] <- paste(color, "per", size)
}
dt <- as.data.frame(l)
row.names(dt) <- ""
return(as.data.frame(dt))
} else {
dt <- data.frame('...'="")
row.names(dt) <- ""
return(dt)
}
})
output$df <- renderUI({
selectInput("df", label="Dataset:", choices=dfs, selected=dtfname)
})
output$ind1 <- renderUI({
p <- dataset()
vars <- dfcat[[p]]
selectInput("ind1", label="Index variables", choices=vars, selected=indexNames[1])
})
output$ind2 <- renderUI({
p <- dataset()
vars <- c("<NA>", dfcat[[p]])
ind1 <- input$ind1
if (!is.null(ind1)) {
vars <- setdiff(vars, ind1)
selectInput("ind2", label="", choices=vars, selected=indexNames[2])
}
})
output$ind3 <- renderUI({
p <- dataset()
vars <- c("<NA>", dfcat[[p]])
ind1 <- input$ind1
ind2 <- input$ind2
if (!is.null(ind1) && !is.null(ind2)) {
if (ind2=="<NA>") {
vars <- "<NA>"
} else {
vars <- setdiff(vars, c(ind1, ind2))
}
selectInput("ind3", label="", choices=vars, selected=indexNames[3])
}
})
output$ind4 <- renderUI({
p <- dataset()
vars <- c("<NA>", dfcat[[p]])
ind1 <- input$ind1
ind2 <- input$ind2
ind3 <- input$ind3
if (!is.null(ind1) && !is.null(ind2) && !is.null(ind3)) {
if (ind3=="<NA>") {
vars <- "<NA>"
} else {
vars <- setdiff(vars, c(ind1, ind2, ind3))
}
selectInput("ind4", label="", choices=vars, selected=indexNames[4])
}
})
output$size <- renderUI({
p <- dataset()
vars <- dfnum[[p]]
selectInput("size", label="Size variable", choices=vars, selected=vSize)
})
output$color <- renderUI({
p <- dataset()
type <- input$type
if (!is.null(type)) {
vars <- if (type=="index") {
"<not needed>"
} else if (type=="value") {
dfnum[[p]]
} else if (type=="comp") {
dfnum[[p]]
} else if (type=="dens") {
dfnum[[p]]
} else if (type=="depth") {
"<not needed>"
} else if (type=="categorical") {
dfcat[[p]]
}
selectInput("color", label="Color variable", choices=vars, selected=vColor)
}
})
output$type <- renderUI({
selectInput("type", label="Type", choices=c("index", "value", "comp", "dens",
"depth", "categorical"), selected=typeName)
})
output$plot <- renderPlot({
#.tm <- .range <- .count <- .size <- .color <- .type <- .index <- NULL
# get input parameters
filters <- getFilter()
p <- dataset()
size.new <- input$size
color.new <- input$color
type.new <- input$type
ind1 <- input$ind1
ind2 <- input$ind2
ind3 <- input$ind3
ind4 <- input$ind4
asp.new <- input$fixasp
scales <- input$fixscales
# check if all parameters are ready
if (is.null(size.new) || is.null(color.new) || is.null(type.new) ||
is.null(ind1) || is.null(ind2) || is.null(ind3) || is.null(ind4) || is.null(asp.new) || is.null(scales)) return(NULL)
# create index vector and get filter
index.new <- c(ind1, ind2, ind3, ind4)
if (all(index.new==index) && size.new ==size && color.new==color && type.new == type) {
#cat("same variables\n")
#return(NULL)
} else {
assign("range", NA, envir=e)
}
assign("size", size.new, envir=e)
assign("color", color.new, envir=e)
assign("type", type.new, envir=e)
assign("index", index.new, envir=e)
index.new <- index.new[index.new!="<NA>"]
# determine zoom level
zoomLevel <- if (is.null(filters)) 0 else length(filters)
# check parameters
if (!(anyDuplicated(index.new)) &&
((color.new=="<not needed>" && (type.new %in% c("index", "depth"))) ||
((color.new %in% dfnum[[p]]) && (type.new %in% c("value", "comp", "dens"))) ||
((color.new %in% dfcat[[p]]) && (type.new == "categorical"))) &&
all(index.new %in% dfcat[[p]]))
{
# create empty base R plot to obtain hover and click info
par(mar=c(0,0,0,0), xaxs='i', yaxs='i')
plot(c(0,1), c(0,1),axes=F, col="white")
vps <- baseViewports()
# subset data and get aspect ratio
#### TODO: in incomplete trees, the max zoom level is lower
#### test: 53 Postal and courier activities
dat <- get(p, envir=.GlobalEnv)
if (zoomLevel>0) {
filterString <- paste(paste(index.new[1:zoomLevel], paste("\"", filters, "\"", sep=""), sep=" == "), collapse=" & ")
selection <- eval(parse(text=filterString), dat, parent.frame())
dat <- dat[selection,]
# determine indices of treemap
allNA <- sapply(dat[, index.new], function(x)all(is.na(x)))
maxLevel <- ifelse(any(allNA), which(allNA)[1]-1, length(index.new))
minLevel <- min(maxLevel, zoomLevel+1, length(index.new))
if (length(index.new)>1) index.new <- index.new[(minLevel:maxLevel)]
#if (maxLevel==zoomLevel) hcl
aspRatio <- ifelse(asp.new, asp[length(asp)], NA)
} else {
aspRatio <- NA
}
# reset range if treemap is changed
assign("count", count + 1, envir=e)
#cat("draw", .count, " range", .range,"\n")
# get range and hcl info
assign("range", if(scales) range else NA, envir=e)
hcl.new <- if(scales) as.list(hcl[[zoomLevel+1]]) else hcl[[1]]
#require(data.table)
values$update <- TRUE
tm <- treemap(dat,
index=index.new,
vSize=size.new,
vColor=color.new,
type=type.new,
vp=vps$plot,
palette.HCL.options=hcl.new,
aspRatio=aspRatio,
range=range,
title="")
values$update <- FALSE
assign("tm", tm, envir=e)
tmString <-
paste0("treemap(",
ifelse(zoomLevel==0, p, paste0("subset(", p, ", subset=", filterString, ")")),
", index=", if(length(index.new)==1) paste0("\"", index.new, "\"") else paste0("c(", paste(paste0("\"", index.new, "\""), collapse=", "), ")"),
", vSize=\"", size.new, "\"",
if (color.new!="<not needed>") paste0(", vColor=\"", color.new, "\""),
", type=\"", type.new, "\")")
if (command.line.output) cat(tmString, "\n")
}
})
output$summary <- renderTable({
getSummary()
})
output$microdata <- renderDataTable({
# get input parameters (to get attention)
filters <- getFilter()
p <- dataset()
size <- input$size
color <- input$color
type <- input$type
ind1 <- input$ind1
ind2 <- input$ind2
ind3 <- input$ind3
ind4 <- input$ind4
asp.new <- input$fixasp
scales <- input$fixscales
update <- values$update
dat <- get(p, envir=.GlobalEnv)
index.new <- c(ind1, ind2, ind3, ind4)
zoomLevel <- if (is.null(filters)) 0 else length(filters)
if (zoomLevel>0) {
# subset data
filterString <- paste(paste(index[1:zoomLevel], paste("\"", filters, "\"", sep=""), sep=" == "), collapse=" & ")
selection <- eval(parse(text=filterString), dat, parent.frame())
dat <- dat[selection,]
}
dat
})
output$data <- renderDataTable({
# get input parameters (to get attention)
p <- dataset()
size.new <- input$size
color.new <- input$color
type.new <- input$type
ind1 <- input$ind1
ind2 <- input$ind2
ind3 <- input$ind3
ind4 <- input$ind4
asp <- input$fixasp
scales <- input$fixscales
update <- values$update
tm <- tm$tm
lvls <- tm$level
dat <- tm[lvls==max(lvls), 1:(ncol(tm)-6)]
sizeID <- which(names(dat)=="vSize")
id <- switch(type,
comp=sizeID+2,
dens=sizeID+2,
value=sizeID+1,
index=sizeID,
categorical=sizeID+1,
depth=sizeID,
color=sizeID)
dat <- dat[, 1:id]
names(dat)[sizeID] <- size
if (!(type %in% c("index", "depth", "color"))) names(dat)[sizeID+1] <- color
if (type=="comp") {
names(dat)[sizeID+2] <- paste("compared to", color, "(in %)")
} else if (type=="dens") {
names(dat)[sizeID+2] <- paste(color, "per", size)
}
dat
})
}
))
}
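## Example call with explicit arguments (hypothetical; assumes the treemap
## package's 'business' demo data with NACE* index columns and numeric
## 'employees'/'turnover' columns). Left commented out because itreemap()
## launches a blocking shiny app.
# data(business)
# itreemap(business, index = c("NACE1", "NACE2"), vSize = "employees",
#          vColor = "turnover", type = "value")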
# End of /pkg/R/itreemap.R (repo: timelyportfolio/treemap, no license)
#################################################################
## Univariate state-space models
## Example 5. The hidden x is a straight line
## This illustrates that you can reproduce a
## linear regression fit with a state-space model.
#################################################################
library(MARSS)
#x is the "hidden" trend we want to find
intercept=1 #this is x at t=0
slope=0.5
r=1
n=10
t=1:n
x=intercept + slope*t
plot(x,xlim=c(1,n),ylim=c(0,n),type="l",xlab="time",ylab="x")
#y is our observation of x with error
y=x+rnorm(n,0,sqrt(r))
points(y)
#Let's estimate the x
fit = lm(y~t)
fit
#add fit to our plot
abline(fit, col="red", lty=2, lwd=3)
title("fit is red; true x is black")
##Preliminaries: how to write x=intercept+slope*t as an AR-1
x[1]=intercept+slope #this is x at t=1
for(i in 2:n) x[i]=x[i-1]+slope #n=10 from above
plot(1:n,x,xlim=c(0,n),ylim=c(0,n),type="l",lwd=2,col="blue")
lines(c(4,5),c(x[4],x[4]))
lines(c(5,5),c(x[4],x[5]))
text(5,x[4]+slope/2,"slope",pos=4)
#Let's write x as an AR-1 model and y as an observation of that
#x(t) = x(t-1) + slope + w(t), w(t)~N(0,0) so w(t)=0
#x(0) = intercept
#y(t) = x(t) + v(t), v(t)~N(0,r)
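#In MARSS terms: B=1 and Q=0 make x a deterministic trend, U carries the
#slope, x0 the intercept (tinitx=0 places x0 at t=0), and R is the
#observation variance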
mod.list=list(
U=matrix("slope"),
x0=matrix("intercept"),
B=matrix(1),
Q=matrix(0),
Z=matrix(1),
A=matrix(0),
R=matrix("r"),
tinitx=0)
fit2=MARSS(y,model=mod.list)
plot(x,xlim=c(1,n),ylim=c(0,n),type="l",xlab="time",ylab="x")
points(y)
lines(fit2$states[1,], col="blue", lwd=2)
abline(fit, col="red", lty=2, lwd=3)
title("AR is blue; lm is red; true x is black")
#parameter estimates
est.slope=coef(fit2)$U[,1]
est.intercept=coef(fit2)$x0[,1]
est.r=coef(fit2)$R[,1]
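#Sanity check (illustrative): the state-space estimates should closely match
#the lm() coefficients, up to the EM convergence tolerance
rbind(MARSS = c(est.intercept, est.slope), lm = coef(fit))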
#Let's forecast our OBSERVATIONS forward 10 time steps
#x(t+1)=x(t)+slope
#y(t+1)=x(t+1)+v(t+1), v(t+1)~N(0,r)
#First let's set up our estimated x
#The last x at t=max(t)
x.est.end = fit2$states[1,n]
t.forward = 10
x.forecast = x.est.end + est.slope*(1:t.forward)
#Let's first add the real x and observations
ylims=c(0,x[max(t)]+slope*t.forward+3*r)
xlims=c(n-9,n+t.forward)
plot((n-9):n, x[(n-9):n],xlim=xlims,ylim=ylims,type="l",ylab="y",xlab="t")
points(y)
title(paste("forecast with",n,"data points for estimation\nblue is estimate; red is true"))
#Now let's forecast 1000 times using our estimates
for(i in 1:1000){
y.forecast = x.forecast + rnorm(t.forward,0,sqrt(est.r))
jit=rnorm(1,0,.1)-.25
points(n+1:t.forward+jit,y.forecast,pch=".",col="blue")
}
#Now let's forecast 1000 times using truth
x.end = x[max(t)]
x.true.forecast = x.end + slope*(1:t.forward)
for(i in 1:1000){
y.true.forecast = x.true.forecast + rnorm(t.forward,0,sqrt(r))
jit=rnorm(1,0,.1)+.25
points(n+1:t.forward+jit,y.true.forecast,pch=".",col="red")
}
|
/docs/Lectures/Week 3/univariate_example_lm.R
|
no_license
|
atsa-es/atsa2019
|
R
|
setwd("D:/Dropbox/Public/Sean's Stuff/Grad School/Research/Scrape Tutorial") #set WD
library("rvest")
library(purrr)
library(plyr)
library(dplyr) #loading packages
##### Create Page Holder
page_holder <- rep(NA, 10)
for (i in 1:length(page_holder)){
  page_holder[i] <- paste0("https://www.politico.com/news/2020-elections/", i) #build each page URL directly, no gsub cleanup needed
}
####### Create Functions ###
URLFunction <- function(url){
Sys.sleep(1)
read_html(url) %>%
html_nodes("#main h1 a") %>%
html_attr('href')
}
HeadlineFunction <- function(url){
Sys.sleep(1)
as.character(html_text(html_nodes(read_html(url), '#main h1 a')))
}
AuthorFunction <- function(url){
Sys.sleep(1)
as.character(html_text(html_nodes(read_html(url),'.story-meta__authors .vcard')))
}
DateFunction <- function(url){
Sys.sleep(1)
as.character(html_text(html_nodes(read_html(url),'time')))
}
TextFunction <- function(url){
Sys.sleep(1)
(html_text(html_nodes(read_html(url), '.story-text__paragraph')))
}
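#Each helper sleeps one second per request to throttle the scraper; the CSS
#selectors ('#main h1 a', '.story-meta__authors .vcard', 'time',
#'.story-text__paragraph') are tied to politico.com's 2020 page layout and
#will return empty results if the site markup changes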
##### Scraping #####
PoliticoURL <- sapply(page_holder, URLFunction) #Apply URL Function, will take some time
PoliticoURL <- URLFunction(page_holder[1]) #for this demo keep only page 1 (overwrites the full scrape above)
# PoliticoHeadline <- sapply(page_holder, HeadlineFunction) #Apply Headline function
# PoliticoHeadline <- as.character(PoliticoHeadline) #put into one column
PoliticoHeadline <- HeadlineFunction(page_holder[1])
Database <- as.data.frame(cbind(PoliticoHeadline, PoliticoURL)) #if you want to make sure they match up
Dates <- rep(NA,length(Database$PoliticoHeadline)) #we are going from actual articles now, so using a loop to deal with formatting errors/lag
for(i in 1:length(Dates)){
Dates[i] <- DateFunction(PoliticoURL[i])
print(i)
}
saveRDS(Dates, "Dates.rds")
Author <- rep(NA,length(Database$PoliticoHeadline))
for(i in 1:length(Author)){
Author[i] <- AuthorFunction(PoliticoURL[i])
print(i)
}
saveRDS(Author, "Author.rds")
Database$Date <- Dates
Database$Author <- Author
###### Scraping the Text ###
setwd("D:/Dropbox/Public/Sean's Stuff/Grad School/Research/Scrape Tutorial/Text")
for (i in 1:length(Database$PoliticoURL)){
text_holder <- NA
text_holder <- TextFunction(Database$PoliticoURL[i])
output <- paste0(i, ".txt") #create a name for each
write.csv(text_holder, output) #saves file
rm(text_holder)
print(i)
}
#### Bringing Text Back In ###
library(readr)
setwd("D:/Dropbox/Public/Sean's Stuff/Grad School/Research/Scrape Tutorial/Text")
file_list <- rep(NA, 20)
for (i in 1:length(file_list)){
  file_list[i] <- paste0(i, ".txt") #creating the list of file names that we exported
}
if (exists("dataset")) rm(dataset) #start fresh so rows are not appended to a stale object
for (file in file_list){
if(exists("dataset")){
temp_dataset <- read_file(file)
dataset<-rbind(dataset, temp_dataset)
rm(temp_dataset)
print(file)
}
if(!exists("dataset")){
dataset <- read_file(file)
}}
Database$Text <- dataset
|
/Scrape Tutorial.R
|
no_license
|
lorenc5/POSC-207
|
R
|
rankhospital <- function(state, outcome, num) {
## Read outcome data
df <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
if(!state %in% unique(df$State)){
stop("invalid sate")
}
if(!outcome %in% c("heart attack","heart failure","pneumonia")){
stop("invalid outcome")
}
## Return hospital name in that state with the given rank
find_rank <- function(x){
df <- df[df[x] != "Not Available",]
df <- df[df$State == state,]
df[, x] <- as.numeric(df[, x])
df <- df[order(df[x], df$Hospital.Name),"Hospital.Name"]
if(num == "best")
df[1]
else if(num == "worst")
df[length(df)]
    else {
      if(num > length(df)) return(NA) #rank beyond the last hospital
      df[num]
    }
}
if(outcome == "heart attack"){
find_rank(11)
} else if(outcome == "heart failure"){
find_rank(17)
} else if(outcome == "pneumonia"){
find_rank(23)
}
}
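## Example calls (illustrative; they assume the Coursera
## "outcome-of-care-measures.csv" file sits in the working directory):
# rankhospital("TX", "heart failure", 4)
# rankhospital("MD", "heart attack", "worst")
# rankhospital("MN", "heart attack", 5000)  # rank past the last hospital -> NA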
|
/R_programming_week_4/rankhospital.R
|
no_license
|
nsakiotis/R_Repo
|
R
|
#' Internal simulation function to generate a matrix to weight the genotypes when estimating d and stickbreaking coefficients
#'
#' @param geno.matrix Genotype matrix generated in \code{\link{generate.geno.matrix}}
#' @param fit.matrix Fitness matrix generated in \code{\link{simulate.stick.data}}
#' @param wts Vector of weights to be applied of form c(singletons, multiples). Default \code{wts=c(2,1)}.
#' @return weight.matrix
#' @details To calculate the likelihood of the data under the stickbreaking model for a given value of d,
#' we need to estimate the stickbreaking coefficients. The issue is whether all genotypes in the network provide equally good information
#' about the coefficients. The default assumption is that the wild type fitness
#' is known without error while all other genotypes have the same error structure. Coefficient estimates are based
#' on comparing pairs of genotypes: with and without the mutation. Therefore, estimates based on comparing wild type to
#' the single mutations (singletons) are expected to have half the variance of all other comparisons (i.e. multiples). This
#' function generates the weights matrix that reflects this. To change this assumption, change the \code{wts} parameter. For example,
#' if the wild type has the same error as all other genotypes, \code{wts = c(1,1)} would be appropriate.
#' @examples Examples here
#' @export
generate.geno.weight.matrix <- function(geno.matrix, fit.matrix, wts=c(2,1)){
weight.matrix <- matrix(nrow=dim(geno.matrix)[1], ncol=dim(geno.matrix)[2])
wt.row <- which(rowSums(geno.matrix)==0)
mult.rows <- which(rowSums(geno.matrix)>1)
single.rows <- which(rowSums(geno.matrix)==1)
n.muts <- length(geno.matrix[1,])
if (length(wts)==2){
for (geno.i in 1:dim(geno.matrix)[1]){
if (geno.i %in% single.rows){
weight.matrix[geno.i,which(geno.matrix[geno.i,]==1)] <- wts[1]
} else if (geno.i %in% mult.rows){
weight.matrix[geno.i,which(geno.matrix[geno.i,]==1)] <- wts[2]
}
}
} else{ # Using wts vector provided
geno.sim.strings.h <- apply(geno.matrix, MARGIN=1, FUN=paste, collapse="")
mut.i.genos <- apply(geno.matrix, 2, function(x) which(x==1))
if (is.list(mut.i.genos)==FALSE){
mut.i.genos <- as.list(as.data.frame(mut.i.genos))
}
for (mut.i in 1:n.muts){
for (geno.i in 1:length(mut.i.genos[[mut.i]])){ # so geno.i is indexing mut.i.genos (not geno.matrix or fit.matrix)
#geno.ii <- mut.i.genos[geno.i, mut.i]
geno.ii <- mut.i.genos[[mut.i]][geno.i]
geno <- geno.matrix[geno.ii,]
geno.background <- geno
geno.background[mut.i] <- 0
geno.back.string <- paste(geno.background, collapse="")
back.id <- which(geno.sim.strings.h==geno.back.string)
var.of.diff <- wts[geno.ii] + wts[back.id]
weight.of.geno <- 1/var.of.diff
weight.matrix[geno.ii, mut.i] <- weight.of.geno
} #next geno.i
} #next.mut.i
}
# Normalize columns (mutations) to sum to one
for (mut.i in 1:dim(geno.matrix)[2]){
weight.matrix[,mut.i] <- weight.matrix[,mut.i]/sum(weight.matrix[,mut.i], na.rm=T)
}
return(weight.matrix)
}
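# Minimal illustrative call (hypothetical 2-locus network: wild type, two
# singletons, one double mutant; fit.matrix is not used by the default
# wts = c(2, 1) branch, so NULL is fine here):
# gm <- rbind(c(0,0), c(1,0), c(0,1), c(1,1))
# generate.geno.weight.matrix(gm, fit.matrix = NULL, wts = c(2, 1))
# # each column sums to 1; singleton rows get twice the weight of the double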
|
/R/fnxs_general.R
|
no_license
|
crussellmiller/Stickbreaker
|
R
|
estimateComplex_2x2 <- function(x, ...) {
UseMethod("estimateComplex_2x2")
}
estimateComplex_2x2.numeric <- function(means = c(NULL),
sds = c(NULL),
ns = c(NULL),
alabels = c("A1", "A2"),
blabels = c("B1", "B2"),
aname = "A",
bname = "B",
conf.level = .95) {
# Setup 5 common contrasts for a 2x2 design
contrast1 <- c(-1/2, -1/2, 1/2, 1/2)
contrast2 <- c(-1/2, 1/2, -1/2, 1/2)
contrast3 <- c(1, -1, -1, 1)
contrast4 <- c(-1, 1, 0, 0)
contrast5 <- c(0,0,-1, 1)
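  # With the cells ordered A1.B1, A1.B2, A2.B1, A2.B2, these encode (in order):
  # the main effect of A, the main effect of B, the A-by-B interaction,
  # the simple effect of B at A1, and the simple effect of B at A2.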
  mycontrasts <- list(contrast1, contrast2, contrast3, contrast4, contrast5)
# Check if variable labels have been passed, otherwise set to generic A*B labels
if(is.null(alabels)) {
alabels <- c("A1", "A2")
}
if(is.null(blabels)) {
blabels <- c("B1", "B2")
}
if(is.null(aname)) {
aname <- "A"
}
if(is.null(bname)) {
bname <- "B"
}
# Make cross labels for each cell
labels <- paste(rep(alabels, each = length(blabels)), blabels, sep = "\n")
celllabels <- paste(rep(alabels, each = length(blabels)), blabels, sep = ".")
# Now make labels for each of the main contrasts
clabel1 <- c(alabels[2], alabels[1], paste("Main effect\n of ", aname, ":\n(", alabels[2], " - ", alabels[1], ")", sep = ""))
clabel2 <- c(blabels[2], blabels[1], paste("Main effect\n of ", bname, ":\n(", blabels[2], " - ", blabels[1], ")", sep = ""))
clabel3 <- c("G2", "G1", paste("Interaction\n of ", aname, " and ", bname, ":\n(",
celllabels[4], " - ", celllabels[3], ") - (",
celllabels[2], " - ", celllabels[1], ")",
sep = ""
)
)
clabel4 <- c(labels[2], labels[1], paste("Simple effect:\n(", blabels[2], " - ", blabels[1], ")\n at ", alabels[1], sep=""))
clabel5 <- c(labels[4], labels[3], paste("Simple effect:\n(", blabels[2], " - ", blabels[1], ")\n at ", alabels[2], sep=""))
clabels <- list(clabel1, clabel2, clabel3, clabel4, clabel5)
# Use estimate contrasts to obtain estimate for each contrast
  estimate <- estimateContrasts.numeric(means, sds, ns, mycontrasts, labels, clabels, conf.level = conf.level)
# Stitch together the interaction contrast--we will copy each simple effect in
estimate$contrast_table[7, ] <- estimate$contrast_table[15, ]
estimate$contrast_table[8, ] <- estimate$contrast_table[12, ]
estimate$contrast_table[7, "contrast_number"] <- 3
estimate$contrast_table[8, "contrast_number"] <- 3
# Fix the labels in the contrast table to remove line breaks
levels(estimate$contrast_table$label) <- gsub("\n", "", levels(estimate$contrast_table$label))
levels(estimate$means_table$label) <- gsub("\n", "", levels(estimate$means_table$label))
# Fix the interaction plot
estimate$plot_table <- estimate$plot_table[!(estimate$plot_table$contrast_number == 3 & estimate$plot_table$plot_labels != "Difference"), ]
estimate$plot_table[estimate$plot_table$contrast_number == 3, ]$plot_labels <- "Interaction"
estimate$plot_table[estimate$plot_table$contrast_number == 3, ]$x <- 5.5
estimate$error_table <- estimate$error_table[!(estimate$error_table$contrast_number == 3 & estimate$error_table$label != "Difference"), ]
estimate$error_table[estimate$error_table$contrast_number == 3, ]$label <- "Interaction"
estimate$error_table[estimate$error_table$contrast_number == 3, ]$x <- 5.5
#Fix the incorrect offset of the error table
wrong_offset <- mean(means[which(contrast3 > 0)])
estimate$error_table[estimate$error_table$contrast_number == 3, ]$m <-
estimate$error_table[estimate$error_table$contrast_number == 3, ]$m -
wrong_offset +
means[4]
return(estimate)
}
estimateComplex_2x2.default <- function(data, dv, iv1, iv2,
conf.level = .95) {
# Initialization ---------------------------
# Create quosures and quonames.
# Stolen directly from dabestr
dv_enquo <- rlang::enquo(dv)
dv_quoname <- rlang::quo_name(dv_enquo)
iv1_enquo <- rlang::enquo(iv1)
iv1_quoname <- rlang::quo_name(iv1_enquo)
iv2_enquo <- rlang::enquo(iv2)
iv2_quoname <- rlang::quo_name(iv2_enquo)
# Validate inputs ---------------------------
# check CI.
if (conf.level < 0.50 | conf.level >= 1) {
err_string <- stringr::str_interp(
"`conf.level` must be between 0.50 and 1, not ${conf.level}"
)
stop(err_string)
}
# Check data is a dataframe
if(!is.data.frame(data)) {
err_string <- stringr::str_interp(
"`data` must be a data frame, not ${class(data)}"
)
stop(err_string)
}
  #Check data has at least 8 rows
if(nrow(data)<8) {
err_string <- stringr::str_interp(
"`data` must have more than 7 rows, not ${nrow(data)}"
)
stop(err_string)
}
#Check that dv column exists
if(dv_quoname %in% colnames(data)) {
} else {
err_string <- stringr::str_interp(
"Must pass a column name that exists, not ${dv_quoname}"
)
stop(err_string)
}
#Check that iv1 column exists
if(iv1_quoname %in% colnames(data)) {
} else {
err_string <- stringr::str_interp(
"Must pass a column name that exists, not ${iv1_quoname}"
)
stop(err_string)
}
  #Check that iv2 column exists
if(iv2_quoname %in% colnames(data)) {
} else {
err_string <- stringr::str_interp(
"Must pass a column name that exists, not ${iv2_quoname}"
)
stop(err_string)
}
# Check if dv is numeric
if(!is.numeric(data[[dv_quoname]])) {
err_string <- stringr::str_interp(
"dv (${dv_quoname}) must be numeric, not ${class(data[[dv_quoname]])}. Try making a numeric colum with as.numeric"
)
stop(err_string)
}
# Data cleanup ---------------------------
# Make duplicate copies that can be addressed using $ notation..cause I like it?
data$iv1 <- data[[iv1_quoname]]
data$iv2 <- data[[iv2_quoname]]
data$dv <- data[[dv_quoname]]
# Reduce down to only the iv and dv columns. Since we're passing this data back, best to limit its size, I think
keeps <- c("iv1", "iv2", "dv")
data <- data[keeps]
# Now remove NAs from data
data <- data[!is.na(data$dv), ]
data <- data[!is.na(data$iv1), ]
data <- data[!is.na(data$iv2), ]
data$x <- 0
#Now get summary data by group
means <- c(NULL)
sds <- c(NULL)
ns <- c(NULL)
labels <- c(NULL)
this_index <- 0
for (this_group in levels(data$iv1)) {
for(that_group in levels(data$iv2)) {
group_only <- data[data$iv1 == this_group & data$iv2 == that_group, ]
if (nrow(group_only) > 0) {
this_index <- this_index + 1
data[data$iv1 == this_group & data$iv2 == that_group, ]$x <- this_index-0.5
means[this_index] <- mean(group_only$dv)
sds[this_index] <- sd(group_only$dv)
ns[this_index] <- nrow(group_only)
labels[this_index] <- this_group
}
}
}
### Now pass along to summary data version
res <- estimateComplex_2x2.numeric(means = means,sds = sds, ns = ns,
alabels = levels(data$iv1),
blabels = levels(data$iv2),
aname = iv1_quoname,
bname = iv2_quoname,
conf.level = conf.level
)
data$cell_labels <- as.factor(paste(data$iv1, data$iv2, sep="."))
contrast_count <- 0
for (contrast in res$contrasts) {
contrast_count <- contrast_count + 1
contrast_column <- paste("contrast", contrast_count)
data[data$cell_labels %in% levels(data$cell_labels)[which(contrast > 0)], contrast_column] <- "G1"
data[data$cell_labels %in% levels(data$cell_labels)[which(contrast < 0)], contrast_column] <- "G2"
data[data$cell_labels %in% levels(data$cell_labels)[which(contrast == 0)], contrast_column] <- "Unused"
}
res$raw_data <- data
### Prepare for return
return(res)
}
## Raw data test
# testd <- data.frame(duration = c(rep("morning", 100), rep("evening", 100)),
# activity = c(rep("Sleep", 50), rep("Wake", 50), rep("Sleep", 50), rep("Wake", 50)),
# memory = c(rnorm(n = 50, mean= 1.5, sd = 1.38), rnorm(n = 50, mean= 1.14, sd = 0.96), rnorm(n = 50, mean= 1.38, sd = 1.5), rnorm(n = 50, mean= 2.22, sd = 1.68))
# )
#
# estimate <- estimateComplex_2x2(testd, memory, duration, activity)
# myplot <- plotContrast(estimate, contrast_number = 3)
# myplot
###
# # Temp assignments for debugging
# means <- c(1.5, 1.14, 1.38, 2.22)
# sds <- c(1.38, 0.96, 1.5, 1.68)
# ns <- c(26, 26, 25, 26)
# alabels <- c("Evening", "Morning")
# blabels <- c("Sleep", "No Sleep")
# aname <- "Time"
# bname <- "Activity"
# conf.level = .95
#
# estimate <- estimateComplex_2x2(means, sds, ns, alabels, blabels, aname, bname)
#
# myplot <- plotContrast(estimate, contrast_number = 2, ylab = "Memory Score")
# myplot
|
/R/estimateInteraction.R
|
no_license
|
MelinaPB/esci
|
R
|
# this script plots the PR counties and will eventually add the ARIA damage points for Hurricane Maria
print("starting")
# first, add some libraries
library(sp)
library(raster)
library(rgdal)
library(maptools)
# partial path to the ARIA data
path_vec_east <- c("20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s2_04_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s2_05_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s2_06_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s2_07_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s2_08_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s3_05_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s3_06_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s3_07_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s3_08_c0.7g1_T1H0B0U1_dpm.tif","20170921_1014z_PuertoRico_S1_DPM_NASA_ARIA_v0.4_geotiff/DPM_Maria_S1_s3_09_c0.7g1_T1H0B0U1_dpm.tif")
path_vec_west <- c("20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s1_01_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s1_02_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s1_03_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s1_04_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s2_01_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s2_02_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s2_03_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s2_04_c0.6g1_T1H0B0U1_dpm.tif","20170926_1023z_PuertoRicoWest_S1_DPM_NASA_ARIA_v0.5_geotiff/DPM_Maria_S1_s2_10_c0.6g1_T1H0B0U1_dpm.tif")
path_vec_all <- c(path_vec_east, path_vec_west)
# open the shapefile of county boundaries
PR_counties <- readOGR(dsn="/Users/mschwall/Desktop/hurricane_maria/nhgis0042_shapefile_tl2016_us_county_2016", layer="US_county_2016") # ingest the shapefile
lat_lon_projection <- CRS("+proj=longlat +datum=WGS84") # the basic geographic lat,lon projections
PR_counties_latlon <- spTransform(PR_counties, lat_lon_projection) # convert the nhgis0042_shapefile_tl2016_us_county_2016 projection to geographic lat,lon
plot(PR_counties_latlon, xlim=c(-67.5, -64.5), ylim=c(17.5, 18.5 ), xlab="longitude degrees", ylab="latitude degrees")
box() # put a box around the plot
axis(1) # and add the axis ticks
axis(2)
# now open each of the geotiffs (there are 10 for the eastern part & 9 for the western part) and plot their points on the map
use_this_vec <- path_vec_all
num_tifs <- length(use_this_vec)
for (ii in 1:num_tifs) {
print(c("starting iteration", ii))
tiff_name <- paste("/Users/mschwall/Desktop/hurricane_maria/", use_this_vec[ii], sep="")
  # open band 2 of the tiff as a raster; this band appeared to have the largest number of hits, which I'm guessing are Maria damage hotspots
PR_tiff1 <- raster(tiff_name, band=2)
crs(PR_tiff1) <- lat_lon_projection # use the same projections for the raster
PR_data <- rasterToPoints(PR_tiff1) # convert the raster to a lon,lat,value matrix
  points(PR_data[which(PR_data[,3] < 255),1], PR_data[which(PR_data[,3] < 255),2], pch=19,
         col = rgb(red=1, green=0, blue=0, alpha=0.5), cex=0.01)   # plot the locations of points with band values below 255
}
|
/ARIA_hurricane_maria_analysis_v3.R
|
no_license
|
mschwaller/R_code
|
R
|
# prior distribution for theta
#' Prior distribution for time-to-event outcomes
#'
#' If we do not assume the treatment effects to be fixed, i.e. `fixed = FALSE`,
#' the function `prior_tte` allows us to model the treatment effect following a prior distribution.
#' For more details concerning the definition of a prior distribution, see the \href{https://sterniii3.github.io/drugdevelopR/articles/Introduction-to-drugdevelopR.html}{vignette on priors}
#' as well as the \href{https://web.imbi.uni-heidelberg.de/prior/}{Shiny app}.
#'
#' @param x integration variable
#' @param w weight for mixture prior distribution
#' @param hr1 first assumed true treatment effect on HR scale for prior distribution
#' @param hr2 second assumed true treatment effect on HR scale for prior distribution
#' @param id1 amount of information for `hr1` in terms of number of events
#' @param id2 amount of information for `hr2` in terms of number of events
#' @return The output of the function `prior_tte()` is the value of the mixture prior density evaluated at `x`.
#' @examples res <- prior_tte(x = 0.5, w = 0.5, hr1 = 0.69, hr2 = 0.88, id1 = 240, id2 = 420)
#' @export
#' @keywords internal
prior_tte<-function(x, w, hr1, hr2, id1, id2){
w * dnorm(x, -log(hr1), sqrt(4/id1)) +
(1 - w) * dnorm(x, -log(hr2), sqrt(4/id2))
}
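# Quick illustrative check (parameter values borrowed from the roxygen
# example above): a proper mixture density must integrate to one.
# integrate(prior_tte, -Inf, Inf, w = 0.5, hr1 = 0.69, hr2 = 0.88,
#           id1 = 240, id2 = 420)   # expect a value very close to 1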
# 1,000,000 realizations of the prior distribution
box_tte<-function(w, hr1, hr2, id1, id2){
w * rnorm(1000000, -log(hr1),sqrt(4/id1)) +
(1 - w) * rnorm(1000000, -log(hr2), sqrt(4/id2))
}
# expected probability to go to phase III
#' Expected probability to go to phase III for time-to-event outcomes
#'
#' @param HRgo threshold value for the go/no-go decision rule
#' @param d2 total number of events for phase II; must be even number
#' @param w weight for mixture prior distribution
#' @param hr1 first assumed true treatment effect on HR scale for prior distribution
#' @param hr2 second assumed true treatment effect on HR scale for prior distribution
#' @param id1 amount of information for `hr1` in terms of number of events
#' @param id2 amount of information for `hr2` in terms of number of events
#' @param fixed choose if true treatment effects are fixed or random, if TRUE `hr1` is used as fixed effect
#' @return The output of the function `Epgo_tte()` is the expected probability to go to phase III.
#' @examples res <- Epgo_tte(HRgo = 0.8, d2 = 50,
#' w = 0.3, hr1 = 0.69, hr2 = 0.81,
#' id1 = 280, id2 = 420, fixed = FALSE)
#' @export
#' @keywords internal
Epgo_tte <- function(HRgo, d2, w, hr1, hr2, id1, id2, fixed){
if(!fixed){
return(
integrate(function(x){
sapply(x, function(x){
pnorm((log(HRgo) + x)/sqrt(4/d2))*
prior_tte(x, w, hr1, hr2, id1, id2)
})
}, - Inf, Inf)$value
)
}else{
return(
pnorm((log(HRgo) - log(hr1))/sqrt(4/d2))
)
}
}
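# Worked fixed-effect check (values from the roxygen example, but with
# fixed = TRUE): pnorm((log(0.8) - log(0.69))/sqrt(4/50)) is roughly 0.70,
# i.e. about a 70% chance of a go decision under these assumptions.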
# expected number of events for phase III
# in before phase II perspective
#' Expected number of events for phase III for time-to-event outcomes
#'
#' @param HRgo threshold value for the go/no-go decision rule
#' @param d2 total events for phase II; must be even number
#' @param alpha significance level
#' @param beta `1-beta` power for calculation of sample size for phase III
#' @param w weight for mixture prior distribution
#' @param hr1 first assumed true treatment effect on HR scale for prior distribution
#' @param hr2 second assumed true treatment effect on HR scale for prior distribution
#' @param id1 amount of information for `hr1` in terms of number of events
#' @param id2 amount of information for `hr2` in terms of number of events
#' @param fixed choose if true treatment effects are fixed or random, if TRUE `hr1` is used as fixed effect
#' @return The output of the function `Ed3_tte()` is the expected number of events in phase III.
#' @examples res <- Ed3_tte(HRgo = 0.8, d2 = 50,
#' alpha = 0.025, beta = 0.1, w = 0.3,
#' hr1 = 0.69, hr2 = 0.81,
#' id1 = 280, id2 = 420, fixed = FALSE)
#' @export
#' @keywords internal
Ed3_tte <- function(HRgo, d2, alpha, beta,
w, hr1, hr2, id1, id2, fixed){
if(!fixed){
return(
integrate(function(x){
sapply(x, function(x){
integrate(function(y){
((4*(qnorm(1-alpha)+qnorm(1-beta))^2)/(y^2))*
dnorm(y,
mean = x,
sd = sqrt(4/d2))*
prior_tte(x, w, hr1, hr2, id1, id2)
}, -log(HRgo), Inf)$value
})
}, - Inf, Inf)$value
)
}else{
return(
integrate(function(y){
((4*(qnorm(1-alpha)+qnorm(1-beta))^2)/(y^2))*
dnorm(y,
mean = -log(hr1),
sd = sqrt(4/d2))
}, -log(HRgo), Inf)$value
)
}
}
# expected probability of a successful program
#' Expected probability of a successful program for time-to-event outcomes
#'
#' @param HRgo threshold value for the go/no-go decision rule
#' @param d2 total events for phase II; must be even number
#' @param alpha significance level
#' @param beta `1-beta` power for calculation of sample size for phase III
#' @param step1 lower boundary for effect size
#' @param step2 upper boundary for effect size
#' @param w weight for mixture prior distribution
#' @param hr1 first assumed true treatment effect on HR scale for prior distribution
#' @param hr2 second assumed true treatment effect on HR scale for prior distribution
#' @param id1 amount of information for `hr1` in terms of number of events
#' @param id2 amount of information for `hr2` in terms of number of events
#' @param gamma difference in treatment effect due to different population structures in phase II and III
#' @param fixed choose if true treatment effects are fixed or random, if TRUE `hr1` is used as fixed effect
#' @return The output of the function `EPsProg_tte()` is the expected probability of a successful program.
#' @examples res <- EPsProg_tte(HRgo = 0.8, d2 = 50,
#' alpha = 0.025, beta = 0.1,
#' step1 = 1, step2 = 0.95,
#' w = 0.3, hr1 = 0.69, hr2 = 0.81,
#' id1 = 280, id2 = 420,
#' gamma = 0, fixed = FALSE)
#' @export
#' @keywords internal
EPsProg_tte <- function(HRgo, d2, alpha, beta,
step1, step2,
w, hr1, hr2, id1, id2,
gamma, fixed){
c = (qnorm(1 - alpha) + qnorm(1 - beta))^2
if(!fixed){
return(
integrate(function(x){
sapply(x, function(x){
integrate(function(y){
(pnorm(qnorm(1-alpha)-log(step2)/(sqrt(y^2/c)),
mean = (x+gamma)/(sqrt(y^2/c)),
sd = 1) -
pnorm(qnorm(1-alpha)-log(step1)/(sqrt(y^2/c)),
mean = (x+gamma)/(sqrt(y^2/c)),
sd = 1) )*
dnorm(y,
mean = x,
sd = sqrt(4/d2))*
prior_tte(x, w, hr1, hr2, id1, id2)
}, -log(HRgo), Inf)$value
})
}, - Inf, Inf)$value
)
}else{
return(
integrate(function(y){
(pnorm(qnorm(1-alpha)-log(step2)/(sqrt(y^2/c)),
mean = (-log(hr1)+gamma)/(sqrt(y^2/c)),
sd = 1) -
pnorm(qnorm(1-alpha)-log(step1)/(sqrt(y^2/c)),
mean = (-log(hr1)+gamma)/(sqrt(y^2/c)),
sd = 1))*
dnorm(y,
mean = -log(hr1),
sd = sqrt(4/d2))
}, - log(HRgo), Inf)$value
)
}
}
# utility function
#' Utility function for time-to-event outcomes.
#'
#' The utility function calculates the expected utility of our drug development program and is given as gains minus costs and depends on the parameters and the expected probability of a successful program.
#' The utility is in a further step maximized by the `optimal_tte()` function.
#' @param HRgo threshold value for the go/no-go decision rule
#' @param d2 total events for phase II; must be even number
#' @param alpha significance level
#' @param beta `1-beta` power for calculation of sample size for phase III
#' @param xi2 event rate for phase II
#' @param xi3 event rate for phase III
#' @param w weight for mixture prior distribution
#' @param hr1 first assumed true treatment effect on HR scale for prior distribution
#' @param hr2 second assumed true treatment effect on HR scale for prior distribution
#' @param id1 amount of information for `hr1` in terms of number of events
#' @param id2 amount of information for `hr2` in terms of number of events
#' @param c2 variable per-patient cost for phase II
#' @param c3 variable per-patient cost for phase III
#' @param c02 fixed cost for phase II
#' @param c03 fixed cost for phase III
#' @param K constraint on the costs of the program, default: Inf, e.g. no constraint
#' @param N constraint on the total expected sample size of the program, default: Inf, e.g. no constraint
#' @param S constraint on the expected probability of a successful program, default: -Inf, e.g. no constraint
#' @param steps1 lower boundary for effect size category `"small"` in RR scale, default: 1
#' @param stepm1 lower boundary for effect size category `"medium"` in RR scale = upper boundary for effect size category "small" in RR scale, default: 0.95
#' @param stepl1 lower boundary for effect size category `"large"` in RR scale = upper boundary for effect size category "medium" in RR scale, default: 0.85
#' @param b1 expected gain for effect size category `"small"`
#' @param b2 expected gain for effect size category `"medium"`
#' @param b3 expected gain for effect size category `"large"`
#' @param gamma difference in treatment effect due to different population structures in phase II and III
#' @param fixed choose if true treatment effects are fixed or random, if TRUE `hr1` is used as fixed effect
#' @return The output of the function `utility_tte()` is the expected utility of the program.
#' @examples res <- utility_tte(d2 = 50, HRgo = 0.8, w = 0.3,
#' hr1 = 0.69, hr2 = 0.81,
#' id1 = 280, id2 = 420, xi2 = 0.7, xi3 = 0.7,
#' alpha = 0.025, beta = 0.1,
#' c2 = 0.75, c3 = 1, c02 = 100, c03 = 150,
#' K = Inf, N = Inf, S = -Inf,
#' steps1 = 1, stepm1 = 0.95, stepl1 = 0.85,
#' b1 = 1000, b2 = 2000, b3 = 3000,
#' gamma = 0, fixed = TRUE)
#' @export
#' @keywords internal
utility_tte <- function(d2, HRgo, w, hr1, hr2, id1, id2,
alpha, beta, xi2, xi3,
c2, c3, c02, c03,
K, N, S,
steps1, stepm1, stepl1,
b1, b2, b3,
gamma, fixed){
steps2 <- stepm1
stepm2 <- stepl1
stepl2 <- 0
d3 <- Ed3_tte(HRgo = HRgo, d2 = d2, alpha = alpha,
beta = beta, w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2, fixed = fixed)
# sample size is rounded up to next even natural number
n2 <- ceiling(d2*(1/xi2))
if(round(n2/2) != n2 / 2) {n2 <- n2 + 1}
n3 <- ceiling(d3 * (1/xi3))
if(round(n3/2) != n3 / 2) {n3 <- n3 + 1}
# expected number of events is rounded to natural number
d3 <- ceiling(d3)
if(n2+n3>N){
return(c(-9999, -9999, -9999, -9999, -9999, -9999,
-9999, -9999, -9999, -9999, -9999))
}else{
pg <- Epgo_tte(HRgo = HRgo, d2 = d2,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
fixed = fixed)
K2 <- c02 + c2 * n2 # cost phase II
K3 <- c03 * pg + c3 * n3 # cost phase III
if(K2+K3>K){
return(c(-9999, -9999, -9999, -9999, -9999, -9999,
-9999, -9999, -9999, -9999, -9999))
}else{
# probability of a successful program:
# small, medium and large effect size
prob1 <- EPsProg_tte(HRgo = HRgo, d2 = d2,
alpha = alpha, beta = beta,
step1 = steps1, step2 = steps2,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
gamma = gamma, fixed = fixed)
prob2 <- EPsProg_tte(HRgo = HRgo, d2 = d2,
alpha = alpha, beta = beta,
step1 = stepm1, step2 = stepm2,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
gamma = gamma, fixed = fixed)
prob3 <- EPsProg_tte(HRgo = HRgo, d2 = d2,
alpha = alpha, beta = beta,
step1 = stepl1, step2 = stepl2,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
gamma = gamma, fixed = fixed)
SP <- prob1 + prob2 + prob3
if(SP<S){
return(c(-9999, -9999, -9999, -9999, -9999,
-9999, -9999, -9999, -9999, -9999, -9999))
}else{
G <- b1 * prob1 + b2 * prob2 + b3 * prob3
EU <- - K2 - K3 + G
return(
c(EU, d3, SP, pg, K2, K3, prob1, prob2, prob3, n2, n3)
)
}
}
}
}
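# Note: utility_tte() returns an unnamed vector in the order
# c(EU, d3, SP, pg, K2, K3, prob1, prob2, prob3, n2, n3); the sentinel
# value -9999 flags settings that violate the N, K or S constraint.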
#################
# skip phase II #
#################
# number of events for phase III based on median_prior
#' Expected number of events for phase III when skipping phase II for time-to-event outcomes
#'
#' If choosing `skipII = TRUE`, the program calculates the expected utility for the case when phase
#' II is skipped and compares it to the situation when phase II is not skipped.
#' This function calculates the expected sample size for phase III for time-to-event outcomes using a median prior.
#' @param alpha significance level
#' @param beta `1-beta` power for calculation of sample size for phase III
#' @param median_prior the median_prior is given as -log(hr1), the assumed true treatment effect
#' @return The output of the function `d3_skipII_tte()` is the expected number of events in phase III when skipping phase II.
#' @examples res <- d3_skipII_tte(alpha = 0.05, beta = 0.1, median_prior = 0.35)
#' @export
#' @keywords internal
d3_skipII_tte <-function(alpha, beta, median_prior){
return(
(4*(qnorm(1-alpha)+qnorm(1-beta))^2)/(median_prior^2)
)
}
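# Worked example (assuming a fixed true hazard ratio of 0.8, so
# median_prior = -log(0.8)): d3_skipII_tte(alpha = 0.025, beta = 0.1,
# median_prior = -log(0.8)) gives 4*(qnorm(0.975)+qnorm(0.9))^2/log(0.8)^2,
# roughly 844 events.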
# expected probability of a successful program
# based on median_prior
EPsProg_skipII_tte <-function(alpha, beta, step1, step2,
median_prior, w, hr1, hr2,
id1, id2, gamma, fixed){
c=(qnorm(1-alpha)+qnorm(1-beta))^2
if(!fixed){
return(
integrate(function(x){
sapply(x,function(x){
(pnorm(qnorm(1-alpha)-
log(step2)/(sqrt(median_prior^2/c)),
mean=(x+gamma)/(sqrt(median_prior^2/c)),
sd=1)-
pnorm(qnorm(1-alpha)-
log(step1)/(sqrt(median_prior^2/c)),
mean=(x+gamma)/(sqrt(median_prior^2/c)),
sd=1))*
prior_tte(x, w, hr1, hr2, id1, id2)
})
}, -Inf, Inf)$value
)
}else{
return(
pnorm(qnorm(1-alpha)-
log(step2)/(sqrt(median_prior^2/c)),
mean=(-log(hr1)+gamma)/(sqrt(median_prior^2/c)),
sd=1)-
pnorm(qnorm(1-alpha)-
log(step1)/(sqrt(median_prior^2/c)),
mean=(-log(hr1)+gamma)/(sqrt(median_prior^2/c)),
sd=1)
)
}
}
#utility function
utility_skipII_tte <-function(alpha, beta, xi3, c03, c3,
b1, b2, b3, median_prior,
K, N, S,
steps1, stepm1, stepl1,
w, hr1, hr2, id1, id2,
gamma, fixed){
steps2 <- stepm1
stepm2 <- stepl1
stepl2 <- 0
d3 <- d3_skipII_tte(alpha = alpha, beta = beta,
median_prior = median_prior)
n3 <- ceiling(d3*(1/xi3))
  if(round(n3/2) != n3 / 2) {n3 <- n3 + 1}
d3 <- ceiling(d3)
if(n3>N){
return(c(-9999, -9999, -9999, -9999,
-9999, -9999, -9999, -9999))
}else{
K2 <- 0
K3 <- c03 + c3*n3
if(K2+K3>K){
return(c(-9999, -9999, -9999, -9999,
-9999, -9999, -9999, -9999))
}else{
# probability of a successful program:
# small, medium, large effect size
prob1 <- EPsProg_skipII_tte(alpha = alpha, beta = beta,
step1 = steps1,
step2 = steps2,
median_prior = median_prior,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
gamma = gamma, fixed = fixed)
prob2 <- EPsProg_skipII_tte(alpha = alpha, beta = beta,
step1 = stepm1,
step2 = stepm2,
median_prior = median_prior,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
gamma = gamma, fixed = fixed)
prob3 <- EPsProg_skipII_tte(alpha = alpha, beta = beta,
step1 = stepl1,
step2 = stepl2,
median_prior = median_prior,
w = w, hr1 = hr1, hr2 = hr2,
id1 = id1, id2 = id2,
gamma = gamma, fixed = fixed)
SP <- prob1 + prob2 + prob3
if(SP<S){
return(c(-9999, -9999, -9999, -9999,
-9999, -9999, -9999, -9999))
}else{
G <- b1 * prob1 + b2 * prob2 + b3 * prob3
EU <- - K2 - K3 + G
return(
c(EU, d3, n3, SP, K3, prob1, prob2, prob3)
)
}
}
}
}
|
/R/functions_tte.R
|
permissive
|
Sterniii3/drugdevelopR
|
R
| false | false | 18,523 |
r
|
testlist <- list(nmod = NULL, id = NULL, score = NULL, rsp = NULL, id = NULL, score = NULL, nbr = NULL, id = NULL, bk_nmod = integer(0), booklet_id = c(1192022832L, -996667132L, 432518541L, 815996035L, 1157250652L, 751417555L, 116882132L, 1085030516L, 1202941484L, 15623892L, -1665580313L, NA, NA, 1254131289L, 749806690L, -1501899956L, -1876835267L), booklet_score = integer(0), include_rsp = integer(0), item_id = c(1415150763L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), item_score = integer(0), module_nbr = c(992888811L, -1345548849L, -449112064L, NA, 1678998078L, 759393453L, 786045775L, 453135142L, 455895826L, -1331816706L, 391475866L, 1748544614L, 19691586L, 1176953756L, 349411874L, 2121585973L, -301177052L, 1082896916L, -450872028L, -636931467L, -53289638L), person_id = c(16777216L, 0L, 1409351680L, 682962941L, 1615462481L, 167774546L, 1801886528L, -1519479597L, -158300141L, 1701913732L, 1152883163L, 35860266L, 1969689444L, -1318203443L, -2131865434L, 1632280887L, 637082149L, 260799231L, 1754027460L, -1055514020L, -1311932986L))
result <- do.call(dexterMST:::make_booklets_unsafe,testlist)
str(result)
|
/dexterMST/inst/testfiles/make_booklets_unsafe/AFL_make_booklets_unsafe/make_booklets_unsafe_valgrind_files/1615943472-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false | false | 1,558 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidying_functions.R
\name{ipsatize}
\alias{ipsatize}
\title{Ipsatize circumplex items using deviation scoring across variables}
\usage{
ipsatize(.data, items, na.rm = TRUE, overwrite = FALSE)
}
\arguments{
\item{.data}{Required. A data frame containing at least circumplex scales.}
\item{items}{Required. The variable names or column numbers for the
variables in \code{.data} that contain circumplex items to be ipsatized.}
\item{na.rm}{Optional. A logical that determines whether missing values
should be ignored during the calculation of the mean during ipsatization
(default = TRUE).}
\item{overwrite}{Optional. A logical that determines whether the variables
specified in \code{items} should be overwritten with ipsatized versions
or alternatively preserved and new variables ending with "_i" should be
added to the data frame (default = FALSE).}
}
\value{
A data frame that matches \code{.data} except that the variables
specified in \code{items} have been rescored using ipsatization.
}
\description{
Rescore each circumplex item using deviation scoring across variables. In
other words, subtract each observation's mean response from each response.
This effectively removes the presence of a general factor, which can make
certain circumplex fit analyses more powerful.
}
\examples{
data("raw_iipsc")
ipsatize(raw_iipsc, IIP01:IIP32)
}
\seealso{
Other tidying functions:
\code{\link{score}()},
\code{\link{standardize}()}
}
\concept{tidying functions}
|
/man/ipsatize.Rd
|
no_license
|
cran/circumplex
|
R
| false | true | 1,584 |
rd
|
\name{QGmvicc}
\alias{QGmvicc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Intra-Class Correlation coefficients (ICC) on the observed data scale (multivariate analysis).
}
\description{
Function to estimate the variance-covariance matrix of a variance component on the observed scale based on estimates on the latent scale. Contrary to the univariate function, this one cannot use the analytical closed forms and yields a list of parameters instead of a data.frame.
}
\usage{
QGmvicc(mu = NULL, vcv.comp, vcv.P, models, predict = NULL, rel.acc = 0.001,
width = 10, n.obs = NULL, theta = NULL, verbose = TRUE, mask = NULL)
}
\arguments{
\item{mu}{Vector of latent intercepts estimated from a GLMM (ignored if predict is not \code{NULL}). (numeric)}
\item{vcv.comp}{Component variance-covariance matrix (G-matrix-like). (numeric)}
\item{vcv.P}{Total phenotypic variance-covariance matrix. Usually, the sum of all the estimated variance-covariance matrices. (numeric)}
\item{models}{A vector containing the names of the model used or a list whose elements contain the list of functions needed (inverse-link, distribution variance and derivative of the inverse-link, as stated in the output of \code{QGlink.funcs()}). (character vector or list of lists of functions) Available model names are:
\itemize{
\item{"Gaussian"} Gaussian distribution with identity link (e.g. LMM)
\item{"binom1.probit"} Binomial with 1 trial (binary data) with a probit link
\item{"binomN.probit"} Binomial with N tria with a probit link (require the parameter \code{n.obs})
\item{"binom1.logit"} Binomial with 1 trial (binary) with a logit link
\item{"binomN.logit"} Binomial with N trial with a logit link (require the parameter \code{n.obs})
\item{"Poisson.log"} Poisson distribution wiht a log link
\item{"Poisson.sqrt"} Poisson distribution with a square - root link
\item{"negbin.log"} Negative - Binomial distribution wiht a log link (require the parameter \code{theta})
\item{"negbin.sqrt"} Negative - Binomial distribution with a square - root link (require the parameter \code{theta})
}
}
\item{rel.acc}{Relative accuracy of the integral approximation. (numeric)}
\item{width}{Parameter for the integral computation. The default value is 10, which should be sensible for most models. (numeric)}
\item{predict}{Optional matrix of predicted values on the latent scale (each trait in each column). The latent predicted values must be computed while only accounting for the fixed effects (marginal to the random effects). (numeric)}
\item{n.obs}{Number of "trials" for each "binomN" distribution. (numeric, length equal to the number of "binomN" models)}
\item{theta}{Dispersion parameter for the Negative-Binomial distribution. The parameter \code{theta} should be such that the variance of the distribution is \code{mean + mean^2 / theta}. (numeric, length equal to the number of "negbin" models)}
\item{verbose}{Should the function be verbose? (boolean)}
\item{mask}{Masking filter for removing predictions that don't exist in the population (e.g. female predictions for males for a sex-based bivariate model). Should have the same dimensions as \code{predict}; values should be \code{FALSE} when the predictions should be filtered out.}
}
\details{
The function typically uses integral numerical approximation provided by the R2Cuba package to compute multivariate quantitative genetics parameters on the observed scale, from latent estimates yielded by a GLMM. It cannot use closed form solutions.
Only the most typical distribution/link function couples are implemented through the \code{models} argument. If you used an "exotic" GLMM, you can provide a list containing lists of functions corresponding to the model. The list of functions should be implemented as in the output of \code{QGlink.funcs()}, i.e. three elements: the inverse link function named \code{inv.link}, the derivative of this function named \code{d.inv.link} and the distribution variance named \code{var.func} (see Example below).
Some distributions require extra-arguments. This is the case for "binomN", which requires the number of trials N, passed with the argument \code{n.obs}. The distribution "negbin" requires a dispersion parameter \code{theta}, such that the variance of the distribution is \code{mean + mean^2 / theta} (mean/dispersion parametrisation). For now, the arguments \code{n.obs} and \code{theta} can be used for ONE distribution only.
If fixed effects (apart from the intercept) have been included in the GLMM, they can be included through the argument \code{predict} as a matrix of the marginal predicted values, i.e. predicted values excluding the random effects, for each trait (one trait per column of the matrix, see Example below). Note that computation can be extremely slow in that case.
}
\value{
The function yields a list containing the following values:
\item{mean.obs}{Vector of phenotypic means on the observed scale.}
\item{vcv.P.obs}{Phenotypic variance-covariance matrix on the observed scale.}
\item{vcv.comp.obs}{Component variance-covariance matrix (G-matrix-like, but broad-sense) on the observed scale.}
}
\author{
Pierre de Villemereuil & Michael B. Morrissey
}
\seealso{
\code{\link{QGmvparams}}, \code{\link{QGlink.funcs}}, \code{\link{QGmvmean}}, \code{\link{QGvcov}}, \code{\link{QGmvpsi}}
}
\examples{
## Example using a bivariate model (Binary trait/Gaussian trait)
# Parameters
mu <- c(0, 1)
G <- diag(c(0.5, 2))
M <- diag(c(0.2, 1)) # Maternal effect VCV matrix
P <- diag(c(1, 4))
# Broad-sense "G-matrix" on observed data scale
\dontrun{QGmvicc(mu = mu, vcv.comp = G, vcv.P = P, models = c("binom1.probit", "Gaussian"))}
# Maternal effect VCV matrix on observed data scale
\dontrun{QGmvicc(mu = mu, vcv.comp = M, vcv.P = P, models = c("binom1.probit", "Gaussian"))}
# Reminder: the results are the same here because we have no correlation between the two traits
# Defining the model "by hand" using the list
list.models = list(
model1 = list(inv.link = function(x){pnorm(x)},
d.inv.link = function(x){dnorm(x)},
var.func = function(x){pnorm(x) * (1 - pnorm(x))}),
model2 = list(inv.link = function(x){x},
d.inv.link = function(x){1},
var.func = function(x){0})
)
# Running the same analysis as above
QGmvicc(mu = mu, vcv.comp = M, vcv.P = P, models = list.models)
# Using predicted values
# Say we have 100 individuals
n <- 100
# Let's simulate predicted values
p <- matrix(c(runif(n), runif(n)), ncol = 2)
# Note that p has as many columns as we have traits (i.e. two)
# Multivariate analysis with predicted values
\dontrun{QGmvicc(predict = p, vcv.comp = M, vcv.P = P, models = c("binom1.probit", "Gaussian"))}
# That can be a bit long to run!
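# Editor's sketch (not in the original examples; hypothetical mask): drop
# trait-2 predictions for the first half of the individuals (FALSE = filter out)
# msk <- matrix(TRUE, nrow = n, ncol = 2); msk[1:(n/2), 2] <- FALSE
# QGmvicc(predict = p, vcv.comp = M, vcv.P = P,
#         models = c("binom1.probit", "Gaussian"), mask = msk)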
}
|
/man/QGmvicc.Rd
|
no_license
|
cran/QGglmm
|
R
| false | false | 6,869 |
rd
|
"asymean" <-
function(xgrid=seq(0,1,length=21),ygrid=seq(0,1,length=21),binsize=32){
zetam1m2<-matrix(0,length(xgrid),length(ygrid))
for (i in 1:length(xgrid)){
for (j in 1:length(ygrid)){
zetam1m2[i,j]<-(ygrid[j]-xgrid[i])/sqrt((ygrid[j]+xgrid[i])*(2-(ygrid[j]+xgrid[i]))/(2*binsize))
}
}
zetam1m2[which(abs(zetam1m2)==Inf)]<-0
zetam1m2[which(is.na(zetam1m2))]<-0
zetam1m2
}
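# Editor's usage sketch: with the defaults this returns a 21 x 21 matrix of
# z-type statistics (non-finite entries replaced by 0), e.g.
# z <- asymean(); dim(z)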
|
/R/asymean.R
|
no_license
|
nunesmatt/binhf
|
R
| false | false | 380 |
r
|
"asymean" <-
function(xgrid=seq(0,1,length=21),ygrid=seq(0,1,length=21),binsize=32){
zetam1m2<-matrix(0,length(xgrid),length(ygrid))
for (i in 1:length(xgrid)){
for (j in 1:length(xgrid)){
zetam1m2[i,j]<-(ygrid[j]-xgrid[i])/sqrt((ygrid[j]+xgrid[i])*(2-(ygrid[j]+xgrid[i]))/(2*binsize))
}
}
zetam1m2[which(abs(zetam1m2)==Inf)]<-0
zetam1m2[which(is.na(zetam1m2))]<-0
zetam1m2
}
|
genConfNorm <- function (file, pop, t0, np) {
cat (paste(t0, np, sep=' '), file=file, sep="\n")
for (i in 1:pop) {
    x <- rnorm(1, 1.5, 1)
cat (paste(rnorm (1, 0, pi/6), x, sep=' '), file = file, append = TRUE, sep="\n")
}
}
genConfUnif <- function (file, pop, t0, np) {
cat (paste(t0, np, sep=' '), file=file, sep="\n")
for (i in 1:pop) {
x = 0
while (x == 0) {
x = runif(1, -1, 1)
}
cat (paste(runif (1, 0, 1), x, sep=' '), file = file, append = TRUE, sep="\n")
}
}
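# Editor's usage sketch (hypothetical file names): write a "t0 np" header
# followed by 100 two-column configuration rows.
# genConfNorm("conf_norm.txt", pop = 100, t0 = 0, np = 100)
# genConfUnif("conf_unif.txt", pop = 100, t0 = 0, np = 100)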
|
/src/genConf.r
|
permissive
|
ldorelli/tcc
|
R
| false | false | 505 |
r
|
library(ggplot2)
library(ggsci)
A11 <- "A"
A12 <- "G"
A21 <- "C"
A22 <- "T"
df <- read.table("genotype.raw", header=TRUE)
snp1 <- colnames(df)[3]
snp2 <- colnames(df)[4]
colnames(df)[3] <- "SNP1"
colnames(df)[4] <- "SNP2"
df$barhgt <- ifelse(df$PHENO<0, 0.4, -0.4)
df$barhgt2 <- ifelse(df$PHENO<0, 0.10, -0.1)
df$GT1 <- ifelse(df$SNP1 == 0, paste0(A11,A11),
ifelse(df$SNP1==1, paste0(A11,A12), paste0(A12,A12)))
df$GT2 <- ifelse(df$SNP2 == 0, paste0(A21,A21),
ifelse(df$SNP2==1, paste0(A21,A22), paste0(A22,A22)))
df$Shade <- ifelse(df$PHENO<0, "Run", "Not run")
#Redo colors
pal <- c("#1b9e77", "#d95f02", "#7570b3", "#e7298a", "#66a61e", "#e6ab02")
names(pal) <- c(unique(df$GT1), unique(df$GT2))
#Single
p <- ggplot(data=df[order(df$PHENO),], aes(x=factor(ID, levels=unique(ID)), y=PHENO))
p <- p + geom_bar(stat="identity")
p <- p + theme_minimal()
p <- p + theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) + xlab("Sample") + ggtitle(paste(colnames(df)[1], "by", snp1))
p <- p + geom_point(data=df[order(df$PHENO),], aes(x=factor(ID, levels=unique(ID)), y=barhgt2, colour=GT1), size=3)
p <- p + scale_color_tron()
p <- p + guides(color=guide_legend(snp1))
ggsave(p, file="singleSNP.png", dpi=300, height=6, width=4, units="in")
#Interactions
p <- ggplot(data=df[order(df$PHENO),], aes(x=factor(ID, levels=unique(ID)), y=PHENO, fill=Shade)) + geom_bar(stat="identity")
p <- p + theme_minimal()
p <- p + theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) + xlab("Sample") + ggtitle("Significant SNPxSNP Interaction")
p <- p + geom_point(data=df[order(df$PHENO),], aes(x=factor(ID, levels=unique(ID)), y=barhgt, colour=GT1), size=3)
p <- p + geom_point(data=df[order(df$PHENO),], aes(x=factor(ID, levels=unique(ID)), y=barhgt2, colour=GT2), size=3)
p <- p + scale_color_manual(values=pal)
p <- p + scale_fill_manual(values=c("#AAAAAA", "#a6761d"))
p <- p + guides(fill=guide_legend(""), color=guide_legend("Genotype"))
ggsave(p, file="GxG.png", dpi=300, height=5, width=7, units="in")
|
/geno-pheno_plot.R
|
no_license
|
anastasia-lucas/genopheno-plot
|
R
| false | false | 2,128 |
r
|
rRandomLocation <-
function(X, ReferenceType = "", CheckArguments = TRUE) {
if (CheckArguments)
CheckdbmssArguments()
if (ReferenceType != "") {
# Retain a single point type
X.reduced <- X[X$marks$PointType == ReferenceType]
RandomizedX <- rlabel(X.reduced)
} else {
RandomizedX <- rlabel(X)
}
class(RandomizedX) <- c("wmppp", "ppp")
return (RandomizedX)
}
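# Editor's usage sketch (assumes X is a marked point pattern of class "wmppp"
# whose marks contain a PointType factor, e.g. built with dbmss::wmppp()):
# X.rand <- rRandomLocation(X, ReferenceType = "Case")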
|
/dbmss/R/rRandomLocation.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 415 |
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
mv_multgen_ <- function(mat, vec) {
.Call('_sdbmsABC_mv_multgen_', PACKAGE = 'sdbmsABC', mat, vec)
}
mv_multcm_ <- function(mat, vec) {
.Call('_sdbmsABC_mv_multcm_', PACKAGE = 'sdbmsABC', mat, vec)
}
mv_multdm_ <- function(mat, vec) {
.Call('_sdbmsABC_mv_multdm_', PACKAGE = 'sdbmsABC', mat, vec)
}
sigmoid_Cpp_ <- function(x, vmax, v0, r) {
.Call('_sdbmsABC_sigmoid_Cpp_', PACKAGE = 'sdbmsABC', x, vmax, v0, r)
}
SDE_Cpp_gen_ <- function(vec, dm, cm, randvec) {
.Call('_sdbmsABC_SDE_Cpp_gen_', PACKAGE = 'sdbmsABC', vec, dm, cm, randvec)
}
SDE_Cpp_ <- function(vec, dm, cm, randvec) {
.Call('_sdbmsABC_SDE_Cpp_', PACKAGE = 'sdbmsABC', vec, dm, cm, randvec)
}
ODE_Cpp_ <- function(vec, h, Aa, mu, BbC, C1, C2, C3, vmax, v0, r) {
.Call('_sdbmsABC_ODE_Cpp_', PACKAGE = 'sdbmsABC', vec, h, Aa, mu, BbC, C1, C2, C3, vmax, v0, r)
}
Splitting_JRNMM_gen_Cpp_ <- function(h_i, startv, grid_i, dm_i, cm_i, mu_i, C_i, A, B, a, b, v0, r, vmax) {
.Call('_sdbmsABC_Splitting_JRNMM_gen_Cpp_', PACKAGE = 'sdbmsABC', h_i, startv, grid_i, dm_i, cm_i, mu_i, C_i, A, B, a, b, v0, r, vmax)
}
Splitting_JRNMM_Cpp_ <- function(h_i, startv, grid_i, dm_i, cm_i, mu_i, C_i, A, B, a, b, v0, r, vmax) {
.Call('_sdbmsABC_Splitting_JRNMM_Cpp_', PACKAGE = 'sdbmsABC', h_i, startv, grid_i, dm_i, cm_i, mu_i, C_i, A, B, a, b, v0, r, vmax)
}
Splitting_JRNMM_output_Cpp_ <- function(h_i, startv, grid_i, dm_i, cm_i, mu_i, C_i, A, B, a, b, v0, r, vmax) {
.Call('_sdbmsABC_Splitting_JRNMM_output_Cpp_', PACKAGE = 'sdbmsABC', h_i, startv, grid_i, dm_i, cm_i, mu_i, C_i, A, B, a, b, v0, r, vmax)
}
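# Editor's note: this file is machine-generated from the package's C++ sources;
# after editing src/*.cpp, regenerate these wrappers from the package root with
# Rcpp::compileAttributes()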
|
/R/RcppExports.R
|
no_license
|
massimilianotamborrino/sdbmpABC
|
R
| false | false | 1,736 |
r
|
\name{createTable}
\Rdversion{1.5}
\alias{createTable}
\title{Function to create an output table}
\description{
This function reports the results from the Frequentist and Bayesian model for hmax and for h2. It also creates an output table with the results for all the thresholds in a csv format, so the user can select additional thresholds of interest.
}
\usage{
createTable(output.ratio, output.bay, dir = getwd(),h=NULL)
}
\arguments{
\item{output.ratio}{ \code{The output object from the Frequentist model (ratio function)} }
\item{output.bay}{ \code{The output object from the Bayesian model (baymod function)} }
\item{dir}{ \code{Directory for storing the table} }
\item{h}{\code{Additional thresholds in the form of a vector}}
}
\details{
To select a list of interesting features from the Bayesian model we suggest two decision rules in the paper: 1) the maximum of Median(R(h)) only for the subset of credibility intervals which do not include 1; 2) the largest threshold h for which the ratio R(h) is bigger than 2.
The first rule points out the strongest deviation from independence, whilst the second gives the largest threshold at which the number of features called in common is at least double the number expected in common under independence.
}
\value{
\item{max }{The results of the R(hmax) statistic}
\item{rule2 }{The results using the rule R(h) larger than 2 (see details)}
\item{ruleh}{The results using additional thresholds}
}
\references{ 1. M.Blangiardo and S.Richardson (2007) Statistical tools for synthesizing lists of differentially expressed features in related experiments , Genome Biology, 8, R54 }
\author{ Alberto Cassese, Marta Blangiardo }
\examples{
data = simulation(n=500,GammaA=1,GammaB=1,r1=0.5,r2=0.8,
DEfirst=300,DEsecond=200,DEcommon=100)
Th<- ratio(data=data$Pval)
Rh<- baymod(iter=100,output.ratio=Th)
output.table <- createTable(output.ratio=Th,output.bay=Rh)
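# Editor's sketch (hypothetical thresholds): request additional thresholds via h
# output.table.h <- createTable(output.ratio=Th,output.bay=Rh,h=c(0.01,0.05))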
}
|
/man/createTable.Rd
|
no_license
|
AlbertoCassese/sdef
|
R
| false | false | 1,962 |
rd
|
#!/usr/bin/Rscript
require(bigmemory)
require(bigalgebra)
require(irlba)
con <- file("mat.txt", open = "a")
replicate(1, {
x <- matrix(rnorm(5 * 5), nrow = 5)
write.table(x, file = 'mat.txt', append = TRUE,
row.names = FALSE, col.names = FALSE)
})
file.info("mat.txt")$size
close(con)
|
/examples/dateGen/mGen.rs
|
permissive
|
pomadchin/hadoop-dg-decomp
|
R
| false | false | 308 |
rs
|
library(shiny)
shinyUI(fluidPage(
titlePanel("Demonstration of submitButton()"),
sidebarLayout(
sidebarPanel(
selectInput("dataset","Choose a dataset:",choices = c("iris","pressure","mtcars")),
numericInput("obs", "Number of observations:", 6),
submitButton("Update!"),
p("In this example, changing the user input (dataset or number of observations) will not reflect in the output until the Update button is clicked"),
p("submitButton is used to control the reactiveness of the change in the user input")
),
mainPanel(
h4(textOutput("dataname")),
verbatimTextOutput("structure"),
h4(textOutput("observation")),
tableOutput("view")
)
)
))
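# Editor's note: this ui.R assumes a companion server.R defining the four
# outputs; a minimal hypothetical sketch:
# shinyServer(function(input, output) {
#   dataInput <- reactive(get(input$dataset))
#   output$dataname <- renderText(input$dataset)
#   output$structure <- renderPrint(str(dataInput()))
#   output$observation <- renderText(paste("First", input$obs, "observations:"))
#   output$view <- renderTable(head(dataInput(), n = input$obs))
# })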
|
/shiny/submitButton/example/ui.R
|
no_license
|
ChanningC12/Machine-Learning-with-R
|
R
| false | false | 857 |
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536106829e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613111889-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 257 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{validation.data}
\alias{validation.data}
\title{Sample validation data}
\format{
An object of class \code{\link[sf]{sf}} with 8 rows and 3 variables
\describe{
\item{sight}{1's and 0's indicating species presence/absence}
\item{count}{number of individuals observed at each point}
\item{geometry}{simple feature list column representing validation data points}
}
}
\usage{
validation.data
}
\description{
Sample validation data created by cropping Validation_data.csv to the SoCal_bite.csv region
(.csv files from ...)
}
\keyword{datasets}
|
/man/validation.data.Rd
|
no_license
|
cran/eSDM
|
R
| false | true | 676 |
rd
|
#' Exponentially Weighted Moving Average (EWMA) Mean and Volatility
#'
#' @param x returns of the portfolio
#' @param nstart number of initial observations used for the robust starting estimates (median and mad)
#' @param robMean if the robust mean is used, default is T
#' @param robVol if the robust vol is used, default is T
#' @param cc tuning constant of the Huber psi function bounding standardized residuals
#' @param lambdaMean EWMA decay parameter for the mean recursion
#' @param lambdaVol EWMA decay parameter for the volatility recursion
#' @param Dyn if TRUE, use the faster dynamic decay parameters when a standardized residual exceeds cc
#' @param lambdaMeanDyn mean decay parameter used when Dyn triggers
#' @param lambdaVolDyn volatility decay parameter used when Dyn triggers
#'
#' @return an \code{xts} object with columns \code{ewmaMean} and \code{ewmaVol}
#'
#' @details The robust EWMA mean algorithm has the form
#' \deqn{\hat{\mu}_t = \hat{\mu}_{t-1} + (1-\lambda)\hat{\sigma}_{t-1}\psi_{hub}\left(\frac{x_t-\hat{\mu}_{t-1}}{\hat{\sigma}_{t-1}}\right)}
#' @examples
#' @export
ewmaMeanVol <- function(x,nstart = 10,robMean = T,robVol = T,cc = 2.5,
lambdaMean = 0.9,lambdaVol = 0.9, Dyn = F,
lambdaMeanDyn = 0.7,lambdaVolDyn = 0.7)
{
n <- length(x)
index = index(x)
x <- coredata(x)
# Compute initial robust mean and vol estimates
mean.start <- median(x[1:nstart])
vol.start <- mad(x[1:nstart])
# Create output vectors with initial estimates and zeros
ewmaMean <- c(rep(mean.start, nstart), rep(0, n - nstart))
ewmaVol <- c(rep(vol.start, nstart), rep(0, n - nstart))
# EWMA recursion
ewmaMean.old <- mean.start
  ewmaVol.old <- vol.start
ns1 <- nstart + 1
for(i in ns1:n)
{
resid <- x[i]-ewmaMean.old
#if(robMean) {resid <- ewmaVol.old*psi_modOpt(resid/ewmaVol.old,
# cc = c(0.01316352,1.05753107,3.00373939,1.0))}
if(robMean) {
resid <- ewmaVol.old*psiHuber(resid/ewmaVol.old,cc = cc)
}
if(Dyn & abs(resid/ewmaVol.old) >= cc) {
lambda = lambdaMeanDyn
} else {
lambda = lambdaMean
}
ewmaMean.new <- ewmaMean.old + (1 - lambda) * resid
ewmaMean[i] <- ewmaMean.new
residNew <- x[i]-ewmaMean.new
ewmaVar.old <- ewmaVol.old^2
residVar <- residNew^2 - ewmaVar.old
#if(robVol) {sPsi <- ewmaVol.old*psi_modOpt(resid/ewmaVol.old,
# cc = c(0.01316352,1.05753107,3.00373939,1.0))
# residVar <- sPsi^2 - ewmaVar.old}
if(robVol) {
sPsi <- ewmaVol.old*psiHuber(resid/ewmaVol.old,cc = cc)
residVar <- sPsi^2 - ewmaVar.old
}
if(Dyn & abs(resid/ewmaVol.old) >= cc) {
lambda = lambdaVolDyn
} else {
lambda = lambdaVol
}
ewmaVar.new <- ewmaVar.old + (1-lambda)*residVar
ewmaVol.new <- sqrt(ewmaVar.new)
ewmaVol[i] <- ewmaVol.new
ewmaMean.old <- ewmaMean.new
ewmaVol.old <- ewmaVol.new
}
ewmaMeanVol <- xts(cbind(ewmaMean,ewmaVol),order.by = index)
return(ewmaMeanVol)
}
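# Editor's usage sketch (synthetic data; assumes the package's psiHuber()
# is on the search path):
# library(xts); set.seed(1)
# ret <- xts(rnorm(250, 0, 0.01), order.by = as.Date("2020-01-01") + 0:249)
# ret[100] <- 0.08  # inject an outlier; the Huberized update should damp it
# mv <- ewmaMeanVol(ret, nstart = 20)
# plot(mv$ewmaVol)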
|
/R/ewmaMeanVol.R
|
permissive
|
kecoli/PCRM
|
R
| false | false | 2,657 |
r
|
library(dplyr)
data<-read.csv("Movies_Distance_Matrix_2.csv",header=T)
raw<-read.csv("oscar_nominations.csv",header=T)
oscar<-filter(data,movies %in% raw$Title)
oscar_index<-oscar[,1]+1
n<-length(oscar_index)
distance_matrix<-data[oscar_index,]
distance_matrix<-distance_matrix[,-1]
distance_matrix<-distance_matrix[,oscar_index]
title<-data.frame(index=1:78,title=data[oscar_index,]$movies)
save(distance_matrix,title,file="distance_matrix.RData")
load("distance_matrix.RData")
distance_matrix<-distance_matrix[-28,-28]
title<-title[-28,]
title$index<-1:77
movie<-title  # 'movie' was used below without ever being defined; it is the renamed title table
save(movie,distance_matrix,file="oscars_summary.RData")
load("oscars_summary.RData")
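A hypothetical downstream step (not part of this script): the saved distance
matrix plugs directly into base R's hierarchical clustering.

# Cluster the 77 movies from the precomputed distance matrix.
load("oscars_summary.RData")
hc <- hclust(as.dist(distance_matrix), method = "average")
plot(hc, labels = movie$title, cex = 0.5)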
|
/lib/matrix_process.R
|
no_license
|
TZstatsADS/Spr2016-Proj4-Grp5
|
R
| false | false | 691 |
r
|
#' @name prepSim
#'
#' @title SCE preparation for \code{\link{simData}}
#'
#' @description \code{prepSim} prepares an input SCE for simulation
#' with \code{muscat}'s \code{\link{simData}} function by
#' \enumerate{
#' \item{basic filtering of genes and cells}
#' \item{(optional) filtering of subpopulation-sample instances}
#' \item{estimation of cell (library sizes) and gene parameters
#' (dispersions and sample-specific means), respectively.}
#' }
#'
#' @param x a \code{\link[SingleCellExperiment]{SingleCellExperiment}}.
#' @param min_count,min_cells used for filtering of genes; only genes with
#' a count > \code{min_count} in >= \code{min_cells} will be retained.
#' @param min_genes used for filtering cells;
#' only cells with a count > 0 in >= \code{min_genes} will be retained.
#' @param min_size used for filtering subpopulation-sample combinations;
#' only instances with >= \code{min_size} cells will be retained.
#' Specifying \code{min_size = NULL} skips this step.
#' @param group_keep character string; if \code{nlevels(x$group_id) > 1},
#' specifies which group(s) of samples to keep (see details). With the default
#' \code{NULL}, samples from \code{levels(x$group_id)[1]} are retained;
#' if \code{colData(x)$group_id} does not exist, all samples are kept.
#' @param verbose logical; should information on progress be reported?
#'
#' @details For each gene \eqn{g}, \code{prepSim} fits a model to estimate
#' sample-specific means \eqn{\beta_g^s}, for each sample \eqn{s},
#' and dispersion parameters \eqn{\phi_g} using \code{edgeR}'s
#' \code{\link[edgeR]{estimateDisp}} function with default parameters.
#' Thus, the reference count data is modeled as NB distributed:
#' \deqn{Y_{gc} \sim NB(\mu_{gc}, \phi_g)}
#' for gene \eqn{g} and cell \eqn{c}, where the mean
#' \eqn{\mu_{gc} = \exp(\beta_{g}^{s(c)}) \cdot \lambda_c}. Here,
#' \eqn{\beta_{g}^{s(c)}} is the relative abundance of gene \eqn{g}
#' in sample \eqn{s(c)}, \eqn{\lambda_c} is the library size
#' (total number of counts), and \eqn{\phi_g} is the dispersion.
#'
#' @return a \code{\link[SingleCellExperiment]{SingleCellExperiment}}
#' containing, for each cell, library size (\code{colData(x)$offset})
#' and, for each gene, dispersion and sample-specific mean estimates
#' (\code{rowData(x)$dispersion} and \code{$beta.sample_id}, respectively).
#'
#' @examples
#' # estimate simulation parameters
#' data(example_sce)
#' ref <- prepSim(example_sce)
#'
#' # tabulate number of genes/cells before vs. after
#' ns <- cbind(
#' before = dim(example_sce),
#' after = dim(ref))
#' rownames(ns) <- c("#genes", "#cells")
#' ns
#'
#' library(SingleCellExperiment)
#' head(rowData(ref)) # gene parameters
#' head(colData(ref)) # cell parameters
#'
#' @author Helena L Crowell
#'
#' @references
#' Crowell, HL, Soneson, C, Germain, P-L, Calini, D,
#' Collin, L, Raposo, C, Malhotra, D & Robinson, MD:
#' On the discovery of population-specific state transitions from
#' multi-sample multi-condition single-cell RNA sequencing data.
#' \emph{bioRxiv} \strong{713412} (2018).
#' doi: \url{https://doi.org/10.1101/713412}
#'
#' @importFrom edgeR calcNormFactors DGEList estimateDisp glmFit
#' @importFrom Matrix colSums rowSums
#' @importFrom matrixStats rowAnyNAs
#' @importFrom SingleCellExperiment SingleCellExperiment counts
#' @importFrom SummarizedExperiment colData rowData rowData<-
#' @importFrom stats model.matrix as.formula
#' @importFrom S4Vectors DataFrame
#' @export
prepSim <- function(x,
min_count = 1, min_cells = 10,
min_genes = 100, min_size = 100,
group_keep = NULL, verbose = TRUE) {
.check_sce(x, req_group = FALSE)
stopifnot(is.numeric(min_count),
is.numeric(min_cells), is.numeric(min_genes),
is.null(min_size) || is.numeric(min_size),
is.logical(verbose), length(verbose) == 1)
# get model variables
vars <- c("sample_id", "cluster_id")
names(vars) <- vars <- intersect(vars, names(colData(x)))
# assure these are factors
for (v in vars) {
# drop singular variables
n <- length(unique(x[[v]]))
if (n == 1) {
x[[v]] <- NULL
rmv <- grep(v, vars)
vars <- vars[-rmv]
next
}
if (!is.factor(x[[v]]))
x[[v]] <- as.factor(x[[v]])
x[[v]] <- droplevels(x[[v]])
}
n_cells0 <- ncol(x)
x <- .update_sce(x)
if (is.null(group_keep)) {
if ("group_id" %in% colnames(colData(x))) {
group_keep <- levels(x$group_id)[1]
if (verbose) {
fmt <- paste(
"Argument `group_keep` unspecified;",
"defaulting to retaining %s-group samples.")
message(sprintf(fmt, dQuote(group_keep)))
}
cells_keep <- x$group_id == group_keep
} else {
cells_keep <- seq_len(ncol(x))
}
} else {
stopifnot(is.character(group_keep),
group_keep %in% levels(x$group_id))
cells_keep <- x$group_id %in% group_keep
}
x <- x[, cells_keep]
x <- .update_sce(x)
# keep genes w/ count > `min_count` in at least `min_cells`;
# keep cells w/ at least `min_genes` detected genes
if (verbose) message("Filtering...")
genes_keep <- rowSums(counts(x) > min_count) >= min_cells
cells_keep <- colSums(counts(x) > 0) >= min_genes
if (verbose) message(sprintf(
"- %s/%s genes and %s/%s cells retained.",
sum(genes_keep), nrow(x), sum(cells_keep), n_cells0))
x <- x[genes_keep, cells_keep, drop = FALSE]
# keep cluster-samples w/ at least 'min_size' cells
if (!is.null(min_size)) {
n_cells <- table(x$cluster_id, x$sample_id)
n_cells <- .filter_matrix(n_cells, n = min_size)
if (ncol(n_cells) == 1)
stop("Current 'min_size' retains only 1 sample,\nbut",
" mean-dispersion estimation requires at least 2.")
if (verbose) message(sprintf(
"- %s/%s subpopulations and %s/%s samples retained.",
nrow(n_cells), nlevels(x$cluster_id),
ncol(n_cells), nlevels(x$sample_id)))
x <- .filter_sce(x, rownames(n_cells), colnames(n_cells))
}
if (is.null(rownames(x))) rownames(x) <- paste0("gene", seq(nrow(x)))
if (is.null(colnames(x))) colnames(x) <- paste0("cell", seq(ncol(x)))
# construct model formula
f <- "~ 1"
for (v in vars)
f <- paste(f, v, sep = "+")
cd <- as.data.frame(droplevels(colData(x)))
mm <- model.matrix(as.formula(f), data = cd)
# fit NB model
if (verbose)
message("Estimating gene and cell parameters...")
y <- DGEList(counts(x))
y <- calcNormFactors(y)
y <- estimateDisp(y, mm)
y <- glmFit(y, prior.count = 0)
# drop genes for which estimation failed
cs <- y$coefficients
x <- x[!rowAnyNAs(cs), ]
# group betas by variable
bs <- DataFrame(
beta0 = cs[, 1],
row.names = rownames(x))
for (v in vars) {
pat <- paste0("^", v)
i <- grep(pat, colnames(cs))
df <- DataFrame(cs[, i])
nms <- colnames(cs)[i]
names(df) <- gsub(pat, "", nms)
bs[[v]] <- df
}
rowData(x)$beta <- bs
# store dispersions in row- & offsets in colData
ds <- y$dispersion
names(ds) <- rownames(x)
rowData(x)$disp <- ds
os <- c(y$offset)
names(os) <- colnames(x)
x$offset <- os
# return SCE
return(x)
}
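To make the parameterization concrete, a rough sketch (not muscat's actual
simulation code) of how the stored estimates map onto a single NB draw; gene
index g and cell index j are arbitrary, and the cell is assumed to belong to
the reference sample/cluster so that only the intercept beta0 applies.

# One illustrative NB count for gene g in cell j: mu = exp(beta0 + offset),
# where the offset is edgeR's log-library-size, and rnbinom()'s size = 1/phi.
# Other samples/clusters would add the matching columns of rowData(ref)$beta.
data(example_sce)
ref <- prepSim(example_sce)
g <- 1; j <- 1
mu <- exp(rowData(ref)$beta$beta0[g] + ref$offset[j])
phi <- rowData(ref)$disp[g]
rnbinom(1, mu = mu, size = 1 / phi)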
|
/R/prepSim.R
|
no_license
|
retogerber/muscat
|
R
| false | false | 7,580 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithm_sc10Z.R
\name{sc10Z}
\alias{sc10Z}
\title{Spectral Clustering by Zhang et al. (2010)}
\usage{
sc10Z(data, k = 2, ...)
}
\arguments{
\item{data}{an \eqn{(n\times p)} matrix of row-stacked observations or S3 \code{dist} object of \eqn{n} observations.}
\item{k}{the number of clusters (default: 2).}
\item{...}{extra parameters including \describe{
\item{algclust}{method to perform clustering on embedded data; either \code{"kmeans"} (default) or \code{"GMM"}.}
\item{maxiter}{the maximum number of iterations (default: 10).}
}}
}
\value{
a named list of S3 class \code{T4cluster} containing
\describe{
\item{cluster}{a length-\eqn{n} vector of class labels (from \eqn{1:k}).}
\item{eigval}{eigenvalues of the graph Laplacian's spectral decomposition.}
\item{embeds}{an \eqn{(n\times k)} low-dimensional embedding.}
\item{algorithm}{name of the algorithm.}
}
}
\description{
The algorithm defines a set of data-driven
bandwidth parameters \eqn{p_{ij}} used to construct the similarity matrix.
The affinity matrix is then defined as \deqn{A_{ij} = \exp(-d(x_i, x_j)^2 / (2 p_{ij}))}
and the standard spectral clustering of Ng, Jordan, and Weiss (\code{\link{scNJW}}) is applied.
}
\examples{
# -------------------------------------------------------------
# clustering with 'iris' dataset
# -------------------------------------------------------------
## PREPARE
data(iris)
X = as.matrix(iris[,1:4])
lab = as.integer(as.factor(iris[,5]))
## EMBEDDING WITH PCA
X2d = Rdimtools::do.pca(X, ndim=2)$Y
## CLUSTERING WITH DIFFERENT K VALUES
cl2 = sc10Z(X, k=2)$cluster
cl3 = sc10Z(X, k=3)$cluster
cl4 = sc10Z(X, k=4)$cluster
## VISUALIZATION
opar <- par(no.readonly=TRUE)
par(mfrow=c(1,4), pty="s")
plot(X2d, col=lab, pch=19, main="true label")
plot(X2d, col=cl2, pch=19, main="sc10Z: k=2")
plot(X2d, col=cl3, pch=19, main="sc10Z: k=3")
plot(X2d, col=cl4, pch=19, main="sc10Z: k=4")
par(opar)
}
\references{
\insertRef{zhang_spectral_2010}{T4cluster}
}
\concept{algorithm}
|
/T4cluster/man/sc10Z.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | true | 2,071 |
rd
|
# calculate linear models (R-squared and slope p-values)
dir.create("./results/R2/")
stages <- read.csv("./data/raw_data/stages.csv")
#---------------------
#unimodal
#---------------------
simulated <- read.csv("./results/compiled_LBGs/unimodal_simulated.csv")
sampled <- read.csv("./results/compiled_LBGs/unimodal_sampled.csv")
rarefied <- read.csv("./results/compiled_LBGs/unimodal_rarefied.csv")
master <- data.frame()
for(i in stages$name){
name <- i
sim <- subset(simulated, name == i)
samp <- subset(sampled, name == i)
rare <- subset(rarefied, name == i)
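# summary(lm())$coefficients is a 2x4 matrix, so element [8] (= [2, 4]) is the slope's p-value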
sampled_r2 <- summary(lm(sim$prop_richness~samp$prop_richness))$r.squared
sampled_pval <- summary(lm(sim$prop_richness~samp$prop_richness))$coefficients[8]
rarefied_r2 <- summary(lm(sim$prop_richness~rare$prop_richness))$r.squared
rarefied_pval <- summary(lm(sim$prop_richness~rare$prop_richness))$coefficients[8]
tmp <- cbind.data.frame(name, sampled_r2, sampled_pval, rarefied_r2, rarefied_pval)
master <- rbind.data.frame(master, tmp)
}
master <- plyr::join(master, stages, by = "name", type = "left")
master <- master[order(master$max_age),]
write.csv(master, "./results/R2/unimodal_temporal_R2.csv", row.names = FALSE)
#---------------------
#bimodal
#---------------------
simulated <- read.csv("./results/compiled_LBGs/bimodal_simulated.csv")
sampled <- read.csv("./results/compiled_LBGs/bimodal_sampled.csv")
rarefied <- read.csv("./results/compiled_LBGs/bimodal_rarefied.csv")
master <- data.frame()
for(i in stages$name){
name <- i
sim <- subset(simulated, name == i)
samp <- subset(sampled, name == i)
rare <- subset(rarefied, name == i)
sampled_r2 <- summary(lm(sim$prop_richness~samp$prop_richness))$r.squared
sampled_pval <- summary(lm(sim$prop_richness~samp$prop_richness))$coefficients[8]
rarefied_r2 <- summary(lm(sim$prop_richness~rare$prop_richness))$r.squared
rarefied_pval <- summary(lm(sim$prop_richness~rare$prop_richness))$coefficients[8]
tmp <- cbind.data.frame(name, sampled_r2, sampled_pval, rarefied_r2, rarefied_pval)
master <- rbind.data.frame(master, tmp)
}
master <- plyr::join(master, stages, by = "name", type = "left")
master <- master[order(master$max_age),]
write.csv(master, "./results/R2/bimodal_temporal_R2.csv", row.names = FALSE)
#---------------------
#flat
#---------------------
simulated <- read.csv("./results/compiled_LBGs/flat_simulated.csv")
sampled <- read.csv("./results/compiled_LBGs/flat_sampled.csv")
rarefied <- read.csv("./results/compiled_LBGs/flat_rarefied.csv")
master <- data.frame()
for(i in stages$name){
name <- i
sim <- subset(simulated, name == i)
samp <- subset(sampled, name == i)
rare <- subset(rarefied, name == i)
sampled_r2 <- summary(lm(sim$prop_richness~samp$prop_richness))$r.squared
sampled_pval <- summary(lm(sim$prop_richness~samp$prop_richness))$coefficients[8]
rarefied_r2 <- summary(lm(sim$prop_richness~rare$prop_richness))$r.squared
rarefied_pval <- summary(lm(sim$prop_richness~rare$prop_richness))$coefficients[8]
tmp <- cbind.data.frame(name, sampled_r2, sampled_pval, rarefied_r2, rarefied_pval)
master <- rbind.data.frame(master, tmp)
}
master <- plyr::join(master, stages, by = "name", type = "left")
master <- master[order(master$max_age),]
write.csv(master, "./results/R2/flat_temporal_R2.csv", row.names = FALSE)
#---------------------
#LBG type
#---------------------
unimodal_sim <- read.csv("./results/compiled_LBGs/unimodal_simulated.csv")
bimodal_sim <- read.csv("./results/compiled_LBGs/bimodal_simulated.csv")
unimodal_samp <- read.csv("./results/compiled_LBGs/unimodal_sampled.csv")
bimodal_samp <- read.csv("./results/compiled_LBGs/bimodal_sampled.csv")
unimodal_rare <- read.csv("./results/compiled_LBGs/unimodal_rarefied.csv")
bimodal_rare <- read.csv("./results/compiled_LBGs/bimodal_rarefied.csv")
master <- data.frame()
for(i in stages$name){
name <- i
simulated_r2 <- summary(lm(subset(unimodal_sim, name == i)$prop_richness~subset(bimodal_sim, name == i)$prop_richness))$r.squared
simulated_pval <- summary(lm(subset(unimodal_sim, name == i)$prop_richness~subset(bimodal_sim, name == i)$prop_richness))$coefficients[8]
sampled_r2 <- summary(lm(subset(unimodal_samp, name == i)$prop_richness~subset(bimodal_samp, name == i)$prop_richness))$r.squared
sampled_pval <- summary(lm(subset(unimodal_samp, name == i)$prop_richness~subset(bimodal_samp, name == i)$prop_richness))$coefficients[8]
rarefied_r2 <- summary(lm(subset(unimodal_rare, name == i)$prop_richness~subset(bimodal_rare, name == i)$prop_richness))$r.squared
rarefied_pval <- summary(lm(subset(unimodal_rare, name == i)$prop_richness~subset(bimodal_rare, name == i)$prop_richness))$coefficients[8]
tmp <- cbind.data.frame(name, simulated_r2, simulated_pval, sampled_r2, sampled_pval, rarefied_r2, rarefied_pval)
master <- rbind.data.frame(master, tmp)
}
master <- plyr::join(master, stages, by = "name", type = "left")
master <- master[order(master$max_age),]
write.csv(master, "./results/R2/LBG_type_R2.csv", row.names = FALSE)
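The three stage loops above differ only in their inputs; a small helper
(hypothetical name fit_r2, not in the original script) would remove the
repetition without changing any output.

# Consolidates the repeated R2 / slope p-value extraction.
fit_r2 <- function(y, x) {
  s <- summary(lm(y ~ x))
  c(r2 = s$r.squared, pval = s$coefficients[2, 4])
}
# e.g. fit_r2(sim$prop_richness, samp$prop_richness)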
|
/R/subscripts/calculate_R2.R
|
permissive
|
LewisAJones/LBG_sim
|
R
| false | false | 5,075 |
r
|
.onewrq <-
function(form, tau, data, Y, X1, X2, subject, death, time, interval.death, impute, weight, wcompute, seed, intermittent)
{
## seed
set.seed(seed)
## subjects in the data
numeros <- unique(data[,subject])
n <- length(numeros)
## weights in the original sample
poidsechdepart <- data[,weight]
if(wcompute!=1)
{
data$poidsechdepart <- data[,weight]
data <- data[,-which(colnames(data)==weight)]
}
## bootstrap sample
num_b <- sample(numeros, size=n, replace=TRUE)
j_b <- sapply(num_b,function(i) which(data[,subject]==i,useNames=FALSE))
j_b <- unlist(j_b,use.names=FALSE)
nbmes_b <- sapply(num_b,function(i) length(which(data[,subject]==i)),USE.NAMES=FALSE)
ech_b <- data[j_b,]
ech_b[,subject] <- rep(1:n,nbmes_b)
## model estimation
if(wcompute==0) ## do not recompute the weights
{
## models when the weights are not recomputed
mold <- rq(formula=form,tau=tau,data=ech_b,weights=poidsechdepart)
}
else
{
if(wcompute==1) ## recompute the weights
{
## add the new weights
if(intermittent==FALSE)
{
dataw <- weightsMMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,interval.death=interval.death)$data
}
if(intermittent==TRUE)
{
dataw <- weightsIMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,impute=impute)$data
}
## models
mnew <- rq(formula=form,tau=tau,data=dataw,weights=weight)
}
else ## do both
{
## models when the weights are not recomputed
mold <- rq(formula=form,tau=tau,data=ech_b,weights=poidsechdepart)
## add the weights
if(intermittent==FALSE)
{
dataw <- weightsMMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,interval.death=interval.death)$data
}
if(intermittent==TRUE)
{
dataw <- weightsIMD(data=ech_b,Y=Y,X1=X1,X2=X2,subject=subject,death=death,time=time,impute=impute)$data
}
## models
mnew <- rq(formula=form,tau=tau,data=dataw,weights=weight)
}
}
## keep the coefficients
coef_b0 <- NULL
nbcoef0 <- 0
nomcoef0 <- NULL
if(exists("mold"))
{
coef_b0 <- mold$coefficients
if(length(tau)==1)
{
nbcoef0 <- length(coef_b0)
nomcoef0 <- paste("calc0",rep(tau,each=nbcoef0),names(coef_b0),sep="_")
}
else
{
nbcoef0 <- nrow(coef_b0)
nomcoef0 <- paste("calc0",rep(tau,each=nbcoef0),rownames(coef_b0),sep="_")
}
}
coef_b1 <- NULL
nbcoef1 <- 0
nomcoef1 <- NULL
if(exists("mnew"))
{
coef_b1 <- mnew$coefficients
if(length(tau)==1)
{
nbcoef1 <- length(coef_b1)
nomcoef1 <- paste("calc1",rep(tau,each=nbcoef1),names(coef_b1),sep="_")
}
else
{
nbcoef1 <- nrow(coef_b1)
nomcoef1 <- paste("calc1",rep(tau,each=nbcoef1),rownames(coef_b1),sep="_")
}
}
coef_b <- c(coef_b0,coef_b1)
nomcoef <- c(nomcoef0,nomcoef1)
res <- c(as.vector(coef_b),seed)
names(res) <- c(nomcoef,"seed")
return(res)
}
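.onewrq returns one bootstrap replicate of the weighted quantile-regression
coefficients; the package presumably calls it once per seed. A sketch of how
such replicates could be aggregated into bootstrap standard errors, assuming
every replicate estimates the same coefficient set (all names illustrative):

# Stack B named coefficient vectors and take column standard deviations.
B <- 200
reps <- t(sapply(seq_len(B), function(b)
  .onewrq(form, tau, data, Y, X1, X2, subject, death, time,
          interval.death, impute, weight, wcompute, seed = b,
          intermittent = intermittent)))
boot_se <- apply(reps[, colnames(reps) != "seed", drop = FALSE], 2, sd)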
|
/R/onewrq.R
|
no_license
|
VivianePhilipps/weightQuant
|
R
| false | false | 4,463 |
r
|
# Ready Graduate - IB
# Evan Kramer
# 5/2/2019
options(java.parameters = "-Xmx16G")
library(tidyverse)
library(lubridate)
library(haven)
library(RJDBC)
setwd("N:/")
# Switches
data = TRUE
clean = TRUE
compile = TRUE
check = FALSE
domain = "ib"
# Load data and connect to database
if(data == TRUE) {
con = dbConnect(
JDBC("oracle.jdbc.OracleDriver", classPath="C:/Users/CA19130/Downloads/ojdbc6.jar"),
readRegistry("Environment", hive = "HCU")$EIS_MGR_CXN_STR[1],
"EIS_MGR",
readRegistry("Environment", hive = "HCU")$EIS_MGR_PWD[1]
)
# Correlation of course codes
cc = readxl::read_excel("C:/Users/CA19130/Downloads/ed2356_course_code_2018-19.xlsx", sheet = 1)
course_codes = cc$`Course Code`[str_detect(cc$`Course Title`, str_to_upper(domain))]
# Cohort
cohort = read_csv(str_c("ORP_accountability/data/", year(today()) - 1, "_graduation_rate/student_level.csv"),
col_types = "dccccTccTdcccccccdcdddcddcTcccdccccccdcTcccdTc")
# Exam records
exams = read_delim("ORP_accountability/data/2018_Assessment_Files/2014 cohort AP SDC IB CLEP raw.txt",
delim = "\t", col_types = "ddcccccccccccccccc") %>%
janitor::clean_names() %>%
filter(str_detect(test_administration_cd, str_c(str_to_upper(domain), "_")))
# Exam crosswalk
xw = read_csv("ORP_accountability/projects/2019_ready_graduate/Code/Crosswalks/epso_course_codes_exams.csv")
# Students taking courses
enrollments = as_tibble(dbGetQuery(con, str_c(
"select courses.isp_id, courses.student_key, courses.type_of_service, courses.first_name, courses.middle_name, courses.last_name,
courses.date_of_birth, courses.isp_school_year, courses.withdrawal_reason, courses.begin_date, courses.end_date,
courses.sca_school_year, courses.sca_begin_date, courses.sca_end_date, courses.cs_school_year,
courses.sca_local_class_number, courses.course_code, courses.cs_begin_date, courses.cs_end_date, courses.state_dual_credit,
courses.local_dual_credit, courses.dual_enrollment, courses.school_bu_id,
instructional_days.district_no, instructional_days.school_no, instructional_days.id_date
from (
select distinct student_key
from studentcohortdata_historic
where cohortyear = extract(year from sysdate) - 5 and completion_type in (1, 11, 12, 13)
) cohort
left outer join (
select isp.isp_id, isp.student_key, isp.type_of_service, isp.first_name, isp.middle_name, isp.last_name,
isp.date_of_birth, isp.school_year as isp_school_year, isp.withdrawal_reason, isp.begin_date, isp.end_date,
sca.school_year as sca_school_year, sca.sca_begin_date, sca.sca_end_date, cs.school_year as cs_school_year,
sca.local_class_number as sca_local_class_number, course_code, cs_begin_date, cs_end_date, state_dual_credit,
local_dual_credit, dual_enrollment, isp.school_bu_id
from instructional_service_period isp
join student_class_assignment sca on sca.isp_id = isp.isp_id
join class_section cs on
sca.instructional_program_num = cs.instructional_program_num and
sca.local_class_number = cs.local_class_number and
sca.school_bu_id = cs.school_bu_id and
sca.school_year = cs.school_year
where cs.course_code in (", str_flatten(course_codes, ","), ")
) courses on cohort.student_key = courses.student_key
left outer join (
select school_year, s.school_bu_id, s.district_no, s.school_no, sid.id_date
from scal_id_days sid
join school s on s.school_bu_id = sid.school_bu_id
where school_year >= extract(year from sysdate) - 5
) instructional_days
on (
courses.school_bu_id = instructional_days.school_bu_id and
courses.isp_school_year = instructional_days.school_year
)"
))) %>%
janitor::clean_names() %>%
group_by(isp_school_year, course_code) %>%
mutate(
# Account for missing end dates
max_id_date = max(id_date, na.rm = T),
# Create instructional day variables
cs_end_date = ifelse(!is.na(cs_end_date), cs_end_date,
ifelse(!is.na(sca_end_date), sca_end_date, max_id_date)),
sca_end_date = ifelse(!is.na(sca_end_date), sca_end_date, max_id_date),
course_instructional_days = as.numeric(id_date >= cs_begin_date & id_date <= cs_end_date),
enrolled_instructional_days = as.numeric(id_date >= sca_begin_date & id_date <= sca_end_date)
) %>%
arrange(isp_id, student_key) %>%
group_by(isp_id, student_key, course_code, begin_date, end_date, sca_begin_date, sca_end_date,
cs_begin_date, cs_end_date) %>%
# Sum course and enrolled instructional days by course code, all begin and end dates
summarize(first_name = first(first_name), middle_name = first(middle_name), last_name = first(last_name),
date_of_birth = first(date_of_birth), type_of_service = first(type_of_service),
isp_school_year = first(isp_school_year), withdrawal_reason = first(withdrawal_reason),
sca_school_year = first(sca_school_year), cs_school_year = first(cs_school_year),
sca_local_class_number = first(sca_local_class_number), state_dual_credit = first(state_dual_credit),
local_dual_credit = first(local_dual_credit), dual_enrollment = first(dual_enrollment),
course_instructional_days = sum(course_instructional_days, na.rm = T),
enrolled_instructional_days = sum(enrolled_instructional_days, na.rm = T)) %>%
ungroup()
} else {
rm(data)
}
# Clean data
if(clean == TRUE) {
# Course enrollments
c = filter(cohort, included_in_cohort == "Y" & completion_type %in% c(1, 11, 12, 13)) %>%
# Start with the cohort
select(student_key) %>%
# Join to enrollments
left_join(enrollments, by = "student_key") %>% # 127211 observations
# Remove students in the cohort with no IB enrollments
filter(!is.na(isp_id)) %>% # 81213 observations
# Must not be withdrawn
filter(is.na(withdrawal_reason)) %>% # 80152 observations
# Remove if enrollment end_date is after course assignment end date
filter(is.na(end_date) | ymd_hms(end_date) <= ymd_hms(sca_end_date)) %>% # same
# Take latest enrollment end, begin, course assignment end, begin, class section end, begin
arrange(student_key, course_code, desc(is.na(end_date)), desc(end_date), desc(begin_date),
desc(is.na(sca_end_date)), desc(sca_end_date), desc(sca_begin_date),
desc(is.na(cs_end_date)), desc(cs_end_date), desc(cs_begin_date)) %>%
group_by(student_key, course_code) %>%
arrange(begin_date, end_date, sca_begin_date, sca_end_date, cs_begin_date, cs_end_date) %>%
# Summarize by student and course code
summarize(first_name = first(first_name), middle_name = first(middle_name), last_name = first(last_name),
isp_school_year = first(isp_school_year),
enrolled_instructional_days = sum(enrolled_instructional_days, na.rm = T),
course_instructional_days = max(course_instructional_days, na.rm = T)) %>% # 985 observations
ungroup() %>%
# Remove students enrolled for less than half the course
filter(enrolled_instructional_days / course_instructional_days >= 0.5) %>% # XXX observations
# Join course names from correlation
left_join(group_by(mutate(cc, course_code = as.numeric(`Course Code`)), course_code) %>%
summarize(course_title = first(`Course Title`)),
by = "course_code") %>%
# Join to exam data -- only keep if course and test match
left_join(transmute(xw, course_code, exam_name), by = "course_code") %>% # 909 observations
left_join(transmute(exams, student_id, exam_name = sublevel2_shortname, performance_level = assessment_result_number_score),
by = c("student_key" = "student_id", "exam_name")) %>% # 909 observations
# Remove students who do not have a valid performance level
filter(!is.na(performance_level)) %>% # observations
filter(!is.na(exam_name)) %>% # observations
# Remove exam records with no corresponding course codes
filter(!is.na(course_code)) %>% # 854 observations
# Collapse to student_level count of courses
group_by(student_key) %>%
summarize(epso_type = domain, n_courses = n_distinct(course_code)) %>% # 12932 observations
ungroup() #%>% filter(student_key == 3073966)
} else {
rm(clean)
}
# Analyze and compile data
if(compile == TRUE) {
# Define path and filename
path = str_c(getwd(), "ORP_accountability/projects/",
ifelse(between(month(today()), 1, 10), year(today()), year(today()) + 1),
"_ready_graduate/Data/")
file = str_c(domain, "_student_level.csv")
if(file %in% list.files(path)) {
if(!dir.exists(str_c(path, "Previous"))) {
dir.create(str_c(path, "Previous"))
dir.create(str_c(path, "Previous/", str_replace_all(now(), "[-:]", "")))
}
if(!dir.exists(str_c(path, "Previous/", str_replace_all(now(), "[-:]", "")))) {
dir.create(str_c(path, "Previous/", str_replace_all(now(), "[-:]", "")))
}
file.rename(str_c(path, file),
str_c(path, "Previous/", str_replace_all(now(), "[-:]", ""), "/", file))
}
write_csv(c, str_c(path, file), na = "")
} else {
rm(compile)
}
# Checks
if(check) {
# Missing sca_end_dates
full_join(
c,
read_csv(
str_c(
"ORP_accountability/projects/",
year(now()),
"_ready_graduate/Data/",
domain,
"_student_level.csv"
)
),
by = c("student_key", "epso_type")
) %>%
mutate_at(vars(starts_with("n_courses")), ~ifelse(is.na(.), 0, .)) %>%
mutate(diff = n_courses.x - n_courses.y) %>%
group_by(diff) %>%
summarize(n = n()) %>%
ungroup() %>%
mutate(pct = round(100 * n / sum(n), 1))
# Hamilton County
hamilton = readxl::read_excel("C:/Users/CA19130/Downloads/Copy of HCS_IB records class of 2018.xlsx") %>%
janitor::clean_names()
left_join(
hamilton,
filter(exams, student_id %in% hamilton$student_key),
by = c("student_key" = "student_id")
)
} else {
rm(check)
}
|
/ib.R
|
no_license
|
evan-kramer/ready_graduate
|
R
| false | false | 10,208 |
r
|
library(tidyverse)
flower_breeding_sheet <- "https://docs.google.com/spreadsheets/d/e/2PACX-1vTdwUI4iZE1wdfZv1xdi2qJtldnWS2iiQdjRjKP-4oKoH0R8a07vaVFxZHSwFiDlwzb6gZAE8U5C_vG/pubhtml#"
source_html <- xml2::read_html(flower_breeding_sheet)
source_tbl <- source_html %>%
rvest::xml_node("#588946015 > div > table") %>%
rvest::html_table()
flower_tbl <- source_tbl %>%
janitor::row_to_names(1) %>%
janitor::clean_names() %>%
drop_na() %>%
select(-x1) %>%
rename(colour = color) %>%
mutate(
across(where(is.character), tolower),
across(starts_with("gene"),
~case_when(
. == 0 ~ "00",
. == 1 ~ "01",
. == 2 ~ "11")),
seed_bag = if_else(seed_bag == 1, TRUE, FALSE),
gene_4 = if_else(species != "rose", NA_character_, gene_4),
genotype = if_else(
species == "rose",
paste0(gene_1, gene_2, gene_3, gene_4),
paste0(gene_1, gene_2, gene_3)
),
flower_id = paste0(species, "_", strtoi(genotype, 2))
)
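For intuition: each gene's dosage (0/1/2) is encoded as a two-bit string, so a
rose with genes 2-0-0-1 gets the genotype "11000001", and strtoi() turns that
binary string into the compact integer used in flower_id.

strtoi(paste0("11", "00", "00", "01"), base = 2)  # 193 -> flower_id "rose_193"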
|
/data_setup.R
|
permissive
|
mattkerlogue/anch_flowers
|
R
| false | false | 1,013 |
r
|
##########################
## Time Series Analysis ##
##########################
# 1. Time series data
# Data observed over the course of time
# 2. Stationarity
# Most raw time series are non-stationary and hard to analyze as-is
# They should be converted into stationary series, which are easier to analyze
# Stationarity conditions
#  - The mean is constant over time
#    A series whose mean is not constant is stationarized by differencing
#  - The variance does not depend on the time point
#    A series whose variance is not constant is stationarized by a transformation (e.g. log)
#  - The covariance depends only on the lag, not on the particular time point
# 3. Time series models
# 3.1 Autoregressive model (AR)
# Values up to p periods in the past influence the current value
# Error term = white noise process
# Autocorrelation function (ACF): correlation between values k periods apart
# Partial autocorrelation function (PACF): correlation between two time points
#   with the influence of the intermediate values removed
# For AR, the ACF decays quickly and the PACF cuts off at some lag
# If the PACF cuts off at lag 2, an AR(1) model is indicated
# 3.2 Moving average model (MA)
# A finite combination of white noise terms, hence always stationary
# The ACF cuts off and the PACF decays quickly
# Autoregressive integrated moving average model (ARIMA)
# A model for non-stationary series
# Differencing or transformation reduces it to an AR, MA, or combined ARMA model
# ARIMA(p, d, q) - d: order of differencing / p: AR order / q: MA order
# Time series decomposition
# Separates out the general components influencing a series and analyzes them
# Seasonal factor, cyclical factor, trend factor, and random (irregular) factor
# The calls below are templates (Korean placeholders translated to English);
# a runnable illustration follows the list.
# 1) Convert source data into a time series object
ts(data, frequency = n, start = c(start_year, month))
# 2) Decompose a time series into observed, trend, seasonal, and random parts
decompose(data)
# 3) Compute a simple moving average of a time series
SMA(data, n = window_size)
# 4) Difference a time series
diff(data, differences = n_diffs)
# 5) Check ACF values and plot to find the lag cutoff
acf(data, lag.max = n_lags)
# 6) Check PACF values and plot to find the lag cutoff
pacf(data, lag.max = n_lags)
# 7) Select the best-fitting ARIMA model for the data
auto.arima(data)
# 8) Fit the selected ARIMA model to the data
arima(data, order = c(p, d, q))
# 9) Forecast future values from the fitted ARIMA model
forecast(fittedData, h = n_ahead)  # forecast.Arima() in older versions of 'forecast'
# 10) Plot a time series
plot.ts(ts_data)
# 11) Plot a forecast
plot(forecast_object)  # plot.forecast() in older versions
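As promised above, a minimal self-contained illustration of steps 1)-2) using
the built-in AirPassengers data (any monthly ts object would do).

# AirPassengers is already a ts with frequency = 12, so it decomposes directly.
ap_dec <- decompose(AirPassengers)
plot(ap_dec)   # observed, trend, seasonal, and random components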
##########################################
## 시계열 실습 - 영국왕들의 사망시 나이 ##
##########################################
library(TTR)
library(forecast)
# Age at death of the kings of England
kings <- scan("http://robjhyndman.com/tsdldata/misc/kings.dat", skip = 3)
kings
kings_ts <- ts(kings)
kings_ts
plot.ts(kings_ts)
# Moving averages
kings_sma3 <- SMA(kings_ts, n = 3)
kings_sma8 <- SMA(kings_ts, n = 8)
kings_sma12 <- SMA(kings_ts, n = 12)
par(mfrow = c(2,2))
plot.ts(kings_ts)
plot.ts(kings_sma3)
plot.ts(kings_sma8)
plot.ts(kings_sma12)
# Stationarise the data by differencing
kings_diff1 <- diff(kings_ts, differences = 1)
kings_diff2 <- diff(kings_ts, differences = 2)
kings_diff3 <- diff(kings_ts, differences = 3)
plot.ts(kings_ts)
plot.ts(kings_diff1) # first differencing already looks reasonably stationary
plot.ts(kings_diff2)
plot.ts(kings_diff3)
par(mfrow = c(1,1))
mean(kings_diff1); sd(kings_diff1)
# Identify the ARIMA model from the first-differenced data
acf(kings_diff1, lag.max = 20) # inside the dashed bounds from lag 2 onward; ACF cut-off at lag 2 --> MA(1)
pacf(kings_diff1, lag.max = 20) # PACF cut-off at lag 4 --> AR(3)
# --> ARIMA(3,1,1) --> AR(3), I(1), MA(1) : (3,1,1)
# Automatic ARIMA model selection
auto.arima(kings) # --> ARIMA(0,1,1)
# Forecasting
kings_arima <- arima(kings_ts, order = c(3,1,1)) # order identified manually from the ACF/PACF
kings_arima
# Forecast the next 5 values
kings_fcast <- forecast(kings_arima, h = 5)
kings_fcast
plot(kings_fcast)
kings_arima1 <- arima(kings_ts, order = c(0,1,1)) # order recommended by auto.arima
kings_arima1
kings_fcast1 <- forecast(kings_arima1, h = 5)
kings_fcast1
plot(kings_fcast)
plot(kings_fcast1)
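# (Added note): a quick way to compare the hand-picked ARIMA(3,1,1) with the
# auto-selected ARIMA(0,1,1) is their AIC; lower is better.
AIC(kings_arima, kings_arima1)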
######################################################
## Time series practice - resort souvenir shop sales ##
######################################################
data <- scan("http://robjhyndman.com/tsdldata/data/fancy.dat")
fancy <- ts(data, frequency = 12, start = c(1987, 1))
fancy
plot.ts(fancy) # variance grows over time --> stabilise it with a log transform
fancy_log <- log(fancy)
plot.ts(fancy_log)
fancy_diff <- diff(fancy_log, differences = 1)
plot.ts(fancy_diff)
# The mean is roughly constant, but the variance is large in certain periods
# --> a model other than a plain ARIMA may be advisable
acf(fancy_diff, lag.max = 100)
pacf(fancy_diff, lag.max = 100)
auto.arima(fancy) # ARIMA(1,1,1)(0,1,1)[12]
fancy_arima <- arima(fancy, order = c(1,1,1), seasonal = list(order = c(0,1,1), period = 12))
fancy_fcast <- forecast(fancy_arima) # forecast.Arima() is defunct; use the forecast() generic
plot(fancy_fcast)
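# (Added note): the log transform above was used to stabilise the variance, yet
# auto.arima()/arima() were then fitted to the raw series. Fitting on the log
# scale may be preferable here; forecasts can be back-transformed with exp().
fancy_arima_log <- arima(fancy_log, order = c(1,1,1), seasonal = list(order = c(0,1,1), period = 12))
plot(forecast(fancy_arima_log)) # forecasts are on the log scale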
|
/JB_timeSeries.R
|
no_license
|
doeungim/ADP-1
|
R
| false | false | 5,706 |
|
# What's a factor and why would you use it?
#####################################################################################################################
#
# In this chapter you dive into the wonderful world of factors.
#
# The term factor refers to a statistical data type used to store categorical variables.
# The difference between a categorical variable and a continuous variable is that a categorical variable can
# belong to a limited number of categories. A continuous variable, on the other hand,
# can correspond to an infinite number of values.
#
# It is important that R knows whether it is dealing with a continuous or a categorical variable,
# as the statistical models you will develop in the future treat both types differently.
# (You will see later why this is the case.)
#
# A good example of a categorical variable is the variable 'Gender'.
# A human individual can either be "Male" or "Female", making abstraction of intersexes.
# So here "Male" and "Female" are, in a simplified sense, the two values of the categorical variable "Gender",
# and every observation can be assigned to either the value "Male" or "Female".
#
#####################################################################################################################
theory <- "R uses factors for categorical variables!"
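# A minimal illustration (added): factor() stores the categories as levels,
# so R treats "Gender" as categorical rather than as free text.
gender_vector <- c("Male", "Female", "Female", "Male", "Male")
factor_gender_vector <- factor(gender_vector)
factor_gender_vector          # prints the values plus: Levels: Female Male
levels(factor_gender_vector)  # "Female" "Male"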
|
/dataCamp/introductionToR/4_factors/1_WhatsAFactorAndWhyWouldYouUseIt.R
|
permissive
|
odonnmi/learnNPractice
|
R
| false | false | 1,578 |
|
## Getting the full dataset on household power consumption
data_full <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting the dates as required
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Creating Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving Plot1 to file
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
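## (Added note): dev.copy() re-renders the screen device and can shift fonts and
## sizing; opening the png device directly before plotting is a common alternative.
png(file = "plot1.png", height = 480, width = 480)
hist(data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
dev.off()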
|
/plot1.R
|
no_license
|
dghosal15/ExData_Plotting1
|
R
| false | false | 806 |
|
# ---------------------------------------------------------------------
# length.R
# return the number of keys in a hash
# NB:
# - This doesn't work: env.profile(x@.xData)$nchains
# ---------------------------------------------------------------------
setMethod( "length" , "hash" ,
function(x)
length( x@.xData )
)
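# Illustrative usage (added; assumes the hash package is installed):
#   h <- hash::hash(a = 1, b = 2, c = 3)
#   length(h) # 3, the number of keys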
|
/R/length.R
|
no_license
|
cran/hash
|
R
| false | false | 342 |
|
### This script is for extra analyses on the most interesting select loci (1p36.1, 7q32 and 12p13.1)
#install.packages("devtools")
#library(devtools)
#install_github("jrs95/hyprcoloc", build_opts = c("--no-resave-data", "--no-manual"), build_vignettes = F)
#browseVignettes("hyprcoloc")
# The install kept failing when trying to build the vignettes, so build_vignettes is disabled above.
#devtools::install_github("boxiangliu/locuscomparer")
library(hyprcoloc)
library(locuscomparer)
setwd("YOUR WORKING DIRECTORY")
# Read in the LD matrices, and the GWAS and QTL data. The file locations are relative to your working directory, so adjust accordingly.
bmi=read.table("./Meta-analysis_Locke_et_al+UKBiobank_2018_UPDATED.txt",sep = "\t",header = T)
t2d=read.table("./Mahajan.NatGenet2018b.T2Dbmiadj.European.with.rsIDs.txt",sep = "\t",header = T)
hdl=read.table("./jointGwasMc_HDL.txt",sep = "\t",header = T)
triG=read.table("./jointGwasMc_TG.txt",sep = "\t",header = T)
cis_eqtl=read.table("./Eurobats_adipose_select_loci_cis-eQTLs_from_INT_logTPM.txt",sep = "\t",header = T)
cis_aqtl=read.table("./Eurobats_adipose_select_loci_cis-aQTLs_from_unnormalized_activities.txt",sep = "\t",header = T)
trans_bmi_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_BMI_MRs.txt",sep = "\t",header = T)
trans_bmi_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_BMI_MRs.txt",sep = "\t",header = T)
trans_t2d_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_HOMA-IR_MRs.txt",sep = "\t",header = T)
trans_t2d_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_HOMA-IR_MRs.txt",sep = "\t",header = T)
trans_hdl_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_HDL_MRs.txt",sep = "\t",header = T)
trans_hdl_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_HDL_MRs.txt",sep = "\t",header = T)
trans_triG_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_TriG_MRs.txt",sep = "\t",header = T)
trans_triG_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_TriG_MRs.txt",sep = "\t",header = T)
ld_files=c("Eurobats_chr1p36.1_LD_matrix.txt","Eurobats_chr7q32_LD_matrix.txt","Eurobats_chr12p13.33_LD_matrix.txt","Eurobats_chr12p13.1_LD_matrix.txt")
ld=list()
index=1
for(i in ld_files){
ld[[index]]=read.table(paste("./",i,sep=""),sep = "\t",header = F)
rownames(ld[[index]])=ld[[index]][,3] # column 3 holds the rsIDs
ld[[index]]=ld[[index]][,-c(1:5)] # drop the leading annotation columns, leaving the square LD matrix
colnames(ld[[index]])=rownames(ld[[index]])
index=index+1
}
# The HDL GWAS data has coordinates for both hg18 and hg19, but I need CHR and POS columns (based on hg19) instead.
colnames(hdl)=c("CHR","POS","SNP","A1","A2","BETA","SE","N","P","Freq.A1.1000G.EUR")
hdl$CHR=gsub("chr","",hdl$CHR)
hdl$CHR=as.numeric(gsub(":.*","",hdl$CHR))
# This introduced NAs, but only for 3 SNPs without rsIDs (labeled only as ".")
hdl=hdl[!is.na(hdl$CHR),]
hdl$POS=as.numeric(gsub("chr.*:","",hdl$POS))
# The TriG GWAS data has coordinates for both hg18 and hg19, but I need CHR and POS columns (based on hg19) instead.
colnames(triG)=c("CHR","POS","SNP","A1","A2","BETA","SE","N","P","Freq.A1.1000G.EUR")
triG$CHR=gsub("chr","",triG$CHR)
triG$CHR=as.numeric(gsub(":.*","",triG$CHR))
# This introduced NAs, but only for 3 SNPs without rsIDs (labeled only as ".")
triG=triG[!is.na(triG$CHR),]
triG$POS=as.numeric(gsub("chr.*:","",triG$POS))
# Filter GWAS, QTL and LD data to the same SNPs
filt_bmi=bmi[na.omit(match(c(rownames(ld[[1]]),rownames(ld[[2]]),rownames(ld[[3]]),rownames(ld[[4]])),bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(unique(cis_eqtl$snps),filt_bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(t2d$rsID,filt_bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(hdl$SNP,filt_bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(triG$SNP,filt_bmi$SNP)),]
filt_t2d=t2d[na.omit(match(filt_bmi$SNP,t2d$rsID)),]
filt_hdl=hdl[na.omit(match(filt_bmi$SNP,hdl$SNP)),]
filt_triG=triG[na.omit(match(filt_bmi$SNP,triG$SNP)),]
filt_cis_eqtl=cis_eqtl[cis_eqtl$snps %in% filt_bmi$SNP,]
filt_cis_aqtl=cis_aqtl[cis_aqtl$snps %in% filt_bmi$SNP,]
filt_trans_bmi_eqtl=trans_bmi_eqtl[trans_bmi_eqtl$snps %in% filt_bmi$SNP,]
filt_trans_bmi_aqtl=trans_bmi_aqtl[trans_bmi_aqtl$snps %in% filt_bmi$SNP,]
filt_trans_t2d_eqtl=trans_t2d_eqtl[trans_t2d_eqtl$snps %in% filt_t2d$rsID,]
filt_trans_t2d_aqtl=trans_t2d_aqtl[trans_t2d_aqtl$snps %in% filt_t2d$rsID,]
filt_trans_hdl_eqtl=trans_hdl_eqtl[trans_hdl_eqtl$snps %in% filt_hdl$SNP,]
filt_trans_hdl_aqtl=trans_hdl_aqtl[trans_hdl_aqtl$snps %in% filt_hdl$SNP,]
filt_trans_triG_eqtl=trans_triG_eqtl[trans_triG_eqtl$snps %in% filt_triG$SNP,]
filt_trans_triG_aqtl=trans_triG_aqtl[trans_triG_aqtl$snps %in% filt_triG$SNP,]
filt_ld=list()
filt_ld[[1]]=ld[[1]][filt_bmi$SNP[filt_bmi$CHR==1],filt_bmi$SNP[filt_bmi$CHR==1]]
filt_ld[[2]]=ld[[2]][filt_bmi$SNP[filt_bmi$CHR==7],filt_bmi$SNP[filt_bmi$CHR==7]]
# The two chr12 loci are split at 1.4 Mb (12p13.33 below, 12p13.1 above)
filt_ld[[3]]=ld[[3]][filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS<1400000],filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS<1400000]]
filt_ld[[4]]=ld[[4]][filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS>1400000],filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS>1400000]]
# Let's free up some memory by dropping the huge trans-QTL data.frames
rm(trans_bmi_eqtl)
rm(trans_bmi_aqtl)
rm(trans_t2d_eqtl)
rm(trans_t2d_aqtl)
rm(trans_hdl_eqtl)
rm(trans_hdl_aqtl)
rm(trans_triG_eqtl)
rm(trans_triG_aqtl)
# Add chromosome and position to the QTLs for sorting
filt_cis_eqtl$chr=filt_bmi[match(filt_cis_eqtl$snps,filt_bmi$SNP),1]
filt_cis_aqtl$chr=filt_bmi[match(filt_cis_aqtl$snps,filt_bmi$SNP),1]
filt_trans_bmi_eqtl$chr=filt_bmi[match(filt_trans_bmi_eqtl$snps,filt_bmi$SNP),1]
filt_trans_bmi_aqtl$chr=filt_bmi[match(filt_trans_bmi_aqtl$snps,filt_bmi$SNP),1]
filt_trans_t2d_eqtl$chr=filt_bmi[match(filt_trans_t2d_eqtl$snps,filt_bmi$SNP),1]
filt_trans_t2d_aqtl$chr=filt_bmi[match(filt_trans_t2d_aqtl$snps,filt_bmi$SNP),1]
filt_trans_hdl_eqtl$chr=filt_bmi[match(filt_trans_hdl_eqtl$snps,filt_bmi$SNP),1]
filt_trans_hdl_aqtl$chr=filt_bmi[match(filt_trans_hdl_aqtl$snps,filt_bmi$SNP),1]
filt_trans_triG_eqtl$chr=filt_bmi[match(filt_trans_triG_eqtl$snps,filt_bmi$SNP),1]
filt_trans_triG_aqtl$chr=filt_bmi[match(filt_trans_triG_aqtl$snps,filt_bmi$SNP),1]
filt_cis_eqtl$position=filt_bmi[match(filt_cis_eqtl$snps,filt_bmi$SNP),2]
filt_cis_aqtl$position=filt_bmi[match(filt_cis_aqtl$snps,filt_bmi$SNP),2]
filt_trans_bmi_eqtl$position=filt_bmi[match(filt_trans_bmi_eqtl$snps,filt_bmi$SNP),2]
filt_trans_bmi_aqtl$position=filt_bmi[match(filt_trans_bmi_aqtl$snps,filt_bmi$SNP),2]
filt_trans_t2d_eqtl$position=filt_bmi[match(filt_trans_t2d_eqtl$snps,filt_bmi$SNP),2]
filt_trans_t2d_aqtl$position=filt_bmi[match(filt_trans_t2d_aqtl$snps,filt_bmi$SNP),2]
filt_trans_hdl_eqtl$position=filt_bmi[match(filt_trans_hdl_eqtl$snps,filt_bmi$SNP),2]
filt_trans_hdl_aqtl$position=filt_bmi[match(filt_trans_hdl_aqtl$snps,filt_bmi$SNP),2]
filt_trans_triG_eqtl$position=filt_bmi[match(filt_trans_triG_eqtl$snps,filt_bmi$SNP),2]
filt_trans_triG_aqtl$position=filt_bmi[match(filt_trans_triG_aqtl$snps,filt_bmi$SNP),2]
# Sort by chr and position
filt_bmi=filt_bmi[order(filt_bmi$CHR,filt_bmi$POS),]
filt_t2d=filt_t2d[order(filt_t2d$Chr,filt_t2d$Pos),]
filt_hdl=filt_hdl[order(filt_hdl$CHR,filt_hdl$POS),]
filt_triG=filt_triG[order(filt_triG$CHR,filt_triG$POS),]
filt_cis_eqtl=filt_cis_eqtl[order(filt_cis_eqtl$chr,filt_cis_eqtl$position),]
filt_cis_aqtl=filt_cis_aqtl[order(filt_cis_aqtl$chr,filt_cis_aqtl$position),]
filt_trans_bmi_eqtl=filt_trans_bmi_eqtl[order(filt_trans_bmi_eqtl$chr,filt_trans_bmi_eqtl$position),]
filt_trans_bmi_aqtl=filt_trans_bmi_aqtl[order(filt_trans_bmi_aqtl$chr,filt_trans_bmi_aqtl$position),]
filt_trans_t2d_eqtl=filt_trans_t2d_eqtl[order(filt_trans_t2d_eqtl$chr,filt_trans_t2d_eqtl$position),]
filt_trans_t2d_aqtl=filt_trans_t2d_aqtl[order(filt_trans_t2d_aqtl$chr,filt_trans_t2d_aqtl$position),]
filt_trans_hdl_eqtl=filt_trans_hdl_eqtl[order(filt_trans_hdl_eqtl$chr,filt_trans_hdl_eqtl$position),]
filt_trans_hdl_aqtl=filt_trans_hdl_aqtl[order(filt_trans_hdl_aqtl$chr,filt_trans_hdl_aqtl$position),]
filt_trans_triG_eqtl=filt_trans_triG_eqtl[order(filt_trans_triG_eqtl$chr,filt_trans_triG_eqtl$position),]
filt_trans_triG_aqtl=filt_trans_triG_aqtl[order(filt_trans_triG_aqtl$chr,filt_trans_triG_aqtl$position),]
### LocusCompare plots
## 1p36.1
# First, grab the necessary P-values for the SNPs used in the HyPrColoc analyses for the traits of interest
bmi1=filt_bmi[filt_bmi$CHR==1,c(3,9)]
loc1_eEPHB2=filt_cis_eqtl[filt_cis_eqtl$gene=="EPHB2",c(1,4)]
loc1_aEPHB2=filt_cis_aqtl[filt_cis_aqtl$gene=="EPHB2",c(1,4)]
loc1_eZNF436=filt_cis_eqtl[filt_cis_eqtl$gene=="ZNF436",c(1,4)]
loc1_aZNF436=filt_cis_aqtl[filt_cis_aqtl$gene=="ZNF436",c(1,4)]
loc1_eTCEA3=filt_cis_eqtl[filt_cis_eqtl$gene=="TCEA3",c(1,4)]
loc1_aTCEA3=filt_cis_aqtl[filt_cis_aqtl$gene=="TCEA3",c(1,4)]
loc1_eLASP1=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="LASP1" & filt_trans_bmi_eqtl$chr==1,c(1,4)]
loc1_aLASP1=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="LASP1" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
loc1_eRASSF4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="RASSF4" & filt_trans_bmi_eqtl$chr==1,c(1,4)]
loc1_aRASSF4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="RASSF4" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
loc1_aGNA14=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="GNA14" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
loc1_aDOK5=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="DOK5" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
colnames(bmi1)=c("rsid","pval")
colnames(loc1_eEPHB2)=c("rsid","pval")
colnames(loc1_aEPHB2)=c("rsid","pval")
colnames(loc1_eZNF436)=c("rsid","pval")
colnames(loc1_aZNF436)=c("rsid","pval")
colnames(loc1_eTCEA3)=c("rsid","pval")
colnames(loc1_aTCEA3)=c("rsid","pval")
colnames(loc1_eLASP1)=c("rsid","pval")
colnames(loc1_aLASP1)=c("rsid","pval")
colnames(loc1_eRASSF4)=c("rsid","pval")
colnames(loc1_aRASSF4)=c("rsid","pval")
colnames(loc1_aGNA14)=c("rsid","pval")
colnames(loc1_aDOK5)=c("rsid","pval")
rownames(bmi1)=bmi1$rsid
rownames(loc1_eEPHB2)=loc1_eEPHB2$rsid
rownames(loc1_aEPHB2)=loc1_aEPHB2$rsid
rownames(loc1_eZNF436)=loc1_eZNF436$rsid
rownames(loc1_aZNF436)=loc1_aZNF436$rsid
rownames(loc1_eTCEA3)=loc1_eTCEA3$rsid
rownames(loc1_aTCEA3)=loc1_aTCEA3$rsid
rownames(loc1_eLASP1)=loc1_eLASP1$rsid
rownames(loc1_aLASP1)=loc1_aLASP1$rsid
rownames(loc1_eRASSF4)=loc1_eRASSF4$rsid
rownames(loc1_aRASSF4)=loc1_aRASSF4$rsid
rownames(loc1_aGNA14)=loc1_aGNA14$rsid
rownames(loc1_aDOK5)=loc1_aDOK5$rsid
# Check out some relevant LocusCompare plots before picking which ones to write to file
locuscompare(in_fn1=bmi1,in_fn2=loc1_eEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-eQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs12408468") # Potential 3rd BMI signal?
locuscompare(in_fn1=loc1_eEPHB2,in_fn2=loc1_aEPHB2,title1 = "EPHB2 cis-eQTL", title2 = "EPHB2 cis-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-eQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_eZNF436,in_fn2=loc1_aZNF436,title1 = "ZNF436 cis-eQTL", title2 = "ZNF436 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eTCEA3,title1 = "BMI GWAS", title2 = "TCEA3 cis-eQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aTCEA3,title1 = "BMI GWAS", title2 = "TCEA3 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-eQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aLASP1,title1 = "EPHB2 cis-aQTL", title2 = "LASP1 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aLASP1,title1 = "EPHB2 cis-aQTL", title2 = "LASP1 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-eQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aRASSF4,title1 = "EPHB2 cis-aQTL", title2 = "RASSF4 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aRASSF4,title1 = "EPHB2 cis-aQTL", title2 = "RASSF4 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aGNA14,title1 = "EPHB2 cis-aQTL", title2 = "GNA14 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aGNA14,title1 = "EPHB2 cis-aQTL", title2 = "GNA14 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aDOK5,title1 = "EPHB2 cis-aQTL", title2 = "DOK5 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aDOK5,title1 = "EPHB2 cis-aQTL", title2 = "DOK5 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_eEPHB2,in_fn2=loc1_aEPHB2,title1 = "EPHB2 cis-eQTL", title2 = "EPHB2 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
# This locus is complex and consequently difficult to interpret. The EPHB2 cis-aQTL and various BMI MR trans-aQTL signals suggest 2 functional signals
# represented by rs6692586 (the top BMI GWAS SNP) and rs4654828 (the top trans-aQTL signal for many BMI MRs). The EPHB2 cis-aQTL has these two SNPs
# at roughly equal strength, while rs6692586 is clearly stronger for BMI and rs4654828 is clearly stronger for the trans-aQTLs. Perhaps the best
# hypothetical explanation for these observations is that rs6692586 operates in cis through effects on EPHB2 expression and activity, while rs4654828
# has an alternative proximal effect that distally affects the activities of many correlated BMI MRs, including EPHB2, which shows up as a bump in
# the EPHB2 aQTL signal. The proximal effect of rs4654828 might be on ZNF436 activity, but this probably cannot be mediated via expression levels,
# since there is an extremely strong cis-eQTL for ZNF436 at this locus that does not overlap the BMI signal or the cis-aQTL signal. I looked into
# the position of rs4654828, but it is quite far away from ZNF436, in a LACTBL1 intron. LACTBL1 is apparently not expressed in our adipose tissue, so
# it is hard to imagine how it could mediate the effect on BMI within adipose. It is best expressed in testis, which does have a significant
# rs4654828-LACTBL1 eQTL, but this also doesn't seem relevant to BMI. So if rs4654828 does affect ZNF436 activity in adipose, the effect must be
# mediated some other way. Interestingly, rs4654828 does show significant eQTLs with ZNF436 in other GTEx tissue types (Skin, Aorta, Tibial Artery,
# Esophagus, Tibial Nerve and Thyroid). It is hard to imagine how ZNF436 expression effects in other tissues could be relevant to ZNF436 activity
# in adipose. There are also rs4654828-TCEA3 eQTLs in Skin and Skeletal Muscle, and a TCEA3 splicing QTL in skin. The TCEA3 eQTL/aQTL LocusCompare
# plots do not suggest TCEA3 is relevant to either BMI signal in adipose. Regardless, this sort of scenario might manifest as epistatic effects on
# BMI and EPHB2 activity between these two SNPs. This is easy enough to test for EPHB2 activity (see the sketch below), but I can't test it for BMI.
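# (Added sketch, not from the original analysis): the epistasis test above can be
# phrased as a SNP x SNP interaction in a linear model. The objects below are
# simulated stand-ins for per-sample EPHB2 activity and 0/1/2 dosages at
# rs6692586 and rs4654828; with real data, substitute the measured values.
set.seed(42)
epi_demo <- data.frame(rs6692586 = sample(0:2, 200, replace = TRUE),
                       rs4654828 = sample(0:2, 200, replace = TRUE))
epi_demo$ephb2_act <- 0.3*epi_demo$rs6692586 + 0.2*epi_demo$rs4654828 + rnorm(200)
epi_fit <- lm(ephb2_act ~ rs6692586 * rs4654828, data = epi_demo)
summary(epi_fit)$coefficients["rs6692586:rs4654828", ] # interaction estimate and P-value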
# Let's write some to PDFs
pdf("rs6692586-EPHB2_eQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_eEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-eQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-EPHB2_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-EPHB2_eQTL_and_aQTL_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_eEPHB2,in_fn2=loc1_aEPHB2,title1 = "EPHB2 cis-eQTL", title2 = "EPHB2 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-EPHB2_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-ZNF436_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-ZNF436_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-DOK5_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-DOK5_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-RASSF4_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-RASSF4_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-GNA14_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-GNA14_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-LASP1_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-LASP1_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-LASP1_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aLASP1,title1 = "EPHB2 cis-aQTL", title2 = "LASP1 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-GNA14_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aGNA14,title1 = "EPHB2 cis-aQTL", title2 = "GNA14 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-RASSF4_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aRASSF4,title1 = "EPHB2 cis-aQTL", title2 = "RASSF4 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-DOK5_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aDOK5,title1 = "EPHB2 cis-aQTL", title2 = "DOK5 cis-aQTL",snp = "rs6692586")
dev.off()
## 7q32
# First, grab the necessary P-values for the SNPs used in the HyPrColoc analyses for the traits of interest
bmi2=filt_bmi[filt_bmi$CHR==7,c(3,9)]
t2d2=filt_t2d[filt_t2d$Chr==7,c(1,9)]
hdl2=filt_hdl[filt_hdl$CHR==7,c(3,9)]
triG2=filt_triG[filt_triG$CHR==7,c(3,9)]
loc2_eLINC=filt_cis_eqtl[filt_cis_eqtl$gene=="LINC-PINT",c(1,4)]
loc2_aLINC=filt_cis_aqtl[filt_cis_aqtl$gene=="LINC-PINT",c(1,4)]
loc2_eKLF14=filt_cis_eqtl[filt_cis_eqtl$gene=="KLF14",c(1,4)]
loc2_aKLF14=filt_cis_aqtl[filt_cis_aqtl$gene=="KLF14",c(1,4)]
loc2_eAC=filt_cis_eqtl[filt_cis_eqtl$gene=="AC016831.7",c(1,4)]
loc2_eTBX4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="TBX4" & filt_trans_bmi_eqtl$chr==7,c(1,4)]
loc2_aTBX4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="TBX4" & filt_trans_bmi_aqtl$chr==7,c(1,4)]
loc2_eGNB1=filt_trans_t2d_eqtl[filt_trans_t2d_eqtl$gene=="GNB1" & filt_trans_t2d_eqtl$chr==7,c(1,4)]
loc2_aGNB1=filt_trans_t2d_aqtl[filt_trans_t2d_aqtl$gene=="GNB1" & filt_trans_t2d_aqtl$chr==7,c(1,4)]
loc2_eESR2=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$gene=="ESR2" & filt_trans_hdl_eqtl$chr==7,c(1,4)]
loc2_aESR2=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$gene=="ESR2" & filt_trans_hdl_aqtl$chr==7,c(1,4)]
loc2_eNR2F1=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$gene=="NR2F1" & filt_trans_hdl_eqtl$chr==7,c(1,4)]
loc2_aNR2F1=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$gene=="NR2F1" & filt_trans_hdl_aqtl$chr==7,c(1,4)]
loc2_eAGT=filt_trans_triG_eqtl[filt_trans_triG_eqtl$gene=="AGT" & filt_trans_triG_eqtl$chr==7,c(1,4)]
loc2_aAGT=filt_trans_triG_aqtl[filt_trans_triG_aqtl$gene=="AGT" & filt_trans_triG_aqtl$chr==7,c(1,4)]
loc2_eRABIF=filt_trans_triG_eqtl[filt_trans_triG_eqtl$gene=="RABIF" & filt_trans_triG_eqtl$chr==7,c(1,4)]
loc2_aRABIF=filt_trans_triG_aqtl[filt_trans_triG_aqtl$gene=="RABIF" & filt_trans_triG_aqtl$chr==7,c(1,4)]
colnames(bmi2)=c("rsid","pval")
colnames(t2d2)=c("rsid","pval")
colnames(hdl2)=c("rsid","pval")
colnames(triG2)=c("rsid","pval")
colnames(loc2_eLINC)=c("rsid","pval")
colnames(loc2_aLINC)=c("rsid","pval")
colnames(loc2_eKLF14)=c("rsid","pval")
colnames(loc2_aKLF14)=c("rsid","pval")
colnames(loc2_eAC)=c("rsid","pval")
colnames(loc2_eTBX4)=c("rsid","pval")
colnames(loc2_aTBX4)=c("rsid","pval")
colnames(loc2_eGNB1)=c("rsid","pval")
colnames(loc2_aGNB1)=c("rsid","pval")
colnames(loc2_eESR2)=c("rsid","pval")
colnames(loc2_aESR2)=c("rsid","pval")
colnames(loc2_eNR2F1)=c("rsid","pval")
colnames(loc2_aNR2F1)=c("rsid","pval")
colnames(loc2_eAGT)=c("rsid","pval")
colnames(loc2_aAGT)=c("rsid","pval")
colnames(loc2_eRABIF)=c("rsid","pval")
colnames(loc2_aRABIF)=c("rsid","pval")
rownames(bmi2)=bmi2$rsid
rownames(t2d2)=t2d2$rsid
rownames(hdl2)=hdl2$rsid
rownames(triG2)=triG2$rsid
rownames(loc2_eLINC)=loc2_eLINC$rsid
rownames(loc2_aLINC)=loc2_aLINC$rsid
rownames(loc2_eKLF14)=loc2_eKLF14$rsid
rownames(loc2_aKLF14)=loc2_aKLF14$rsid
rownames(loc2_eAC)=loc2_eAC$rsid
rownames(loc2_eTBX4)=loc2_eTBX4$rsid
rownames(loc2_aTBX4)=loc2_aTBX4$rsid
rownames(loc2_eGNB1)=loc2_eGNB1$rsid
rownames(loc2_aGNB1)=loc2_aGNB1$rsid
rownames(loc2_eESR2)=loc2_eESR2$rsid
rownames(loc2_aESR2)=loc2_aESR2$rsid
rownames(loc2_eNR2F1)=loc2_eNR2F1$rsid
rownames(loc2_aNR2F1)=loc2_aNR2F1$rsid
rownames(loc2_eAGT)=loc2_eAGT$rsid
rownames(loc2_aAGT)=loc2_aAGT$rsid
rownames(loc2_eRABIF)=loc2_eRABIF$rsid
rownames(loc2_aRABIF)=loc2_aRABIF$rsid
# Check out some relevant LocusCompare plots before picking which ones to write to file
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eLINC,title1 = "BMI GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=loc2_eLINC,in_fn2=loc2_aLINC,title1 = "LINC-PINT cis-eQTL", title2 = "LINC-PINT cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eLINC,title1 = "T2D GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eLINC,title1 = "HDL GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eLINC,title1 = "Triglycerides GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_aKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_aKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-aQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs738134") # Near top T2D GWAS SNP
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eAC,title1 = "BMI GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eAC,title1 = "T2D GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eAC,title1 = "HDL GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eAC,title1 = "Triglycerides GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eTBX4,title1 = "BMI GWAS", title2 = "TBX4 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eTBX4,title1 = "T2D GWAS", title2 = "TBX4 trans-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eTBX4,title1 = "HDL GWAS", title2 = "TBX4 trans-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eTBX4,title1 = "Triglycerides GWAS", title2 = "TBX4 trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_aTBX4,title1 = "BMI GWAS", title2 = "TBX4 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_aTBX4,title1 = "T2D GWAS", title2 = "TBX4 trans-aQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aTBX4,title1 = "HDL GWAS", title2 = "TBX4 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aTBX4,title1 = "Triglycerides GWAS", title2 = "TBX4 trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eGNB1,title1 = "T2D GWAS", title2 = "GNB1 trans-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=t2d2,in_fn2=loc2_aGNB1,title1 = "T2D GWAS", title2 = "GNB1 trans-aQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eESR2,title1 = "HDL GWAS", title2 = "ESR2 trans-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=loc2_aESR2,title1 = "HDL GWAS", title2 = "ESR2 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=loc2_eNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=loc2_aNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=loc2_eNR2F1,in_fn2=loc2_aNR2F1,title1 = "NR2F1 trans-eQTL", title2 = "NR2F1 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eRABIF,title1 = "Triglycerides GWAS", title2 = "RABIF trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aRABIF,title1 = "Triglycerides GWAS", title2 = "RABIF trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs738134") # Near top T2D GWAS SNP
# I never ran HyPrColoc on just cis-e_KLF14 and cis-a_KLF14 alone. First I need to calculate the SEs and format the data.
filt_cis_eqtl$SE=filt_cis_eqtl$beta/filt_cis_eqtl$statistic # SE = beta / t-statistic
filt_cis_aqtl$SE=filt_cis_aqtl$beta/filt_cis_aqtl$statistic
all(filt_cis_eqtl$snps[filt_cis_eqtl$gene=="KLF14"]==filt_cis_aqtl$snps[filt_cis_aqtl$gene=="KLF14"]) # TRUE
betas2=cbind("cis-e_KLF14"=filt_cis_eqtl[filt_cis_eqtl$gene=="KLF14","beta"],"cis-a_KLF14"=filt_cis_aqtl[filt_cis_aqtl$gene=="KLF14","beta"])
ses2=cbind("cis-e_KLF14"=filt_cis_eqtl[filt_cis_eqtl$gene=="KLF14","SE"],"cis-a_KLF14"=filt_cis_aqtl[filt_cis_aqtl$gene=="KLF14","SE"])
rownames(betas2)=filt_cis_aqtl$snps[filt_cis_aqtl$gene=="KLF14"]
rownames(ses2)=filt_cis_aqtl$snps[filt_cis_aqtl$gene=="KLF14"]
all(rownames(betas2)==rownames(filt_ld[[2]])) # TRUE
all(rownames(ses2)==rownames(filt_ld[[2]])) # TRUE
eKLF14_aKLF14=hyprcoloc(as.matrix(betas2),as.matrix(ses2),
trait.names=colnames(betas2),snp.id=rownames(betas2),ld.matrix = filt_ld[[2]],
trait.subset = c("cis-e_KLF14","cis-a_KLF14"),snpscores = T)
# KLF14 eQTL and aQTL colocalize with a PP=0.9094 that is best explained by rs4731702.
# Now let's do the same sort of analysis for NR2F1 and AGT.
filt_trans_hdl_eqtl$SE=filt_trans_hdl_eqtl$beta/filt_trans_hdl_eqtl$statistic
filt_trans_hdl_aqtl$SE=filt_trans_hdl_aqtl$beta/filt_trans_hdl_aqtl$statistic
temp_e=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$chr==7 & filt_trans_hdl_eqtl$gene=="NR2F1",]
temp_a=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$chr==7 & filt_trans_hdl_aqtl$gene=="NR2F1",]
all(rownames(betas2)==temp_e$snps) # TRUE
all(rownames(betas2)==temp_a$snps) # TRUE
betas2=cbind(betas2,"trans-e_NR2F1"=temp_e$beta,"trans-a_NR2F1"=temp_a$beta)
ses2=cbind(ses2,"trans-e_NR2F1"=temp_e$SE,"trans-a_NR2F1"=temp_a$SE)
filt_trans_triG_eqtl$SE=filt_trans_triG_eqtl$beta/filt_trans_triG_eqtl$statistic
filt_trans_triG_aqtl$SE=filt_trans_triG_aqtl$beta/filt_trans_triG_aqtl$statistic
temp_e=filt_trans_triG_eqtl[filt_trans_triG_eqtl$chr==7 & filt_trans_triG_eqtl$gene=="AGT",]
temp_a=filt_trans_triG_aqtl[filt_trans_triG_aqtl$chr==7 & filt_trans_triG_aqtl$gene=="AGT",]
all(rownames(betas2)==temp_e$snps) # TRUE
all(rownames(betas2)==temp_a$snps) # FALSE
temp_a=temp_a[match(rownames(betas2),temp_a$snps),]
all(rownames(betas2)==temp_a$snps) # TRUE
betas2=cbind(betas2,"trans-e_AGT"=temp_e$beta,"trans-a_AGT"=temp_a$beta)
ses2=cbind(ses2,"trans-e_AGT"=temp_e$SE,"trans-a_AGT"=temp_a$SE)
eNR2F1_aNR2F1=hyprcoloc(as.matrix(betas2),as.matrix(ses2),
trait.names=colnames(betas2),snp.id=rownames(betas2),ld.matrix = filt_ld[[2]],
trait.subset = c("trans-e_NR2F1","trans-a_NR2F1"),snpscores = T)
# NR2F1 eQTL and aQTL colocalize with a PP=0.8700 that is best explained by rs738134.
eAGT_aAGT=hyprcoloc(as.matrix(betas2),as.matrix(ses2),
trait.names=colnames(betas2),snp.id=rownames(betas2),ld.matrix = filt_ld[[2]],
trait.subset = c("trans-e_AGT","trans-a_AGT"),snpscores = T)
# AGT eQTL and aQTL colocalize with a PP=0.6451 that is best explained by rs11765979.
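# (Added note): with snpscores = TRUE, hyprcoloc() should return a list whose
# $results element holds the colocalization table (posterior probability and
# candidate SNP) and whose $snpscores element holds the per-SNP scores.
eKLF14_aKLF14$results
eNR2F1_aNR2F1$results
eAGT_aAGT$results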
# Let's write some to PDFs
pdf("BMI_T2D_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("BMI_T2D_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("BMI_T2D_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
dev.off()
pdf("BMI_HDL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("BMI_HDL_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("BMI_HDL_7q32_LocusCompare_rs11765979.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
dev.off()
pdf("BMI_TriG_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("BMI_TriG_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("T2D_HDL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("T2D_HDL_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
dev.off()
pdf("T2D_HDL_7q32_LocusCompare_rs11765979.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
dev.off()
pdf("T2D_TriG_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("T2D_TriG_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
dev.off()
pdf("T2D_TriG_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("HDL_TriG_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("HDL_TriG_7q32_LocusCompare_rs11765979.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs11765979") # Top HDL GWAS SNP
dev.off()
pdf("HDL_TriG_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./BMI/BMI_e_LINC-PINT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eLINC,title1 = "BMI GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_LINC-PINT_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eLINC,title1 = "T2D GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_LINC-PINT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eLINC,title1 = "HDL GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_LINC-PINT_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eLINC,title1 = "Triglycerides GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./LINC-PINT_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eLINC,in_fn2=loc2_aLINC,title1 = "LINC-PINT cis-eQTL", title2 = "LINC-PINT cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./BMI/BMI_e_AC016831.7_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eAC,title1 = "BMI GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_AC016831.7_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eAC,title1 = "T2D GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_AC016831.7_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eAC,title1 = "HDL GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_AC016831.7_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eAC,title1 = "Triglycerides GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./BMI/BMI_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_KLF14_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_KLF14_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./BMI/BMI_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_aKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_aKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_a_KLF14_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_aKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-aQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_aKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_a_KLF14_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_aKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-aQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./KLF14_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./BMI/BMI_e_TBX4_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eTBX4,title1 = "BMI GWAS", title2 = "TBX4 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_TBX4_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eTBX4,title1 = "T2D GWAS", title2 = "TBX4 trans-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_TBX4_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eTBX4,title1 = "HDL GWAS", title2 = "TBX4 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_TBX4_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eTBX4,title1 = "Triglycerides GWAS", title2 = "TBX4 trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./TBX4_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eTBX4,in_fn2=loc2_aTBX4,title1 = "TBX4 cis-eQTL", title2 = "TBX4 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./HDL/HDL_e_NR2F1_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./HDL/HDL_a_NR2F1_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./NR2F1_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eNR2F1,in_fn2=loc2_aNR2F1,title1 = "NR2F1 trans-eQTL", title2 = "NR2F1 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_AGT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-eQTL",snp = "rs287621") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_a_AGT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_aAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-aQTL",snp = "rs287621") # Top BMI GWAS SNP
dev.off()
pdf("./AGT_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs287621") # Top BMI GWAS SNP
dev.off()
## 12p13.1
# First, grab the necessary P-values for the SNPs used in the HyPrColoc analyses for the traits of interest
bmi4=filt_bmi[match(rownames(filt_ld[[4]]),filt_bmi$SNP),c(3,9)]
loc4_eANG=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="ANG",c(1,4)]
loc4_aANG=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="ANG",c(1,4)]
loc4_eID2=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="ID2",c(1,4)]
loc4_aID2=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="ID2",c(1,4)]
loc4_ePTPRJ=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="PTPRJ",c(1,4)]
loc4_aPTPRJ=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="PTPRJ",c(1,4)]
loc4_eTENM4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="TENM4",c(1,4)]
loc4_aTENM4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="TENM4",c(1,4)]
loc4_eEPHB2=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="EPHB2",c(1,4)]
loc4_aEPHB2=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="EPHB2",c(1,4)]
colnames(bmi4)=c("rsid","pval")
colnames(loc4_eANG)=c("rsid","pval")
colnames(loc4_aANG)=c("rsid","pval")
colnames(loc4_eID2)=c("rsid","pval")
colnames(loc4_aID2)=c("rsid","pval")
colnames(loc4_ePTPRJ)=c("rsid","pval")
colnames(loc4_aPTPRJ)=c("rsid","pval")
colnames(loc4_eTENM4)=c("rsid","pval")
colnames(loc4_aTENM4)=c("rsid","pval")
colnames(loc4_eEPHB2)=c("rsid","pval")
colnames(loc4_aEPHB2)=c("rsid","pval")
rownames(bmi4)=bmi4$rsid
rownames(loc4_eANG)=loc4_eANG$rsid
rownames(loc4_aANG)=loc4_aANG$rsid
rownames(loc4_eID2)=loc4_eID2$rsid
rownames(loc4_aID2)=loc4_aID2$rsid
rownames(loc4_ePTPRJ)=loc4_ePTPRJ$rsid
rownames(loc4_aPTPRJ)=loc4_aPTPRJ$rsid
rownames(loc4_eTENM4)=loc4_eTENM4$rsid
rownames(loc4_aTENM4)=loc4_aTENM4$rsid
rownames(loc4_eEPHB2)=loc4_eEPHB2$rsid
rownames(loc4_aEPHB2)=loc4_aEPHB2$rsid
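# Note: locuscompare() accepts data frames with 'rsid' and 'pval' columns (hence the
# renaming above); the rownames are set only for convenient SNP lookups.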
# Check out some relevant LocusCompare plots before picking which to write to file
locuscompare(in_fn1=bmi4,in_fn2=loc4_eEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_eANG,title1 = "BMI GWAS", title2 = "ANG trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aANG,title1 = "BMI GWAS", title2 = "ANG trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_eID2,title1 = "BMI GWAS", title2 = "ID2 trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aID2,title1 = "BMI GWAS", title2 = "ID2 trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_ePTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aPTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_eTENM4,title1 = "BMI GWAS", title2 = "TENM4 trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aTENM4,title1 = "BMI GWAS", title2 = "TENM4 trans-aQTL",snp = "rs12422552") # Top GWAS SNP
# Let's write some to PDFs
pdf("./BMI/rs12422552-ANG_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_eANG,title1 = "BMI GWAS", title2 = "ANG cis-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-ANG_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aANG,title1 = "BMI GWAS", title2 = "ANG cis-aQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-ID2_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_eID2,title1 = "BMI GWAS", title2 = "ID2 cis-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-ID2_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aID2,title1 = "BMI GWAS", title2 = "ID2 cis-aQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-PTPRJ_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_ePTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ cis-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-PTPRJ_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aPTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ cis-aQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-TENM4_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_eTENM4,title1 = "BMI GWAS", title2 = "TENM4 cis-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-TENM4_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aTENM4,title1 = "BMI GWAS", title2 = "TENM4 cis-aQTL",snp = "rs12422552")
dev.off()
### Let's switch to pulling out data for Cytoscape network visualizations
# Read in data
bmi_pairColoc=read.table("./BMI/Pairwise_HyPrColoc_between_BMI_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
t2d_pairColoc=read.table("./T2D/Pairwise_HyPrColoc_between_BMIadjT2D_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
hdl_pairColoc=read.table("./HDL/Pairwise_HyPrColoc_between_HDL_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
triG_pairColoc=read.table("./Triglycerides/Pairwise_HyPrColoc_between_TriG_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
ephb2_pairColoc=read.table("./BMI/Pairwise_HyPrColoc_between_EPHB2_aQTL_and_each_other_QTL_for_1p36.txt",sep = "\t",header = T)
bmi_mrs=read.table("./BMI/Eurobats_adipose_time-matched_BMI_MRs_from_RF_modeling.txt",header = F)
homair_mrs=read.table("./HOMA-IR/Eurobats_adipose_time-matched_HOMA-IR_MRs_from_RF_modeling.txt",header = F)
hdl_mrs=read.table("./HDL/Eurobats_adipose_time-matched_HDL_MRs_from_RF_modeling.txt",header = F)
triG_mrs=read.table("./Triglycerides/Eurobats_adipose_time-matched_Triglycerides_MRs_from_RF_modeling.txt",header = F)
interactome=read.table("../Adipose expression data/FINAL_logTPMs_and_activities/Eurobats_adipose_900boots_regulon_with_LINC-PINT.txt",sep = "\t",header = T)
tpm=read.table("../Adipose expression data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_qnorm_INT_logTPMs_for_all_expressed_genes.txt",
sep = "\t",header = T,row.names = 1)
vip=read.table("../Adipose expression data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_unnormalized_activities_from_logTPM_for_4213_regulators.txt",
sep = "\t",header = T,row.names = 1)
phenos=read.table("../Eurobats phenotypes/Amendment_time-matched_phenotypes_E886_02082019_with_HOMA.txt",sep="\t",header = T,row.names = 1)
filt_pheno=phenos[na.omit(match(colnames(vip),rownames(phenos))),]
all(colnames(vip)==colnames(tpm)) # TRUE
all(colnames(vip)==rownames(filt_pheno)) # TRUE
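# A hedged alternative to eyeballing TRUE above: make the sanity checks fail loudly
stopifnot(all(colnames(vip)==colnames(tpm)),
          all(colnames(vip)==rownames(filt_pheno)))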
sig_bmi=filt_bmi[filt_bmi$P<=5E-8,]
sig_t2d=filt_t2d[filt_t2d$Pvalue<=5E-8,]
sig_hdl=filt_hdl[filt_hdl$P<=5E-8,]
sig_triG=filt_triG[filt_triG$P<=5E-8,]
# Grab relevant sub-interactomes
bmi_MRregs=interactome[interactome$Target %in% bmi_mrs[,1],]
bmi_MRMR=bmi_MRregs[bmi_MRregs$Regulator %in% bmi_mrs[,1],]
homair_MRregs=interactome[interactome$Target %in% homair_mrs[,1],]
homair_MRMR=homair_MRregs[homair_MRregs$Regulator %in% homair_mrs[,1],]
hdl_MRregs=interactome[interactome$Target %in% hdl_mrs[,1],]
hdl_MRMR=hdl_MRregs[hdl_MRregs$Regulator %in% hdl_mrs[,1],]
triG_MRregs=interactome[interactome$Target %in% triG_mrs[,1],]
triG_MRMR=triG_MRregs[triG_MRregs$Regulator %in% triG_mrs[,1],]
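# The four blocks above repeat one filter; a hedged helper sketch (get_mr_subnet is a
# hypothetical name) that returns the same MR-target and MR-MR edge sets:
get_mr_subnet=function(net,mrs){
  regs=net[net$Target %in% mrs,] # edges targeting any MR
  list(MRregs=regs,MRMR=regs[regs$Regulator %in% mrs,]) # and MR-to-MR edges
}
# e.g. identical(get_mr_subnet(interactome,bmi_mrs[,1])$MRMR,bmi_MRMR) should be TRUE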
# 1p36
# Grab interactions between EPHB2 and MRs in adipose interactome
EPHB2mrs=bmi_MRregs[bmi_MRregs$Regulator=="EPHB2",]
mrsEPHB2=interactome[(interactome$Regulator %in% bmi_mrs[,1]) & (interactome$Target=="EPHB2"),]
interactome1p36=rbind(bmi_MRMR,EPHB2mrs,mrsEPHB2)
# Grab pairwise colocalizations with PP>0.5 between BMI and each QTL, and between the EPHB2 cis-aQTL and the trans-QTLs
bmi_pairColoc1p36=bmi_pairColoc[bmi_pairColoc$locus=="1p36.1" & bmi_pairColoc$posterior_prob>0.5,c(2,3,5)]
bmi_pairColoc1p36$traits=gsub("BMI, ","",bmi_pairColoc1p36$traits)
bmi_pairColoc1p36=cbind("trait1"=rep("BMI",dim(bmi_pairColoc1p36)[1]),bmi_pairColoc1p36)
bmi_e_pairColoc1=bmi_pairColoc1p36[grepl("-e_",bmi_pairColoc1p36$traits),]
bmi_a_pairColoc1=bmi_pairColoc1p36[grepl("-a_",bmi_pairColoc1p36$traits),]
bmi_e_pairColoc1$traits=gsub(".*_","",bmi_e_pairColoc1$traits)
bmi_a_pairColoc1$traits=gsub(".*_","",bmi_a_pairColoc1$traits)
bmi_netPair1p36=rbind(bmi_e_pairColoc1,bmi_a_pairColoc1)
bmi_netPair1p36=bmi_netPair1p36[!duplicated(bmi_netPair1p36$traits),-c(3,4)]
bmi_netPair1p36$eQTL_PP=bmi_e_pairColoc1[match(bmi_netPair1p36$traits,bmi_e_pairColoc1$traits),3]
bmi_netPair1p36$eQTL_SNP=bmi_e_pairColoc1[match(bmi_netPair1p36$traits,bmi_e_pairColoc1$traits),4]
bmi_netPair1p36$aQTL_PP=bmi_a_pairColoc1[match(bmi_netPair1p36$traits,bmi_a_pairColoc1$traits),3]
bmi_netPair1p36$aQTL_SNP=bmi_a_pairColoc1[match(bmi_netPair1p36$traits,bmi_a_pairColoc1$traits),4]
ephb2Coloc1=ephb2_pairColoc[!is.na(ephb2_pairColoc$candidate_snp),c(2,3,5)]
ephb2Coloc1=ephb2Coloc1[ephb2Coloc1$posterior_prob>0.5,]
ephb2Coloc1$traits=gsub("cis-a_EPHB2, ","",ephb2Coloc1$traits)
ephb2Coloc1=cbind("trait1"=rep("EPHB2",dim(ephb2Coloc1)[1]),ephb2Coloc1)
e_ephb2Coloc1=ephb2Coloc1[grepl("-e_",ephb2Coloc1$traits),]
a_ephb2Coloc1=ephb2Coloc1[grepl("-a_",ephb2Coloc1$traits),]
e_ephb2Coloc1$traits=gsub(".*_","",e_ephb2Coloc1$traits)
a_ephb2Coloc1$traits=gsub(".*_","",a_ephb2Coloc1$traits)
netEPHB2=rbind(e_ephb2Coloc1,a_ephb2Coloc1)
netEPHB2=netEPHB2[!duplicated(netEPHB2$traits),-c(3,4)]
netEPHB2$eQTL_PP=e_ephb2Coloc1[match(netEPHB2$traits,e_ephb2Coloc1$traits),3]
netEPHB2$eQTL_SNP=e_ephb2Coloc1[match(netEPHB2$traits,e_ephb2Coloc1$traits),4]
netEPHB2$aQTL_PP=a_ephb2Coloc1[match(netEPHB2$traits,a_ephb2Coloc1$traits),3]
netEPHB2$aQTL_SNP=a_ephb2Coloc1[match(netEPHB2$traits,a_ephb2Coloc1$traits),4]
colocNet1p36=rbind(bmi_netPair1p36,netEPHB2)
colocNet1p36[is.na(colocNet1p36)]=0
# Grab the -log10(Pmin) and betas for the eQTLs and aQTLs among BMI GWAS significant SNPs at the 1p36.1 locus
# Actually, though I originally made networks with nodes shaded according to their minP QTLs, I've since
# decided to stick to a single SNP for all QTLs in the manuscript, both to avoid allele-switching issues
# and to simplify the discussion. Therefore, for this locus I will focus on rs4654828, since it tends to
# be among the top SNPs for the EPHB2 cis-aQTL and all BMI MR trans-aQTLs. However, since subsequent lines
# of code refer to the variables as min_cisE1, etc., I will keep that naming even though it is no longer accurate.
cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==1,]
cisE1=cisE1[cisE1$snps %in% sig_bmi$SNP,]
cisE1=cisE1[order(cisE1$pvalue),]
#min_cisE1=cisE1[!duplicated(cisE1$gene),]
min_cisE1=cisE1[cisE1$snps=="rs4654828",]
cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==1,]
cisA1=cisA1[cisA1$snps %in% sig_bmi$SNP,]
cisA1=cisA1[order(cisA1$pvalue),]
#min_cisA1=cisA1[!duplicated(cisA1$gene),]
min_cisA1=cisA1[cisA1$snps=="rs4654828",]
transE1=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$chr==1,]
transE1=transE1[transE1$snps %in% sig_bmi$SNP,]
transE1=transE1[order(transE1$pvalue),]
#min_transE1=transE1[!duplicated(transE1$gene),]
min_transE1=transE1[transE1$snps=="rs4654828",]
transA1=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$chr==1,]
transA1=transA1[transA1$snps %in% sig_bmi$SNP,]
transA1=transA1[order(transA1$pvalue),]
#min_transA1=transA1[!duplicated(transA1$gene),]
min_transA1=transA1[transA1$snps=="rs4654828",]
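# Hedged sanity check: confirm rs4654828 survived the filters above, since an empty
# selection here would silently propagate NAs into the node tables below
sapply(list(min_cisE1,min_cisA1,min_transE1,min_transA1),nrow)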
# Make node tables for 1p36 networks
inter_nodes1p36=data.frame("Node"=as.character(unique(interactome1p36$Regulator)),"BMI_exp_cor"=rep(0,length(unique(interactome1p36$Regulator))),
"BMI_act_cor"=rep(0,length(unique(interactome1p36$Regulator))),"rs4654828_eQTL_Beta"=rep(0,length(unique(interactome1p36$Regulator))),
"rs4654828_eQTL_logP"=rep(0,length(unique(interactome1p36$Regulator))),"rs4654828_aQTL_Beta"=rep(0,length(unique(interactome1p36$Regulator))),
"rs4654828_aQTL_logP"=rep(0,length(unique(interactome1p36$Regulator))))
for(i in 1:dim(inter_nodes1p36)[1]){
inter_nodes1p36$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(inter_nodes1p36$Node[i]),]),filt_pheno$BMI)
inter_nodes1p36$BMI_act_cor[i]=cor(as.numeric(vip[as.character(inter_nodes1p36$Node[i]),]),filt_pheno$BMI)
inter_nodes1p36$rs4654828_eQTL_Beta[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transE1$gene,
min_transE1[min_transE1$gene==as.character(inter_nodes1p36$Node[i]),"beta"],
0)
inter_nodes1p36$rs4654828_eQTL_logP[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transE1$gene,
-log10(min_transE1[min_transE1$gene==as.character(inter_nodes1p36$Node[i]),"pvalue"]),
0)
inter_nodes1p36$rs4654828_aQTL_Beta[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transA1$gene,
min_transA1[min_transA1$gene==as.character(inter_nodes1p36$Node[i]),"beta"],
0)
inter_nodes1p36$rs4654828_aQTL_logP[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transA1$gene,
-log10(min_transA1[min_transA1$gene==as.character(inter_nodes1p36$Node[i]),"pvalue"]),
0)
}
coloc_nodes1p36=data.frame("Node"=as.character(unique(colocNet1p36$traits)),"BMI_exp_cor"=rep(0,length(unique(colocNet1p36$traits))),
"BMI_act_cor"=rep(0,length(unique(colocNet1p36$traits))),"rs4654828_eQTL_Beta"=rep(0,length(unique(colocNet1p36$traits))),
"rs4654828_eQTL_logP"=rep(0,length(unique(colocNet1p36$traits))),"rs4654828_aQTL_Beta"=rep(0,length(unique(colocNet1p36$traits))),
"rs4654828_aQTL_logP"=rep(0,length(unique(colocNet1p36$traits))))
for(i in 1:dim(coloc_nodes1p36)[1]){
coloc_nodes1p36$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(coloc_nodes1p36$Node[i]),]),filt_pheno$BMI)
coloc_nodes1p36$BMI_act_cor[i]=cor(as.numeric(vip[as.character(coloc_nodes1p36$Node[i]),]),filt_pheno$BMI)
coloc_nodes1p36$rs4654828_eQTL_Beta[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transE1$gene,
min_transE1[min_transE1$gene==as.character(coloc_nodes1p36$Node[i]),"beta"],
0)
coloc_nodes1p36$rs4654828_eQTL_logP[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transE1$gene,
-log10(min_transE1[min_transE1$gene==as.character(coloc_nodes1p36$Node[i]),"pvalue"]),
0)
coloc_nodes1p36$rs4654828_aQTL_Beta[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transA1$gene,
min_transA1[min_transA1$gene==as.character(coloc_nodes1p36$Node[i]),"beta"],
0)
coloc_nodes1p36$rs4654828_aQTL_logP[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transA1$gene,
-log10(min_transA1[min_transA1$gene==as.character(coloc_nodes1p36$Node[i]),"pvalue"]),
0)
}
# Since EPHB2 is the only cis gene here, I'll just deal with it manually
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_eQTL_Beta"]=min_cisE1[min_cisE1$gene=="EPHB2","beta"]
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_eQTL_logP"]=-log10(min_cisE1[min_cisE1$gene=="EPHB2","pvalue"])
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_aQTL_Beta"]=min_cisA1[min_cisA1$gene=="EPHB2","beta"]
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_aQTL_logP"]=-log10(min_cisA1[min_cisA1$gene=="EPHB2","pvalue"])
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_eQTL_Beta"]=min_cisE1[min_cisE1$gene=="EPHB2","beta"]
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_eQTL_logP"]=-log10(min_cisE1[min_cisE1$gene=="EPHB2","pvalue"])
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_aQTL_Beta"]=min_cisA1[min_cisA1$gene=="EPHB2","beta"]
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_aQTL_logP"]=-log10(min_cisA1[min_cisA1$gene=="EPHB2","pvalue"])
# I think it may be more convenient to merge the networks into one and then just change which attributes I visualize in Cytoscape
# Start with 2 temporary columns concatenating the regulator-target and target-regulator pairs for easier matching.
interactome1p36$temp1=paste(interactome1p36$Regulator,interactome1p36$Target)
interactome1p36$temp2=paste(interactome1p36$Target,interactome1p36$Regulator)
colocNet1p36$temp1=paste(colocNet1p36$trait1,colocNet1p36$traits)
colocNet1p36$temp2=paste(colocNet1p36$traits,colocNet1p36$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(interactome1p36)[1],ncol = 4))
for(i in 1:dim(interactome1p36)[1]){
temp[i,1:4]=colocNet1p36[ifelse(is.na(match(interactome1p36$temp1[i],colocNet1p36$temp1)),
match(interactome1p36$temp1[i],colocNet1p36$temp2),
match(interactome1p36$temp1[i],colocNet1p36$temp1)),3:6]
}
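# The loop above can also be vectorized; a hedged sketch computing the same rows
# (temp_vec is a hypothetical name so the loop's temp is not clobbered):
idx=match(interactome1p36$temp1,colocNet1p36$temp1)
idx[is.na(idx)]=match(interactome1p36$temp1,colocNet1p36$temp2)[is.na(idx)]
temp_vec=colocNet1p36[idx,3:6] # same values as temp, row for row, before the NA replacement below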
temp[is.na(temp)]=0
# Then combine with the BMI colocalizations
colnames(temp)=colnames(colocNet1p36)[3:6]
temp=rbind(temp,colocNet1p36[colocNet1p36$trait1=="BMI",3:6])
# Then add rows for BMI-Gene connections with 0 for MoA and likelihood
full1p36=interactome1p36[,1:4]
temp2=colocNet1p36[colocNet1p36$trait1=="BMI",1:4]
colnames(temp2)=colnames(interactome1p36)[1:4]
temp2[,3:4]=0
full1p36=rbind(full1p36,temp2)
# Finally, combine the colocalization columns with the interactome columns
full1p36=cbind(full1p36,temp)
# The nodes data also needs to be combined and duplicate rows removed
full1p36_nodes=rbind(inter_nodes1p36,coloc_nodes1p36)
full1p36_nodes=full1p36_nodes[!duplicated(full1p36_nodes$Node),]
# Write networks and node data to file for Cytoscape visualizations
write.table(interactome1p36,"Chr1p36_EPHB2_and_BMI_MRs_interactome.txt",sep = "\t",quote = F,row.names = F)
write.table(inter_nodes1p36,"Chr1p36_EPHB2_and_BMI_MRs_interactome_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(colocNet1p36,"Chr1p36_BMI_EPHB2_and_BMI_MRs_pairwise_colocalization_network.txt",sep = "\t",quote = F,row.names = F)
write.table(coloc_nodes1p36,"Chr1p36_BMI_EPHB2_and_BMI_MRs_pairwise_colocalization_network_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(full1p36,"Chr1p36_EPHB2_and_BMI_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(full1p36_nodes,"Chr1p36_EPHB2_and_BMI_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
# 7q32
# Grab interactions between LINC-PINT, KLF14 and MRs in adipose interactome
linc_bmi_mrs=bmi_MRregs[bmi_MRregs$Regulator=="LINC-PINT",]
linc_homair_mrs=homair_MRregs[homair_MRregs$Regulator=="LINC-PINT",]
linc_hdl_mrs=hdl_MRregs[hdl_MRregs$Regulator=="LINC-PINT",]
linc_triG_mrs=triG_MRregs[triG_MRregs$Regulator=="LINC-PINT",]
bmi_mrsLINC=interactome[(interactome$Regulator %in% bmi_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
homair_mrsLINC=interactome[(interactome$Regulator %in% homair_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
hdl_mrsLINC=interactome[(interactome$Regulator %in% hdl_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
triG_mrsLINC=interactome[(interactome$Regulator %in% triG_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
klf14_bmi_mrs=bmi_MRregs[bmi_MRregs$Regulator=="KLF14",]
klf14_homair_mrs=homair_MRregs[homair_MRregs$Regulator=="KLF14",]
klf14_hdl_mrs=hdl_MRregs[hdl_MRregs$Regulator=="KLF14",]
klf14_triG_mrs=triG_MRregs[triG_MRregs$Regulator=="KLF14",]
bmi_mrsKLF14=interactome[(interactome$Regulator %in% bmi_mrs[,1]) & (interactome$Target=="KLF14"),]
homair_mrsKLF14=interactome[(interactome$Regulator %in% homair_mrs[,1]) & (interactome$Target=="KLF14"),]
hdl_mrsKLF14=interactome[(interactome$Regulator %in% hdl_mrs[,1]) & (interactome$Target=="KLF14"),]
triG_mrsKLF14=interactome[(interactome$Regulator %in% triG_mrs[,1]) & (interactome$Target=="KLF14"),]
bmi_interactome7q32=rbind(bmi_MRMR,linc_bmi_mrs,bmi_mrsLINC,klf14_bmi_mrs,bmi_mrsKLF14)
homair_interactome7q32=rbind(homair_MRMR,linc_homair_mrs,homair_mrsLINC,klf14_homair_mrs,homair_mrsKLF14)
hdl_interactome7q32=rbind(hdl_MRMR,linc_hdl_mrs,hdl_mrsLINC,klf14_hdl_mrs,hdl_mrsKLF14)
triG_interactome7q32=rbind(triG_MRMR,linc_triG_mrs,triG_mrsLINC,klf14_triG_mrs,triG_mrsKLF14)
# Grab pairwise colocalizations with PP>0.5 between each GWAS and QTLs. I have not yet run pairwise colocalization analyses for LINC-PINT or KLF14.
bmi_pairColoc7q32=bmi_pairColoc[bmi_pairColoc$locus=="7q32" & bmi_pairColoc$posterior_prob>0.5,c(2,3,5)]
bmi_pairColoc7q32$traits=gsub("BMI, ","",bmi_pairColoc7q32$traits)
bmi_pairColoc7q32=cbind("trait1"=rep("BMI",dim(bmi_pairColoc7q32)[1]),bmi_pairColoc7q32)
bmi_e_pairColoc1=bmi_pairColoc7q32[grepl("-e_",bmi_pairColoc7q32$traits),]
bmi_a_pairColoc1=bmi_pairColoc7q32[grepl("-a_",bmi_pairColoc7q32$traits),]
bmi_e_pairColoc1$traits=gsub(".*_","",bmi_e_pairColoc1$traits)
bmi_a_pairColoc1$traits=gsub(".*_","",bmi_a_pairColoc1$traits)
bmi_netPair7q32=rbind(bmi_e_pairColoc1,bmi_a_pairColoc1)
bmi_netPair7q32=bmi_netPair7q32[!duplicated(bmi_netPair7q32$traits),-c(3,4)]
bmi_netPair7q32$eQTL_PP=bmi_e_pairColoc1[match(bmi_netPair7q32$traits,bmi_e_pairColoc1$traits),3]
bmi_netPair7q32$eQTL_SNP=bmi_e_pairColoc1[match(bmi_netPair7q32$traits,bmi_e_pairColoc1$traits),4]
bmi_netPair7q32$aQTL_PP=bmi_a_pairColoc1[match(bmi_netPair7q32$traits,bmi_a_pairColoc1$traits),3]
bmi_netPair7q32$aQTL_SNP=bmi_a_pairColoc1[match(bmi_netPair7q32$traits,bmi_a_pairColoc1$traits),4]
bmi_netPair7q32[is.na(bmi_netPair7q32)]=0
t2d_pairColoc7q32=t2d_pairColoc[t2d_pairColoc$locus=="7q32" & t2d_pairColoc$posterior_prob>0.5,c(2,3,5)]
t2d_pairColoc7q32$traits=gsub("T2D, ","",t2d_pairColoc7q32$traits)
t2d_pairColoc7q32=cbind("trait1"=rep("T2D",dim(t2d_pairColoc7q32)[1]),t2d_pairColoc7q32)
t2d_e_pairColoc1=t2d_pairColoc7q32[grepl("-e_",t2d_pairColoc7q32$traits),]
t2d_a_pairColoc1=t2d_pairColoc7q32[grepl("-a_",t2d_pairColoc7q32$traits),]
t2d_e_pairColoc1$traits=gsub(".*_","",t2d_e_pairColoc1$traits)
t2d_a_pairColoc1$traits=gsub(".*_","",t2d_a_pairColoc1$traits)
t2d_netPair7q32=rbind(t2d_e_pairColoc1,t2d_a_pairColoc1)
t2d_netPair7q32=t2d_netPair7q32[!duplicated(t2d_netPair7q32$traits),-c(3,4)]
t2d_netPair7q32$eQTL_PP=t2d_e_pairColoc1[match(t2d_netPair7q32$traits,t2d_e_pairColoc1$traits),3]
t2d_netPair7q32$eQTL_SNP=t2d_e_pairColoc1[match(t2d_netPair7q32$traits,t2d_e_pairColoc1$traits),4]
t2d_netPair7q32$aQTL_PP=t2d_a_pairColoc1[match(t2d_netPair7q32$traits,t2d_a_pairColoc1$traits),3]
t2d_netPair7q32$aQTL_SNP=t2d_a_pairColoc1[match(t2d_netPair7q32$traits,t2d_a_pairColoc1$traits),4]
t2d_netPair7q32[is.na(t2d_netPair7q32)]=0
hdl_pairColoc7q32=hdl_pairColoc[hdl_pairColoc$locus=="7q32" & hdl_pairColoc$posterior_prob>0.5,c(2,3,5)]
hdl_pairColoc7q32$traits=gsub("HDL, ","",hdl_pairColoc7q32$traits)
hdl_pairColoc7q32=cbind("trait1"=rep("HDL",dim(hdl_pairColoc7q32)[1]),hdl_pairColoc7q32)
hdl_e_pairColoc1=hdl_pairColoc7q32[grepl("-e_",hdl_pairColoc7q32$traits),]
hdl_a_pairColoc1=hdl_pairColoc7q32[grepl("-a_",hdl_pairColoc7q32$traits),]
hdl_e_pairColoc1$traits=gsub(".*_","",hdl_e_pairColoc1$traits)
hdl_a_pairColoc1$traits=gsub(".*_","",hdl_a_pairColoc1$traits)
hdl_netPair7q32=rbind(hdl_e_pairColoc1,hdl_a_pairColoc1)
hdl_netPair7q32=hdl_netPair7q32[!duplicated(hdl_netPair7q32$traits),-c(3,4)]
hdl_netPair7q32$eQTL_PP=hdl_e_pairColoc1[match(hdl_netPair7q32$traits,hdl_e_pairColoc1$traits),3]
hdl_netPair7q32$eQTL_SNP=hdl_e_pairColoc1[match(hdl_netPair7q32$traits,hdl_e_pairColoc1$traits),4]
hdl_netPair7q32$aQTL_PP=hdl_a_pairColoc1[match(hdl_netPair7q32$traits,hdl_a_pairColoc1$traits),3]
hdl_netPair7q32$aQTL_SNP=hdl_a_pairColoc1[match(hdl_netPair7q32$traits,hdl_a_pairColoc1$traits),4]
hdl_netPair7q32[is.na(hdl_netPair7q32)]=0
triG_pairColoc7q32=triG_pairColoc[triG_pairColoc$locus=="7q32" & triG_pairColoc$posterior_prob>0.5,c(2,3,5)]
triG_pairColoc7q32$traits=gsub("TriG, ","",triG_pairColoc7q32$traits)
triG_pairColoc7q32=cbind("trait1"=rep("TriG",dim(triG_pairColoc7q32)[1]),triG_pairColoc7q32)
triG_e_pairColoc1=triG_pairColoc7q32[grepl("-e_",triG_pairColoc7q32$traits),]
triG_a_pairColoc1=triG_pairColoc7q32[grepl("-a_",triG_pairColoc7q32$traits),]
triG_e_pairColoc1$traits=gsub(".*_","",triG_e_pairColoc1$traits)
triG_a_pairColoc1$traits=gsub(".*_","",triG_a_pairColoc1$traits)
triG_netPair7q32=rbind(triG_e_pairColoc1,triG_a_pairColoc1)
triG_netPair7q32=triG_netPair7q32[!duplicated(triG_netPair7q32$traits),-c(3,4)]
triG_netPair7q32$eQTL_PP=triG_e_pairColoc1[match(triG_netPair7q32$traits,triG_e_pairColoc1$traits),3]
triG_netPair7q32$eQTL_SNP=triG_e_pairColoc1[match(triG_netPair7q32$traits,triG_e_pairColoc1$traits),4]
triG_netPair7q32$aQTL_PP=triG_a_pairColoc1[match(triG_netPair7q32$traits,triG_a_pairColoc1$traits),3]
triG_netPair7q32$aQTL_SNP=triG_a_pairColoc1[match(triG_netPair7q32$traits,triG_a_pairColoc1$traits),4]
triG_netPair7q32[is.na(triG_netPair7q32)]=0
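# All four netPair blocks above follow one recipe; a hedged helper sketch (make_netPair is
# a hypothetical name; assumes the same c(2,3,5) column layout as bmi_pairColoc):
make_netPair=function(coloc,locus,label){
  x=coloc[coloc$locus==locus & coloc$posterior_prob>0.5,c(2,3,5)]
  x$traits=gsub(paste0(label,", "),"",x$traits) # drop the GWAS label from the trait pair
  x=cbind("trait1"=rep(label,nrow(x)),x)
  e=x[grepl("-e_",x$traits),]
  a=x[grepl("-a_",x$traits),]
  e$traits=gsub(".*_","",e$traits)
  a$traits=gsub(".*_","",a$traits)
  out=rbind(e,a)
  out=out[!duplicated(out$traits),-c(3,4)]
  out$eQTL_PP=e[match(out$traits,e$traits),3]
  out$eQTL_SNP=e[match(out$traits,e$traits),4]
  out$aQTL_PP=a[match(out$traits,a$traits),3]
  out$aQTL_SNP=a[match(out$traits,a$traits),4]
  out[is.na(out)]=0
  out
}
# e.g. make_netPair(triG_pairColoc,"7q32","TriG") should match triG_netPair7q32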
# Grab the -log10(Pmin) and betas for the eQTLs and aQTLs among GWAS significant SNPs at the 7q32 locus
bmi_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
bmi_cisE1=bmi_cisE1[bmi_cisE1$snps %in% sig_bmi$SNP,]
bmi_cisE1=bmi_cisE1[order(bmi_cisE1$pvalue),]
min_bmi_cisE1=bmi_cisE1[!duplicated(bmi_cisE1$gene),]
bmi_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
bmi_cisA1=bmi_cisA1[bmi_cisA1$snps %in% sig_bmi$SNP,]
bmi_cisA1=bmi_cisA1[order(bmi_cisA1$pvalue),]
min_bmi_cisA1=bmi_cisA1[!duplicated(bmi_cisA1$gene),]
bmi_transE1=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$chr==7,]
bmi_transE1=bmi_transE1[bmi_transE1$snps %in% sig_bmi$SNP,]
bmi_transE1=bmi_transE1[order(bmi_transE1$pvalue),]
min_bmi_transE1=bmi_transE1[!duplicated(bmi_transE1$gene),]
bmi_transA1=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$chr==7,]
bmi_transA1=bmi_transA1[bmi_transA1$snps %in% sig_bmi$SNP,]
bmi_transA1=bmi_transA1[order(bmi_transA1$pvalue),]
min_bmi_transA1=bmi_transA1[!duplicated(bmi_transA1$gene),]
t2d_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
t2d_cisE1=t2d_cisE1[t2d_cisE1$snps %in% sig_t2d$rsID,]
t2d_cisE1=t2d_cisE1[order(t2d_cisE1$pvalue),]
min_t2d_cisE1=t2d_cisE1[!duplicated(t2d_cisE1$gene),]
t2d_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
t2d_cisA1=t2d_cisA1[t2d_cisA1$snps %in% sig_t2d$rsID,]
t2d_cisA1=t2d_cisA1[order(t2d_cisA1$pvalue),]
min_t2d_cisA1=t2d_cisA1[!duplicated(t2d_cisA1$gene),]
t2d_transE1=filt_trans_t2d_eqtl[filt_trans_t2d_eqtl$chr==7,]
t2d_transE1=t2d_transE1[t2d_transE1$snps %in% sig_t2d$rsID,]
t2d_transE1=t2d_transE1[order(t2d_transE1$pvalue),]
min_t2d_transE1=t2d_transE1[!duplicated(t2d_transE1$gene),]
t2d_transA1=filt_trans_t2d_aqtl[filt_trans_t2d_aqtl$chr==7,]
t2d_transA1=t2d_transA1[t2d_transA1$snps %in% sig_t2d$rsID,]
t2d_transA1=t2d_transA1[order(t2d_transA1$pvalue),]
min_t2d_transA1=t2d_transA1[!duplicated(t2d_transA1$gene),]
hdl_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
hdl_cisE1=hdl_cisE1[hdl_cisE1$snps %in% sig_hdl$SNP,]
hdl_cisE1=hdl_cisE1[order(hdl_cisE1$pvalue),]
min_hdl_cisE1=hdl_cisE1[!duplicated(hdl_cisE1$gene),]
hdl_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
hdl_cisA1=hdl_cisA1[hdl_cisA1$snps %in% sig_hdl$SNP,]
hdl_cisA1=hdl_cisA1[order(hdl_cisA1$pvalue),]
min_hdl_cisA1=hdl_cisA1[!duplicated(hdl_cisA1$gene),]
hdl_transE1=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$chr==7,]
hdl_transE1=hdl_transE1[hdl_transE1$snps %in% sig_hdl$SNP,]
hdl_transE1=hdl_transE1[order(hdl_transE1$pvalue),]
min_hdl_transE1=hdl_transE1[!duplicated(hdl_transE1$gene),]
hdl_transA1=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$chr==7,]
hdl_transA1=hdl_transA1[hdl_transA1$snps %in% sig_hdl$SNP,]
hdl_transA1=hdl_transA1[order(hdl_transA1$pvalue),]
min_hdl_transA1=hdl_transA1[!duplicated(hdl_transA1$gene),]
triG_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
triG_cisE1=triG_cisE1[triG_cisE1$snps %in% sig_triG$SNP,]
triG_cisE1=triG_cisE1[order(triG_cisE1$pvalue),]
min_triG_cisE1=triG_cisE1[!duplicated(triG_cisE1$gene),]
triG_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
triG_cisA1=triG_cisA1[triG_cisA1$snps %in% sig_triG$SNP,]
triG_cisA1=triG_cisA1[order(triG_cisA1$pvalue),]
min_triG_cisA1=triG_cisA1[!duplicated(triG_cisA1$gene),]
triG_transE1=filt_trans_triG_eqtl[filt_trans_triG_eqtl$chr==7,]
triG_transE1=triG_transE1[triG_transE1$snps %in% sig_triG$SNP,]
triG_transE1=triG_transE1[order(triG_transE1$pvalue),]
min_triG_transE1=triG_transE1[!duplicated(triG_transE1$gene),]
triG_transA1=filt_trans_triG_aqtl[filt_trans_triG_aqtl$chr==7,]
triG_transA1=triG_transA1[triG_transA1$snps %in% sig_triG$SNP,]
triG_transA1=triG_transA1[order(triG_transA1$pvalue),]
min_triG_transA1=triG_transA1[!duplicated(triG_transA1$gene),]
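# The twenty blocks above repeat one pattern; a hedged helper sketch (min_qtl is a
# hypothetical name) returning each gene's minimum-P QTL among a GWAS's significant SNPs:
min_qtl=function(qtl,chrom,snps){
  x=qtl[qtl$chr==chrom,]
  x=x[x$snps %in% snps,]
  x=x[order(x$pvalue),]
  x[!duplicated(x$gene),] # first (smallest-P) row per gene
}
# e.g. identical(min_qtl(filt_cis_eqtl,7,sig_bmi$SNP),min_bmi_cisE1) should be TRUE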
# Make node tables for 7q32 networks for each GWAS. Note that for some GWAS traits (T2D and TriG) none of the MRs
# connect with LINC-PINT or KLF14, so I manually add those genes to the node lists where needed.
# BMI
bmi_inter_nodes7q32=data.frame("Node"=as.character(unique(bmi_interactome7q32$Regulator)),"BMI_exp_cor"=rep(0,length(unique(bmi_interactome7q32$Regulator))),
"BMI_act_cor"=rep(0,length(unique(bmi_interactome7q32$Regulator))),"Best_eQTL_Beta"=rep(0,length(unique(bmi_interactome7q32$Regulator))),
"Best_eQTL_logP"=rep(0,length(unique(bmi_interactome7q32$Regulator))),"Best_aQTL_Beta"=rep(0,length(unique(bmi_interactome7q32$Regulator))),
"Best_aQTL_logP"=rep(0,length(unique(bmi_interactome7q32$Regulator))))
bmi_coloc_nodes7q32=data.frame("Node"=as.character(unique(bmi_netPair7q32$traits)),"BMI_exp_cor"=rep(0,length(unique(bmi_netPair7q32$traits))),
"BMI_act_cor"=rep(0,length(unique(bmi_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(bmi_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(bmi_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(bmi_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(bmi_netPair7q32$traits))))
for(i in 1:dim(bmi_inter_nodes7q32)[1]){
bmi_inter_nodes7q32$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(bmi_inter_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_inter_nodes7q32$BMI_act_cor[i]=cor(as.numeric(vip[as.character(bmi_inter_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"beta"],
0)
bmi_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
-log10(min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"pvalue"]),
0)
bmi_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"beta"],
0)
bmi_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
-log10(min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(bmi_coloc_nodes7q32)[1]){
bmi_coloc_nodes7q32$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(bmi_coloc_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_coloc_nodes7q32$BMI_act_cor[i]=cor(as.numeric(vip[as.character(bmi_coloc_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"beta"],
0)
bmi_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
-log10(min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
bmi_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"beta"],
0)
bmi_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
-log10(min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
# T2D
t2d_inter_nodes7q32=data.frame("Node"=c(as.character(unique(homair_interactome7q32$Regulator)),"LINC-PINT"),"HOMA.IR_exp_cor"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),
"HOMA.IR_act_cor"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),"Best_eQTL_Beta"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),
"Best_eQTL_logP"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),"Best_aQTL_Beta"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),
"Best_aQTL_logP"=rep(0,length(unique(homair_interactome7q32$Regulator))+1))
t2d_coloc_nodes7q32=data.frame("Node"=as.character(unique(t2d_netPair7q32$traits)),"HOMA.IR_exp_cor"=rep(0,length(unique(t2d_netPair7q32$traits))),
"HOMA.IR_act_cor"=rep(0,length(unique(t2d_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(t2d_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(t2d_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(t2d_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(t2d_netPair7q32$traits))))
for(i in 1:dim(t2d_inter_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HOMA.IR)]
t2d_inter_nodes7q32$HOMA.IR_exp_cor[i]=cor(as.numeric(tpm[as.character(t2d_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_inter_nodes7q32$HOMA.IR_act_cor[i]=cor(as.numeric(vip[as.character(t2d_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"beta"],
0)
t2d_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
-log10(min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"pvalue"]),
0)
t2d_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"beta"],
0)
t2d_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
-log10(min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(t2d_coloc_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HOMA.IR)]
t2d_coloc_nodes7q32$HOMA.IR_exp_cor[i]=cor(as.numeric(tpm[as.character(t2d_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_coloc_nodes7q32$HOMA.IR_act_cor[i]=cor(as.numeric(vip[as.character(t2d_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"beta"],
0)
t2d_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
-log10(min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
t2d_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"beta"],
0)
t2d_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
-log10(min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
# HDL
hdl_inter_nodes7q32=data.frame("Node"=as.character(unique(hdl_interactome7q32$Regulator)),"HDL_exp_cor"=rep(0,length(unique(hdl_interactome7q32$Regulator))),
"HDL_act_cor"=rep(0,length(unique(hdl_interactome7q32$Regulator))),"Best_eQTL_Beta"=rep(0,length(unique(hdl_interactome7q32$Regulator))),
"Best_eQTL_logP"=rep(0,length(unique(hdl_interactome7q32$Regulator))),"Best_aQTL_Beta"=rep(0,length(unique(hdl_interactome7q32$Regulator))),
"Best_aQTL_logP"=rep(0,length(unique(hdl_interactome7q32$Regulator))))
hdl_coloc_nodes7q32=data.frame("Node"=as.character(unique(hdl_netPair7q32$traits)),"HDL_exp_cor"=rep(0,length(unique(hdl_netPair7q32$traits))),
"HDL_act_cor"=rep(0,length(unique(hdl_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(hdl_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(hdl_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(hdl_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(hdl_netPair7q32$traits))))
for(i in 1:dim(hdl_inter_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HDLcholesterol)]
hdl_inter_nodes7q32$HDL_exp_cor[i]=cor(as.numeric(tpm[as.character(hdl_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_inter_nodes7q32$HDL_act_cor[i]=cor(as.numeric(vip[as.character(hdl_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"beta"],
0)
hdl_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
-log10(min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"pvalue"]),
0)
hdl_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"beta"],
0)
hdl_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
-log10(min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(hdl_coloc_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HDLcholesterol)]
hdl_coloc_nodes7q32$HDL_exp_cor[i]=cor(as.numeric(tpm[as.character(hdl_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_coloc_nodes7q32$HDL_act_cor[i]=cor(as.numeric(vip[as.character(hdl_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"beta"],
0)
hdl_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
-log10(min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
hdl_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"beta"],
0)
hdl_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
-log10(min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
# TriG
triG_inter_nodes7q32=data.frame("Node"=c(as.character(unique(triG_interactome7q32$Regulator)),"LINC-PINT","KLF14"),"TriG_exp_cor"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),
"TriG_act_cor"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),"Best_eQTL_Beta"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),
"Best_eQTL_logP"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),"Best_aQTL_Beta"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),
"Best_aQTL_logP"=rep(0,length(unique(triG_interactome7q32$Regulator))+2))
triG_coloc_nodes7q32=data.frame("Node"=as.character(unique(triG_netPair7q32$traits)),"TriG_exp_cor"=rep(0,length(unique(triG_netPair7q32$traits))),
"TriG_act_cor"=rep(0,length(unique(triG_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(triG_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(triG_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(triG_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(triG_netPair7q32$traits))))
for(i in 1:dim(triG_inter_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$TotalTriglycerides)]
triG_inter_nodes7q32$TriG_exp_cor[i]=cor(as.numeric(tpm[as.character(triG_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_inter_nodes7q32$TriG_act_cor[i]=cor(as.numeric(vip[as.character(triG_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transE1$gene,
min_triG_transE1[min_triG_transE1$gene==as.character(triG_inter_nodes7q32$Node[i]),"beta"],
0)
triG_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transE1$gene,
-log10(min_triG_transE1[min_triG_transE1$gene==as.character(triG_inter_nodes7q32$Node[i]),"pvalue"]),
0)
triG_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transA1$gene,
min_triG_transA1[min_triG_transA1$gene==as.character(triG_inter_nodes7q32$Node[i]),"beta"],
0)
triG_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transA1$gene,
-log10(min_triG_transA1[min_triG_transA1$gene==as.character(triG_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(triG_coloc_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$TotalTriglycerides)]
triG_coloc_nodes7q32$TriG_exp_cor[i]=cor(as.numeric(tpm[as.character(triG_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_coloc_nodes7q32$TriG_act_cor[i]=cor(as.numeric(vip[as.character(triG_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transE1$gene,
min_triG_transE1[min_triG_transE1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"beta"],
0)
triG_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transE1$gene,
-log10(min_triG_transE1[min_triG_transE1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
triG_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transA1$gene,
min_triG_transA1[min_triG_transA1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"beta"],
0)
triG_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transA1$gene,
-log10(min_triG_transA1[min_triG_transA1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
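# All eight node-table loops above share one fill pattern; a hedged helper sketch
# (fill_nodes is a hypothetical name; equivalent for the BMI loops only if BMI has no missing values):
fill_nodes=function(nodes,minE,minA,pheno_col){
  ok=rownames(filt_pheno)[!is.na(filt_pheno[,pheno_col])] # NA-filter as in the loops above
  for(i in 1:nrow(nodes)){
    g=as.character(nodes$Node[i])
    nodes[i,2]=cor(as.numeric(tpm[g,ok]),filt_pheno[ok,pheno_col])
    nodes[i,3]=cor(as.numeric(vip[g,ok]),filt_pheno[ok,pheno_col])
    if(g %in% minE$gene){ # otherwise the initialized 0 stays, as in the ifelse() calls above
      nodes[i,4]=minE[minE$gene==g,"beta"]
      nodes[i,5]=-log10(minE[minE$gene==g,"pvalue"])
    }
    if(g %in% minA$gene){
      nodes[i,6]=minA[minA$gene==g,"beta"]
      nodes[i,7]=-log10(minA[minA$gene==g,"pvalue"])
    }
  }
  nodes
}
# e.g. fill_nodes(triG_coloc_nodes7q32,min_triG_transE1,min_triG_transA1,"TotalTriglycerides")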
# I think it may be more convenient to merge the networks into one and then just change which attributes I visualize in Cytoscape
# Start with 2 temporary columns concatenating the regulator-target and target-regulator pairs for easier matching.
# BMI
bmi_interactome7q32$temp1=paste(bmi_interactome7q32$Regulator,bmi_interactome7q32$Target)
bmi_interactome7q32$temp2=paste(bmi_interactome7q32$Target,bmi_interactome7q32$Regulator)
bmi_netPair7q32$temp1=paste(bmi_netPair7q32$trait1,bmi_netPair7q32$traits)
bmi_netPair7q32$temp2=paste(bmi_netPair7q32$traits,bmi_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(bmi_interactome7q32)[1],ncol = 4))
for(i in 1:dim(bmi_interactome7q32)[1]){
temp[i,1:4]=bmi_netPair7q32[ifelse(is.na(match(bmi_interactome7q32$temp1[i],bmi_netPair7q32$temp1)),
match(bmi_interactome7q32$temp1[i],bmi_netPair7q32$temp2),
match(bmi_interactome7q32$temp1[i],bmi_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the BMI colocalizations
colnames(temp)=colnames(bmi_netPair7q32)[3:6]
temp=rbind(temp,bmi_netPair7q32[bmi_netPair7q32$trait1=="BMI",3:6])
# Then add rows for BMI-Gene connections with 0 for MoA and likelihood
bmi_full7q32=bmi_interactome7q32[,1:4]
temp2=bmi_netPair7q32[bmi_netPair7q32$trait1=="BMI",1:4]
colnames(temp2)=colnames(bmi_interactome7q32)[1:4]
temp2[,3:4]=0
bmi_full7q32=rbind(bmi_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
bmi_full7q32=cbind(bmi_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
bmi_full7q32_nodes=rbind(bmi_inter_nodes7q32,bmi_coloc_nodes7q32)
bmi_full7q32_nodes=bmi_full7q32_nodes[!duplicated(bmi_full7q32_nodes$Node),]
# T2D
homair_interactome7q32$temp1=paste(homair_interactome7q32$Regulator,homair_interactome7q32$Target)
homair_interactome7q32$temp2=paste(homair_interactome7q32$Target,homair_interactome7q32$Regulator)
t2d_netPair7q32$temp1=paste(t2d_netPair7q32$trait1,t2d_netPair7q32$traits)
t2d_netPair7q32$temp2=paste(t2d_netPair7q32$traits,t2d_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(homair_interactome7q32)[1],ncol = 4))
for(i in 1:dim(homair_interactome7q32)[1]){
temp[i,1:4]=t2d_netPair7q32[ifelse(is.na(match(homair_interactome7q32$temp1[i],t2d_netPair7q32$temp1)),
match(homair_interactome7q32$temp1[i],t2d_netPair7q32$temp2),
match(homair_interactome7q32$temp1[i],t2d_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the T2D colocalizations
colnames(temp)=colnames(t2d_netPair7q32)[3:6]
temp=rbind(temp,t2d_netPair7q32[t2d_netPair7q32$trait1=="T2D",3:6])
# Then add rows for T2D-Gene connections with 0 for MoA and likelihood
t2d_full7q32=homair_interactome7q32[,1:4]
temp2=t2d_netPair7q32[t2d_netPair7q32$trait1=="T2D",1:4]
colnames(temp2)=colnames(homair_interactome7q32)[1:4]
temp2[,3:4]=0
t2d_full7q32=rbind(t2d_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
t2d_full7q32=cbind(t2d_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
t2d_full7q32_nodes=rbind(t2d_inter_nodes7q32,t2d_coloc_nodes7q32)
t2d_full7q32_nodes=t2d_full7q32_nodes[!duplicated(t2d_full7q32_nodes$Node),]
# HDL
hdl_interactome7q32$temp1=paste(hdl_interactome7q32$Regulator,hdl_interactome7q32$Target)
hdl_interactome7q32$temp2=paste(hdl_interactome7q32$Target,hdl_interactome7q32$Regulator)
hdl_netPair7q32$temp1=paste(hdl_netPair7q32$trait1,hdl_netPair7q32$traits)
hdl_netPair7q32$temp2=paste(hdl_netPair7q32$traits,hdl_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(hdl_interactome7q32)[1],ncol = 4))
for(i in 1:dim(hdl_interactome7q32)[1]){
temp[i,1:4]=hdl_netPair7q32[ifelse(is.na(match(hdl_interactome7q32$temp1[i],hdl_netPair7q32$temp1)),
match(hdl_interactome7q32$temp1[i],hdl_netPair7q32$temp2),
match(hdl_interactome7q32$temp1[i],hdl_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the HDL colocalizations
colnames(temp)=colnames(hdl_netPair7q32)[3:6]
temp=rbind(temp,hdl_netPair7q32[hdl_netPair7q32$trait1=="HDL",3:6])
# Then add rows for HDL-Gene connections with 0 for MoA and likelihood
hdl_full7q32=hdl_interactome7q32[,1:4]
temp2=hdl_netPair7q32[hdl_netPair7q32$trait1=="HDL",1:4]
colnames(temp2)=colnames(hdl_interactome7q32)[1:4]
temp2[,3:4]=0
hdl_full7q32=rbind(hdl_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
hdl_full7q32=cbind(hdl_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
hdl_full7q32_nodes=rbind(hdl_inter_nodes7q32,hdl_coloc_nodes7q32)
hdl_full7q32_nodes=hdl_full7q32_nodes[!duplicated(hdl_full7q32_nodes$Node),]
# TriG
triG_interactome7q32$temp1=paste(triG_interactome7q32$Regulator,triG_interactome7q32$Target)
triG_interactome7q32$temp2=paste(triG_interactome7q32$Target,triG_interactome7q32$Regulator)
triG_netPair7q32$temp1=paste(triG_netPair7q32$trait1,triG_netPair7q32$traits)
triG_netPair7q32$temp2=paste(triG_netPair7q32$traits,triG_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(triG_interactome7q32)[1],ncol = 4))
for(i in 1:dim(triG_interactome7q32)[1]){
temp[i,1:4]=triG_netPair7q32[ifelse(is.na(match(triG_interactome7q32$temp1[i],triG_netPair7q32$temp1)),
match(triG_interactome7q32$temp1[i],triG_netPair7q32$temp2),
match(triG_interactome7q32$temp1[i],triG_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the TriG colocalizations
colnames(temp)=colnames(triG_netPair7q32)[3:6]
temp=rbind(temp,triG_netPair7q32[triG_netPair7q32$trait1=="TriG",3:6])
# Then add rows for TriG-Gene connections with 0 for MoA and likelihood
triG_full7q32=triG_interactome7q32[,1:4]
temp2=triG_netPair7q32[triG_netPair7q32$trait1=="TriG",1:4]
colnames(temp2)=colnames(triG_interactome7q32)[1:4]
temp2[,3:4]=0
triG_full7q32=rbind(triG_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
triG_full7q32=cbind(triG_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
triG_full7q32_nodes=rbind(triG_inter_nodes7q32,triG_coloc_nodes7q32)
triG_full7q32_nodes=triG_full7q32_nodes[!duplicated(triG_full7q32_nodes$Node),]
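# The 1p36 merge recipe is repeated verbatim for each GWAS above; a hedged helper sketch
# (merge_net is a hypothetical name; assumes the column layouts used above):
merge_net=function(inter,netPair,label){
  t1=paste(inter$Regulator,inter$Target)
  idx=match(t1,paste(netPair$trait1,netPair$traits))
  idx[is.na(idx)]=match(t1,paste(netPair$traits,netPair$trait1))[is.na(idx)] # undirected pair matching
  qtl=netPair[idx,3:6]
  qtl[is.na(qtl)]=0
  colnames(qtl)=colnames(netPair)[3:6]
  qtl=rbind(qtl,netPair[netPair$trait1==label,3:6])
  gwas=netPair[netPair$trait1==label,1:4] # GWAS-gene edges with 0 for MoA and likelihood
  colnames(gwas)=colnames(inter)[1:4]
  gwas[,3:4]=0
  cbind(rbind(inter[,1:4],gwas),qtl)
}
# e.g. merge_net(triG_interactome7q32,triG_netPair7q32,"TriG") should reproduce triG_full7q32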
# Since LINC-PINT, KLF14 and AC016831.7 are the only cis genes here, I'll just deal with them manually
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_bmi_cisE1[min_bmi_cisE1$gene=="LINC-PINT","beta"]
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_bmi_cisE1[min_bmi_cisE1$gene=="LINC-PINT","pvalue"])
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_bmi_cisE1[min_bmi_cisE1$gene=="KLF14","beta"]
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_bmi_cisE1[min_bmi_cisE1$gene=="KLF14","pvalue"])
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_bmi_cisE1[min_bmi_cisE1$gene=="AC016831.7","beta"]
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_bmi_cisE1[min_bmi_cisE1$gene=="AC016831.7","pvalue"])
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_t2d_cisE1[min_t2d_cisE1$gene=="LINC-PINT","beta"]
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_t2d_cisE1[min_t2d_cisE1$gene=="LINC-PINT","pvalue"])
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_t2d_cisE1[min_t2d_cisE1$gene=="KLF14","beta"]
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_t2d_cisE1[min_t2d_cisE1$gene=="KLF14","pvalue"])
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_t2d_cisE1[min_t2d_cisE1$gene=="AC016831.7","beta"]
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_t2d_cisE1[min_t2d_cisE1$gene=="AC016831.7","pvalue"])
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_hdl_cisE1[min_hdl_cisE1$gene=="LINC-PINT","beta"]
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_hdl_cisE1[min_hdl_cisE1$gene=="LINC-PINT","pvalue"])
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_hdl_cisE1[min_hdl_cisE1$gene=="KLF14","beta"]
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_hdl_cisE1[min_hdl_cisE1$gene=="KLF14","pvalue"])
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_hdl_cisE1[min_hdl_cisE1$gene=="AC016831.7","beta"]
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_hdl_cisE1[min_hdl_cisE1$gene=="AC016831.7","pvalue"])
triG_full7q32_nodes[triG_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_triG_cisE1[min_triG_cisE1$gene=="LINC-PINT","beta"]
triG_full7q32_nodes[triG_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_triG_cisE1[min_triG_cisE1$gene=="LINC-PINT","pvalue"])
triG_full7q32_nodes[triG_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_triG_cisE1[min_triG_cisE1$gene=="KLF14","beta"]
triG_full7q32_nodes[triG_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_triG_cisE1[min_triG_cisE1$gene=="KLF14","pvalue"])
triG_full7q32_nodes[triG_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_triG_cisE1[min_triG_cisE1$gene=="AC016831.7","beta"]
triG_full7q32_nodes[triG_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_triG_cisE1[min_triG_cisE1$gene=="AC016831.7","pvalue"])
# Final touches by replacing NA with 0
bmi_full7q32_nodes[is.na(bmi_full7q32_nodes)]=0
t2d_full7q32_nodes[is.na(t2d_full7q32_nodes)]=0
hdl_full7q32_nodes[is.na(hdl_full7q32_nodes)]=0
triG_full7q32_nodes[is.na(triG_full7q32_nodes)]=0
# Write networks and node data to file for Cytoscape visualizations
write.table(bmi_full7q32,"./BMI/Chr7q32_cis-Genes_and_BMI_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(bmi_full7q32_nodes,"./BMI/Chr7q32_cis-Genes_and_BMI_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(t2d_full7q32,"./T2D/Chr7q32_cis-Genes_and_HOMA-IR_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(t2d_full7q32_nodes,"./T2D/Chr7q32_cis-Genes_and_HOMA-IR_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(hdl_full7q32,"./HDL/Chr7q32_cis-Genes_and_HDL_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(hdl_full7q32_nodes,"./HDL/Chr7q32_cis-Genes_and_HDL_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(triG_full7q32,"./Triglycerides/Chr7q32_cis-Genes_and_Triglycerides_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(triG_full7q32_nodes,"./Triglycerides/Chr7q32_cis-Genes_and_Triglycerides_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
# 12p13.1
interactome12p13=bmi_MRMR
# Grab pairwise colocalizations with PP>0.5 between BMI and QTLs
bmi_pairColoc12p13=bmi_pairColoc[bmi_pairColoc$locus=="12p13.1" & bmi_pairColoc$posterior_prob>0.5,c(2,3,5)]
bmi_pairColoc12p13$traits=gsub("BMI, ","",bmi_pairColoc12p13$traits)
bmi_pairColoc12p13=cbind("trait1"=rep("BMI",dim(bmi_pairColoc12p13)[1]),bmi_pairColoc12p13)
bmi_e_pairColoc12p13=bmi_pairColoc12p13[grepl("-e_",bmi_pairColoc12p13$traits),]
bmi_a_pairColoc12p13=bmi_pairColoc12p13[grepl("-a_",bmi_pairColoc12p13$traits),]
bmi_e_pairColoc12p13$traits=gsub(".*_","",bmi_e_pairColoc12p13$traits)
bmi_a_pairColoc12p13$traits=gsub(".*_","",bmi_a_pairColoc12p13$traits)
bmi_netPair12p13=rbind(bmi_e_pairColoc12p13,bmi_a_pairColoc12p13)
bmi_netPair12p13=bmi_netPair12p13[!duplicated(bmi_netPair12p13$traits),-c(3,4)]
bmi_netPair12p13$eQTL_PP=bmi_e_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_e_pairColoc12p13$traits),3]
bmi_netPair12p13$eQTL_SNP=bmi_e_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_e_pairColoc12p13$traits),4]
bmi_netPair12p13$aQTL_PP=bmi_a_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_a_pairColoc12p13$traits),3]
bmi_netPair12p13$aQTL_SNP=bmi_a_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_a_pairColoc12p13$traits),4]
colocNet12p13=bmi_netPair12p13
colocNet12p13[is.na(colocNet12p13)]=0
# Grab the -log10(Pmin) and betas for the eQTLs and aQTLs among BMI GWAS significant SNPs at the 12p13.1 locus
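# (the 13.9-15.0 Mb window on chr12 below is assumed to bracket the 12p13.1 signal)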
transE4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$chr==12 & filt_trans_bmi_eqtl$position>13900000 & filt_trans_bmi_eqtl$position<15000000,]
transE4=transE4[transE4$snps %in% sig_bmi$SNP,]
transE4=transE4[order(transE4$pvalue),]
min_transE4=transE4[!duplicated(transE4$gene),]
transA4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$chr==12 & filt_trans_bmi_aqtl$position>13900000 & filt_trans_bmi_aqtl$position<15000000,]
transA4=transA4[transA4$snps %in% sig_bmi$SNP,]
transA4=transA4[order(transA4$pvalue),]
min_transA4=transA4[!duplicated(transA4$gene),]
# Make node tables for 12p13 networks
inter_nodes12p13=data.frame("Node"=as.character(unique(interactome12p13$Regulator)),"BMI_exp_cor"=rep(0,length(unique(interactome12p13$Regulator))),
"BMI_act_cor"=rep(0,length(unique(interactome12p13$Regulator))),"Best_eQTL_Beta"=rep(0,length(unique(interactome12p13$Regulator))),
"Best_eQTL_logP"=rep(0,length(unique(interactome12p13$Regulator))),"Best_aQTL_Beta"=rep(0,length(unique(interactome12p13$Regulator))),
"Best_aQTL_logP"=rep(0,length(unique(interactome12p13$Regulator))))
for(i in 1:dim(inter_nodes12p13)[1]){
inter_nodes12p13$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(inter_nodes12p13$Node[i]),]),filt_pheno$BMI)
inter_nodes12p13$BMI_act_cor[i]=cor(as.numeric(vip[as.character(inter_nodes12p13$Node[i]),]),filt_pheno$BMI)
inter_nodes12p13$Best_eQTL_Beta[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transE4$gene,
min_transE4[min_transE4$gene==as.character(inter_nodes12p13$Node[i]),"beta"],
0)
inter_nodes12p13$Best_eQTL_logP[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transE4$gene,
-log10(min_transE4[min_transE4$gene==as.character(inter_nodes12p13$Node[i]),"pvalue"]),
0)
inter_nodes12p13$Best_aQTL_Beta[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transA4$gene,
min_transA4[min_transA4$gene==as.character(inter_nodes12p13$Node[i]),"beta"],
0)
inter_nodes12p13$Best_aQTL_logP[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transA4$gene,
-log10(min_transA4[min_transA4$gene==as.character(inter_nodes12p13$Node[i]),"pvalue"]),
0)
}
coloc_nodes12p13=data.frame("Node"=as.character(unique(colocNet12p13$traits)),"BMI_exp_cor"=rep(0,length(unique(colocNet12p13$traits))),
"BMI_act_cor"=rep(0,length(unique(colocNet12p13$traits))),"Best_eQTL_Beta"=rep(0,length(unique(colocNet12p13$traits))),
"Best_eQTL_logP"=rep(0,length(unique(colocNet12p13$traits))),"Best_aQTL_Beta"=rep(0,length(unique(colocNet12p13$traits))),
"Best_aQTL_logP"=rep(0,length(unique(colocNet12p13$traits))))
for(i in 1:dim(coloc_nodes12p13)[1]){
coloc_nodes12p13$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(coloc_nodes12p13$Node[i]),]),filt_pheno$BMI)
coloc_nodes12p13$BMI_act_cor[i]=cor(as.numeric(vip[as.character(coloc_nodes12p13$Node[i]),]),filt_pheno$BMI)
coloc_nodes12p13$Best_eQTL_Beta[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transE4$gene,
min_transE4[min_transE4$gene==as.character(coloc_nodes12p13$Node[i]),"beta"],
0)
coloc_nodes12p13$Best_eQTL_logP[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transE4$gene,
-log10(min_transE4[min_transE4$gene==as.character(coloc_nodes12p13$Node[i]),"pvalue"]),
0)
coloc_nodes12p13$Best_aQTL_Beta[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transA4$gene,
min_transA4[min_transA4$gene==as.character(coloc_nodes12p13$Node[i]),"beta"],
0)
coloc_nodes12p13$Best_aQTL_logP[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transA4$gene,
-log10(min_transA4[min_transA4$gene==as.character(coloc_nodes12p13$Node[i]),"pvalue"]),
0)
}
# I think it may be more convenient to merge the networks into one and then just change which attributes I visualize in Cytoscape
# Start with 2 temporary columns concatenating the regulator-target and target-regulator pairs for easier matching.
interactome12p13$temp1=paste(interactome12p13$Regulator,interactome12p13$Target)
interactome12p13$temp2=paste(interactome12p13$Target,interactome12p13$Regulator)
colocNet12p13$temp1=paste(colocNet12p13$trait1,colocNet12p13$traits)
colocNet12p13$temp2=paste(colocNet12p13$traits,colocNet12p13$trait1)
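# With both orientations stored, an interactome edge can be matched to a colocalization edge regardless of which gene is listed first.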
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(interactome12p13)[1],ncol = 4))
for(i in 1:dim(interactome12p13)[1]){
temp[i,1:4]=colocNet12p13[ifelse(is.na(match(interactome12p13$temp1[i],colocNet12p13$temp1)),
match(interactome12p13$temp1[i],colocNet12p13$temp2),
match(interactome12p13$temp1[i],colocNet12p13$temp1)),3:6]
}
temp[is.na(temp)]=0
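# Gene pairs absent from the colocalization network get 0 instead of NA so the attributes map cleanly in Cytoscape.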
# Then combine with the BMI colocalizations
colnames(temp)=colnames(colocNet12p13)[3:6]
temp=rbind(temp,colocNet12p13[colocNet12p13$trait1=="BMI",3:6])
# Then add rows for BMI-Gene connections with 0 for MoA and likelihood
full12p13=interactome12p13[,1:4]
temp2=colocNet12p13[colocNet12p13$trait1=="BMI",1:4]
colnames(temp2)=colnames(interactome12p13)[1:4]
temp2[,3:4]=0
full12p13=rbind(full12p13,temp2)
# Finally, combine the colocalization columns with the interactome columns
full12p13=cbind(full12p13,temp)
# The node data also need to be combined and duplicate rows removed
full12p13_nodes=rbind(inter_nodes12p13,coloc_nodes12p13)
full12p13_nodes=full12p13_nodes[!duplicated(full12p13_nodes$Node),]
# Write networks and node data to file for Cytoscape visualizations
write.table(interactome12p13,"Chr12p13_BMI_MRs_interactome.txt",sep = "\t",quote = F,row.names = F)
write.table(inter_nodes12p13,"Chr12p13_BMI_MRs_interactome_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(colocNet12p13,"Chr12p13_BMI_and_BMI_MRs_pairwise_colocalization_network.txt",sep = "\t",quote = F,row.names = F)
write.table(coloc_nodes12p13,"Chr12p13_BMI_and_BMI_MRs_pairwise_colocalization_network_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(full12p13,"Chr12p13_BMI_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(full12p13_nodes,"Chr12p13_BMI_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
######################################################
######################################################
### /Colocalization_analyses/R_script_for_LocusCompare_plots_and_network_extractions_for_select_loci.R
### This script is for extra analyses on the most interesting select loci (1p36.1, 7q32 and 12p13.1)
#install.packages("devtools")
#library(devtools)
#install_github("jrs95/hyprcoloc", build_opts = c("--no-resave-data", "--no-manual"), build_vignettes = F)
#browseVignettes("hyprcoloc")
# The install kept failing when trying to build the vignettes, so I disabled vignette building.
#devtools::install_github("boxiangliu/locuscomparer")
library(hyprcoloc)
library(locuscomparer)
setwd("YOUR WORKING DIRECTORY")
# Read in the LD matrices, and the GWAS and QTL data. The file locations are relative to your working directory, so adjust accordingly.
bmi=read.table("./Meta-analysis_Locke_et_al+UKBiobank_2018_UPDATED.txt",sep = "\t",header = T)
t2d=read.table("./Mahajan.NatGenet2018b.T2Dbmiadj.European.with.rsIDs.txt",sep = "\t",header = T)
hdl=read.table("./jointGwasMc_HDL.txt",sep = "\t",header = T)
triG=read.table("./jointGwasMc_TG.txt",sep = "\t",header = T)
cis_eqtl=read.table("./Eurobats_adipose_select_loci_cis-eQTLs_from_INT_logTPM.txt",sep = "\t",header = T)
cis_aqtl=read.table("./Eurobats_adipose_select_loci_cis-aQTLs_from_unnormalized_activities.txt",sep = "\t",header = T)
trans_bmi_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_BMI_MRs.txt",sep = "\t",header = T)
trans_bmi_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_BMI_MRs.txt",sep = "\t",header = T)
trans_t2d_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_HOMA-IR_MRs.txt",sep = "\t",header = T)
trans_t2d_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_HOMA-IR_MRs.txt",sep = "\t",header = T)
trans_hdl_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_HDL_MRs.txt",sep = "\t",header = T)
trans_hdl_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_HDL_MRs.txt",sep = "\t",header = T)
trans_triG_eqtl=read.table("./Eurobats_adipose_select_loci_trans-eQTLs_for_TriG_MRs.txt",sep = "\t",header = T)
trans_triG_aqtl=read.table("./Eurobats_adipose_select_loci_trans-aQTLs_for_TriG_MRs.txt",sep = "\t",header = T)
ld_files=c("Eurobats_chr1p36.1_LD_matrix.txt","Eurobats_chr7q32_LD_matrix.txt","Eurobats_chr12p13.33_LD_matrix.txt","Eurobats_chr12p13.1_LD_matrix.txt")
ld=list()
index=1
for(i in ld_files){
ld[[index]]=read.table(paste("./",i,sep=""),sep = "\t",header = F)
rownames(ld[[index]])=ld[[index]][,3]
ld[[index]]=ld[[index]][,-c(1:5)]
colnames(ld[[index]])=rownames(ld[[index]])
index=index+1
}
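# Optional sanity check (a minimal sketch, assuming PLINK-style files with 5 leading annotation columns):
# after dropping those columns, each LD matrix should be a square SNP x SNP matrix.
stopifnot(all(sapply(ld, function(m) nrow(m) == ncol(m))))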
# The HDL GWAS data has coordinates for hg18 and hg19, but I need CHR and POS columns (based on hg19) instead.
colnames(hdl)=c("CHR","POS","SNP","A1","A2","BETA","SE","N","P","Freq.A1.1000G.EUR")
hdl$CHR=gsub("chr","",hdl$CHR)
hdl$CHR=as.numeric(gsub(":.*","",hdl$CHR))
# This introduced NAs, but only for 3 SNPs without rsIDs (labeled only as ".")
hdl=hdl[!is.na(hdl$CHR),]
hdl$POS=as.numeric(gsub("chr.*:","",hdl$POS))
# The TriG GWAS data has coordinates for hg18 and hg19, but I need CHR and POS columns (based on hg19) instead.
colnames(triG)=c("CHR","POS","SNP","A1","A2","BETA","SE","N","P","Freq.A1.1000G.EUR")
triG$CHR=gsub("chr","",triG$CHR)
triG$CHR=as.numeric(gsub(":.*","",triG$CHR))
# This introduced NAs, but only for 3 SNPs without rsIDs (labeled only as ".")
triG=triG[!is.na(triG$CHR),]
triG$POS=as.numeric(gsub("chr.*:","",triG$POS))
# Filter GWAS, QTL and LD data to the same SNPs
filt_bmi=bmi[na.omit(match(c(rownames(ld[[1]]),rownames(ld[[2]]),rownames(ld[[3]]),rownames(ld[[4]])),bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(unique(cis_eqtl$snps),filt_bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(t2d$rsID,filt_bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(hdl$SNP,filt_bmi$SNP)),]
filt_bmi=filt_bmi[na.omit(match(triG$SNP,filt_bmi$SNP)),]
filt_t2d=t2d[na.omit(match(filt_bmi$SNP,t2d$rsID)),]
filt_hdl=hdl[na.omit(match(filt_bmi$SNP,hdl$SNP)),]
filt_triG=triG[na.omit(match(filt_bmi$SNP,triG$SNP)),]
filt_cis_eqtl=cis_eqtl[cis_eqtl$snps %in% filt_bmi$SNP,]
filt_cis_aqtl=cis_aqtl[cis_aqtl$snps %in% filt_bmi$SNP,]
filt_trans_bmi_eqtl=trans_bmi_eqtl[trans_bmi_eqtl$snps %in% filt_bmi$SNP,]
filt_trans_bmi_aqtl=trans_bmi_aqtl[trans_bmi_aqtl$snps %in% filt_bmi$SNP,]
filt_trans_t2d_eqtl=trans_t2d_eqtl[trans_t2d_eqtl$snps %in% filt_t2d$rsID,]
filt_trans_t2d_aqtl=trans_t2d_aqtl[trans_t2d_aqtl$snps %in% filt_t2d$rsID,]
filt_trans_hdl_eqtl=trans_hdl_eqtl[trans_hdl_eqtl$snps %in% filt_hdl$SNP,]
filt_trans_hdl_aqtl=trans_hdl_aqtl[trans_hdl_aqtl$snps %in% filt_hdl$SNP,]
filt_trans_triG_eqtl=trans_triG_eqtl[trans_triG_eqtl$snps %in% filt_triG$SNP,]
filt_trans_triG_aqtl=trans_triG_aqtl[trans_triG_aqtl$snps %in% filt_triG$SNP,]
filt_ld=list()
filt_ld[[1]]=ld[[1]][filt_bmi$SNP[filt_bmi$CHR==1],filt_bmi$SNP[filt_bmi$CHR==1]]
filt_ld[[2]]=ld[[2]][filt_bmi$SNP[filt_bmi$CHR==7],filt_bmi$SNP[filt_bmi$CHR==7]]
filt_ld[[3]]=ld[[3]][filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS<1400000],filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS<1400000]]
filt_ld[[4]]=ld[[4]][filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS>1400000],filt_bmi$SNP[filt_bmi$CHR==12 & filt_bmi$POS>1400000]]
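# Quick check that each filtered LD matrix is square and non-empty (i.e., all locus SNPs were found).
sapply(filt_ld, dim)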
# Let's free up some memory by dropping the huge trans-QTL data.frames
rm(trans_bmi_eqtl)
rm(trans_bmi_aqtl)
rm(trans_t2d_eqtl)
rm(trans_t2d_aqtl)
rm(trans_hdl_eqtl)
rm(trans_hdl_aqtl)
rm(trans_triG_eqtl)
rm(trans_triG_aqtl)
# Add chromosome and position to the QTLs for sorting
filt_cis_eqtl$chr=filt_bmi[match(filt_cis_eqtl$snps,filt_bmi$SNP),1]
filt_cis_aqtl$chr=filt_bmi[match(filt_cis_aqtl$snps,filt_bmi$SNP),1]
filt_trans_bmi_eqtl$chr=filt_bmi[match(filt_trans_bmi_eqtl$snps,filt_bmi$SNP),1]
filt_trans_bmi_aqtl$chr=filt_bmi[match(filt_trans_bmi_aqtl$snps,filt_bmi$SNP),1]
filt_trans_t2d_eqtl$chr=filt_bmi[match(filt_trans_t2d_eqtl$snps,filt_bmi$SNP),1]
filt_trans_t2d_aqtl$chr=filt_bmi[match(filt_trans_t2d_aqtl$snps,filt_bmi$SNP),1]
filt_trans_hdl_eqtl$chr=filt_bmi[match(filt_trans_hdl_eqtl$snps,filt_bmi$SNP),1]
filt_trans_hdl_aqtl$chr=filt_bmi[match(filt_trans_hdl_aqtl$snps,filt_bmi$SNP),1]
filt_trans_triG_eqtl$chr=filt_bmi[match(filt_trans_triG_eqtl$snps,filt_bmi$SNP),1]
filt_trans_triG_aqtl$chr=filt_bmi[match(filt_trans_triG_aqtl$snps,filt_bmi$SNP),1]
filt_cis_eqtl$position=filt_bmi[match(filt_cis_eqtl$snps,filt_bmi$SNP),2]
filt_cis_aqtl$position=filt_bmi[match(filt_cis_aqtl$snps,filt_bmi$SNP),2]
filt_trans_bmi_eqtl$position=filt_bmi[match(filt_trans_bmi_eqtl$snps,filt_bmi$SNP),2]
filt_trans_bmi_aqtl$position=filt_bmi[match(filt_trans_bmi_aqtl$snps,filt_bmi$SNP),2]
filt_trans_t2d_eqtl$position=filt_bmi[match(filt_trans_t2d_eqtl$snps,filt_bmi$SNP),2]
filt_trans_t2d_aqtl$position=filt_bmi[match(filt_trans_t2d_aqtl$snps,filt_bmi$SNP),2]
filt_trans_hdl_eqtl$position=filt_bmi[match(filt_trans_hdl_eqtl$snps,filt_bmi$SNP),2]
filt_trans_hdl_aqtl$position=filt_bmi[match(filt_trans_hdl_aqtl$snps,filt_bmi$SNP),2]
filt_trans_triG_eqtl$position=filt_bmi[match(filt_trans_triG_eqtl$snps,filt_bmi$SNP),2]
filt_trans_triG_aqtl$position=filt_bmi[match(filt_trans_triG_aqtl$snps,filt_bmi$SNP),2]
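# The %in% filters above guarantee every QTL SNP is in filt_bmi, so no NAs should appear here; spot-check two tables.
stopifnot(!anyNA(filt_cis_eqtl$position), !anyNA(filt_trans_bmi_aqtl$position))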
# Sort by chr and position
filt_bmi=filt_bmi[order(filt_bmi$CHR,filt_bmi$POS),]
filt_t2d=filt_t2d[order(filt_t2d$Chr,filt_t2d$Pos),]
filt_hdl=filt_hdl[order(filt_hdl$CHR,filt_hdl$POS),]
filt_triG=filt_triG[order(filt_triG$CHR,filt_triG$POS),]
filt_cis_eqtl=filt_cis_eqtl[order(filt_cis_eqtl$chr,filt_cis_eqtl$position),]
filt_cis_aqtl=filt_cis_aqtl[order(filt_cis_aqtl$chr,filt_cis_aqtl$position),]
filt_trans_bmi_eqtl=filt_trans_bmi_eqtl[order(filt_trans_bmi_eqtl$chr,filt_trans_bmi_eqtl$position),]
filt_trans_bmi_aqtl=filt_trans_bmi_aqtl[order(filt_trans_bmi_aqtl$chr,filt_trans_bmi_aqtl$position),]
filt_trans_t2d_eqtl=filt_trans_t2d_eqtl[order(filt_trans_t2d_eqtl$chr,filt_trans_t2d_eqtl$position),]
filt_trans_t2d_aqtl=filt_trans_t2d_aqtl[order(filt_trans_t2d_aqtl$chr,filt_trans_t2d_aqtl$position),]
filt_trans_hdl_eqtl=filt_trans_hdl_eqtl[order(filt_trans_hdl_eqtl$chr,filt_trans_hdl_eqtl$position),]
filt_trans_hdl_aqtl=filt_trans_hdl_aqtl[order(filt_trans_hdl_aqtl$chr,filt_trans_hdl_aqtl$position),]
filt_trans_triG_eqtl=filt_trans_triG_eqtl[order(filt_trans_triG_eqtl$chr,filt_trans_triG_eqtl$position),]
filt_trans_triG_aqtl=filt_trans_triG_aqtl[order(filt_trans_triG_aqtl$chr,filt_trans_triG_aqtl$position),]
### LocusCompare plots
## 1p36.1
# First, grab the necessary P-values for the SNPs used in the HyPrColoc analyses for the traits of interest
bmi1=filt_bmi[filt_bmi$CHR==1,c(3,9)]
loc1_eEPHB2=filt_cis_eqtl[filt_cis_eqtl$gene=="EPHB2",c(1,4)]
loc1_aEPHB2=filt_cis_aqtl[filt_cis_aqtl$gene=="EPHB2",c(1,4)]
loc1_eZNF436=filt_cis_eqtl[filt_cis_eqtl$gene=="ZNF436",c(1,4)]
loc1_aZNF436=filt_cis_aqtl[filt_cis_aqtl$gene=="ZNF436",c(1,4)]
loc1_eTCEA3=filt_cis_eqtl[filt_cis_eqtl$gene=="TCEA3",c(1,4)]
loc1_aTCEA3=filt_cis_aqtl[filt_cis_aqtl$gene=="TCEA3",c(1,4)]
loc1_eLASP1=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="LASP1" & filt_trans_bmi_eqtl$chr==1,c(1,4)]
loc1_aLASP1=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="LASP1" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
loc1_eRASSF4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="RASSF4" & filt_trans_bmi_eqtl$chr==1,c(1,4)]
loc1_aRASSF4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="RASSF4" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
loc1_aGNA14=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="GNA14" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
loc1_aDOK5=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="DOK5" & filt_trans_bmi_aqtl$chr==1,c(1,4)]
colnames(bmi1)=c("rsid","pval")
colnames(loc1_eEPHB2)=c("rsid","pval")
colnames(loc1_aEPHB2)=c("rsid","pval")
colnames(loc1_eZNF436)=c("rsid","pval")
colnames(loc1_aZNF436)=c("rsid","pval")
colnames(loc1_eTCEA3)=c("rsid","pval")
colnames(loc1_aTCEA3)=c("rsid","pval")
colnames(loc1_eLASP1)=c("rsid","pval")
colnames(loc1_aLASP1)=c("rsid","pval")
colnames(loc1_eRASSF4)=c("rsid","pval")
colnames(loc1_aRASSF4)=c("rsid","pval")
colnames(loc1_aGNA14)=c("rsid","pval")
colnames(loc1_aDOK5)=c("rsid","pval")
rownames(bmi1)=bmi1$rsid
rownames(loc1_eEPHB2)=loc1_eEPHB2$rsid
rownames(loc1_aEPHB2)=loc1_aEPHB2$rsid
rownames(loc1_eZNF436)=loc1_eZNF436$rsid
rownames(loc1_aZNF436)=loc1_aZNF436$rsid
rownames(loc1_eTCEA3)=loc1_eTCEA3$rsid
rownames(loc1_aTCEA3)=loc1_aTCEA3$rsid
rownames(loc1_eLASP1)=loc1_eLASP1$rsid
rownames(loc1_aLASP1)=loc1_aLASP1$rsid
rownames(loc1_eRASSF4)=loc1_eRASSF4$rsid
rownames(loc1_aRASSF4)=loc1_aRASSF4$rsid
rownames(loc1_aGNA14)=loc1_aGNA14$rsid
rownames(loc1_aDOK5)=loc1_aDOK5$rsid
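# Note: locuscomparer accepts either file paths or data.frames with 'rsid' and 'pval' columns,
# so the two-column subsets above can be passed to locuscompare() directly.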
# Check out some relevant LocusCompare plots before picking which to write to file
locuscompare(in_fn1=bmi1,in_fn2=loc1_eEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-eQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs12408468") # Potential 3rd BMI signal?
locuscompare(in_fn1=loc1_eEPHB2,in_fn2=loc1_aEPHB2,title1 = "EPHB2 cis-eQTL", title2 = "EPHB2 cis-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-eQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_eZNF436,in_fn2=loc1_aZNF436,title1 = "ZNF436 cis-eQTL", title2 = "ZNF436 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eTCEA3,title1 = "BMI GWAS", title2 = "TCEA3 cis-eQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aTCEA3,title1 = "BMI GWAS", title2 = "TCEA3 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-eQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aLASP1,title1 = "EPHB2 cis-aQTL", title2 = "LASP1 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aLASP1,title1 = "EPHB2 cis-aQTL", title2 = "LASP1 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_eRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-eQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aRASSF4,title1 = "EPHB2 cis-aQTL", title2 = "RASSF4 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aRASSF4,title1 = "EPHB2 cis-aQTL", title2 = "RASSF4 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aGNA14,title1 = "EPHB2 cis-aQTL", title2 = "GNA14 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aGNA14,title1 = "EPHB2 cis-aQTL", title2 = "GNA14 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aDOK5,title1 = "EPHB2 cis-aQTL", title2 = "DOK5 trans-aQTL",snp = "rs6692586") # Top GWAS SNP
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aDOK5,title1 = "EPHB2 cis-aQTL", title2 = "DOK5 trans-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
locuscompare(in_fn1=loc1_eEPHB2,in_fn2=loc1_aEPHB2,title1 = "EPHB2 cis-eQTL", title2 = "EPHB2 cis-aQTL",snp = "rs4654828") # Top multi-QTL cluster SNP
# This locus is complex and consequently difficult to interpret. The EPHB2 cis-aQTL and various BMI MR trans-aQTL signals suggest two functional signals
# represented by rs6692586 (the top BMI GWAS SNP) and rs4654828 (the top trans-aQTL signal for many BMI MRs). The EPHB2 cis-aQTL has these two SNPs
# at roughly equal strength, while rs6692586 is clearly stronger for BMI and rs4654828 is clearly stronger for the trans-aQTLs. Perhaps the best
# hypothetical explanation for these observations is that rs6692586 operates in cis through effects on EPHB2 expression and activity, while rs4654828
# has an alternative proximal effect that distally affects the activities of many correlated BMI MRs, including EPHB2, which shows up as a bump in
# the EPHB2 aQTL signal. The proximal effect of rs4654828 might be on ZNF436 activity, but this probably cannot be mediated via expression levels
# since there is an extremely strong cis-eQTL for ZNF436 at this locus that does not overlap the BMI signal or the cis-aQTL signal. I looked into
# the position of rs4654828, but it is quite far away from ZNF436, in a LACTBL1 intron. LACTBL1 is apparently not expressed in our adipose tissue, so
# it is hard to imagine how it could be mediating the effect on BMI within adipose. It is best expressed in testis, which does have a significant
# rs4654828-LACTBL1 eQTL, but this also doesn't seem relevant to BMI. So if rs4654828 does affect ZNF436 activity in adipose, it must be mediated
# some other way. Interestingly, rs4654828 in GTEx does show significant eQTLs with ZNF436 in other tissue types (Skin, Aorta, Tibial Artery, Esophagus,
# Tibial Nerve and Thyroid). It's hard to imagine how ZNF436 expression effects in other tissues could be relevant to ZNF436 activity in adipose.
# There are also rs4654828-TCEA3 eQTLs in Skin and Skeletal Muscle, and a TCEA3 splicing QTL in skin. The TCEA3 eQTL/aQTL LocusCompare plots do
# not suggest TCEA3 is relevant to either BMI signal in adipose. Regardless, this sort of scenario might manifest as epistatic effects on BMI and
# EPHB2 between these two SNPs. This is easy enough to test for EPHB2 activity, but I can't test it for BMI.
# Let's write some to PDFs
pdf("rs6692586-EPHB2_eQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_eEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-eQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-EPHB2_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-EPHB2_eQTL_and_aQTL_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_eEPHB2,in_fn2=loc1_aEPHB2,title1 = "EPHB2 cis-eQTL", title2 = "EPHB2 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-EPHB2_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-ZNF436_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-ZNF436_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aZNF436,title1 = "BMI GWAS", title2 = "ZNF436 cis-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-DOK5_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-DOK5_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aDOK5,title1 = "BMI GWAS", title2 = "DOK5 trans-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-RASSF4_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-RASSF4_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aRASSF4,title1 = "BMI GWAS", title2 = "RASSF4 trans-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-GNA14_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-GNA14_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aGNA14,title1 = "BMI GWAS", title2 = "GNA14 trans-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-LASP1_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs4654828-LASP1_aQTL_and_BMI_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi1,in_fn2=loc1_aLASP1,title1 = "BMI GWAS", title2 = "LASP1 trans-aQTL",snp = "rs4654828")
dev.off()
pdf("rs6692586-LASP1_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aLASP1,title1 = "EPHB2 cis-aQTL", title2 = "LASP1 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-GNA14_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aGNA14,title1 = "EPHB2 cis-aQTL", title2 = "GNA14 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-RASSF4_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aRASSF4,title1 = "EPHB2 cis-aQTL", title2 = "RASSF4 trans-aQTL",snp = "rs6692586")
dev.off()
pdf("rs6692586-DOK5_aQTL_and_EPHB2_aQTL_1p36_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=loc1_aEPHB2,in_fn2=loc1_aDOK5,title1 = "EPHB2 cis-aQTL", title2 = "DOK5 trans-aQTL",snp = "rs6692586")
dev.off()
## 7q32
# First, grab the necessary P-values for the SNPs used in the HyPrColoc analyses for the traits of interest
bmi2=filt_bmi[filt_bmi$CHR==7,c(3,9)]
t2d2=filt_t2d[filt_t2d$Chr==7,c(1,9)]
hdl2=filt_hdl[filt_hdl$CHR==7,c(3,9)]
triG2=filt_triG[filt_triG$CHR==7,c(3,9)]
loc2_eLINC=filt_cis_eqtl[filt_cis_eqtl$gene=="LINC-PINT",c(1,4)]
loc2_aLINC=filt_cis_aqtl[filt_cis_aqtl$gene=="LINC-PINT",c(1,4)]
loc2_eKLF14=filt_cis_eqtl[filt_cis_eqtl$gene=="KLF14",c(1,4)]
loc2_aKLF14=filt_cis_aqtl[filt_cis_aqtl$gene=="KLF14",c(1,4)]
loc2_eAC=filt_cis_eqtl[filt_cis_eqtl$gene=="AC016831.7",c(1,4)]
loc2_eTBX4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="TBX4" & filt_trans_bmi_eqtl$chr==7,c(1,4)]
loc2_aTBX4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="TBX4" & filt_trans_bmi_aqtl$chr==7,c(1,4)]
loc2_eGNB1=filt_trans_t2d_eqtl[filt_trans_t2d_eqtl$gene=="GNB1" & filt_trans_t2d_eqtl$chr==7,c(1,4)]
loc2_aGNB1=filt_trans_t2d_aqtl[filt_trans_t2d_aqtl$gene=="GNB1" & filt_trans_t2d_aqtl$chr==7,c(1,4)]
loc2_eESR2=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$gene=="ESR2" & filt_trans_hdl_eqtl$chr==7,c(1,4)]
loc2_aESR2=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$gene=="ESR2" & filt_trans_hdl_aqtl$chr==7,c(1,4)]
loc2_eNR2F1=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$gene=="NR2F1" & filt_trans_hdl_eqtl$chr==7,c(1,4)]
loc2_aNR2F1=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$gene=="NR2F1" & filt_trans_hdl_aqtl$chr==7,c(1,4)]
loc2_eAGT=filt_trans_triG_eqtl[filt_trans_triG_eqtl$gene=="AGT" & filt_trans_triG_eqtl$chr==7,c(1,4)]
loc2_aAGT=filt_trans_triG_aqtl[filt_trans_triG_aqtl$gene=="AGT" & filt_trans_triG_aqtl$chr==7,c(1,4)]
loc2_eRABIF=filt_trans_triG_eqtl[filt_trans_triG_eqtl$gene=="RABIF" & filt_trans_triG_eqtl$chr==7,c(1,4)]
loc2_aRABIF=filt_trans_triG_aqtl[filt_trans_triG_aqtl$gene=="RABIF" & filt_trans_triG_aqtl$chr==7,c(1,4)]
colnames(bmi2)=c("rsid","pval")
colnames(t2d2)=c("rsid","pval")
colnames(hdl2)=c("rsid","pval")
colnames(triG2)=c("rsid","pval")
colnames(loc2_eLINC)=c("rsid","pval")
colnames(loc2_aLINC)=c("rsid","pval")
colnames(loc2_eKLF14)=c("rsid","pval")
colnames(loc2_aKLF14)=c("rsid","pval")
colnames(loc2_eAC)=c("rsid","pval")
colnames(loc2_eTBX4)=c("rsid","pval")
colnames(loc2_aTBX4)=c("rsid","pval")
colnames(loc2_eGNB1)=c("rsid","pval")
colnames(loc2_aGNB1)=c("rsid","pval")
colnames(loc2_eESR2)=c("rsid","pval")
colnames(loc2_aESR2)=c("rsid","pval")
colnames(loc2_eNR2F1)=c("rsid","pval")
colnames(loc2_aNR2F1)=c("rsid","pval")
colnames(loc2_eAGT)=c("rsid","pval")
colnames(loc2_aAGT)=c("rsid","pval")
colnames(loc2_eRABIF)=c("rsid","pval")
colnames(loc2_aRABIF)=c("rsid","pval")
rownames(bmi2)=bmi2$rsid
rownames(t2d2)=t2d2$rsid
rownames(hdl2)=hdl2$rsid
rownames(triG2)=triG2$rsid
rownames(loc2_eLINC)=loc2_eLINC$rsid
rownames(loc2_aLINC)=loc2_aLINC$rsid
rownames(loc2_eKLF14)=loc2_eKLF14$rsid
rownames(loc2_aKLF14)=loc2_aKLF14$rsid
rownames(loc2_eAC)=loc2_eAC$rsid
rownames(loc2_eTBX4)=loc2_eTBX4$rsid
rownames(loc2_aTBX4)=loc2_aTBX4$rsid
rownames(loc2_eGNB1)=loc2_eGNB1$rsid
rownames(loc2_aGNB1)=loc2_aGNB1$rsid
rownames(loc2_eESR2)=loc2_eESR2$rsid
rownames(loc2_aESR2)=loc2_aESR2$rsid
rownames(loc2_eNR2F1)=loc2_eNR2F1$rsid
rownames(loc2_aNR2F1)=loc2_aNR2F1$rsid
rownames(loc2_eAGT)=loc2_eAGT$rsid
rownames(loc2_aAGT)=loc2_aAGT$rsid
rownames(loc2_eRABIF)=loc2_eRABIF$rsid
rownames(loc2_aRABIF)=loc2_aRABIF$rsid
# Check out some relevant LocusCompare plots before picking which to write to file
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eLINC,title1 = "BMI GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=loc2_eLINC,in_fn2=loc2_aLINC,title1 = "LINC-PINT cis-eQTL", title2 = "LINC-PINT cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eLINC,title1 = "T2D GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eLINC,title1 = "HDL GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eLINC,title1 = "Triglycerides GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_aKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_aKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-aQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs738134") # Near top T2D GWAS SNP
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eAC,title1 = "BMI GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eAC,title1 = "T2D GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eAC,title1 = "HDL GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eAC,title1 = "Triglycerides GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_eTBX4,title1 = "BMI GWAS", title2 = "TBX4 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eTBX4,title1 = "T2D GWAS", title2 = "TBX4 trans-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eTBX4,title1 = "HDL GWAS", title2 = "TBX4 trans-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eTBX4,title1 = "Triglycerides GWAS", title2 = "TBX4 trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=bmi2,in_fn2=loc2_aTBX4,title1 = "BMI GWAS", title2 = "TBX4 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_aTBX4,title1 = "T2D GWAS", title2 = "TBX4 trans-aQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aTBX4,title1 = "HDL GWAS", title2 = "TBX4 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aTBX4,title1 = "Triglycerides GWAS", title2 = "TBX4 trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=t2d2,in_fn2=loc2_eGNB1,title1 = "T2D GWAS", title2 = "GNB1 trans-eQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=t2d2,in_fn2=loc2_aGNB1,title1 = "T2D GWAS", title2 = "GNB1 trans-aQTL",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eESR2,title1 = "HDL GWAS", title2 = "ESR2 trans-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=loc2_aESR2,title1 = "HDL GWAS", title2 = "ESR2 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=loc2_eNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-eQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=hdl2,in_fn2=loc2_aNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=loc2_eNR2F1,in_fn2=loc2_aNR2F1,title1 = "NR2F1 trans-eQTL", title2 = "NR2F1 trans-aQTL",snp = "rs11765979") # Top HDL GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eRABIF,title1 = "Triglycerides GWAS", title2 = "RABIF trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aRABIF,title1 = "Triglycerides GWAS", title2 = "RABIF trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_eAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=triG2,in_fn2=loc2_aAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs287621") # Top TriG GWAS SNP
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs738134") # Near top T2D GWAS SNP
# I never ran HyPrColoc on just cis-e_KLF14 and cis-a_KLF14 alone. First I need to calculate the SEs and format the data.
filt_cis_eqtl$SE=filt_cis_eqtl$beta/filt_cis_eqtl$statistic
filt_cis_aqtl$SE=filt_cis_aqtl$beta/filt_cis_aqtl$statistic
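# For a linear-model QTL the test statistic is t = beta/SE, so beta/statistic recovers the standard
# error (this assumes the 'statistic' column is the t value, as in Matrix eQTL output).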
all(filt_cis_eqtl$snps[filt_cis_eqtl$gene=="KLF14"]==filt_cis_aqtl$snps[filt_cis_aqtl$gene=="KLF14"]) # TRUE
betas2=cbind("cis-e_KLF14"=filt_cis_eqtl[filt_cis_eqtl$gene=="KLF14","beta"],"cis-a_KLF14"=filt_cis_aqtl[filt_cis_aqtl$gene=="KLF14","beta"])
ses2=cbind("cis-e_KLF14"=filt_cis_eqtl[filt_cis_eqtl$gene=="KLF14","SE"],"cis-a_KLF14"=filt_cis_aqtl[filt_cis_aqtl$gene=="KLF14","SE"])
rownames(betas2)=filt_cis_aqtl$snps[filt_cis_aqtl$gene=="KLF14"]
rownames(ses2)=filt_cis_aqtl$snps[filt_cis_aqtl$gene=="KLF14"]
all(rownames(betas2)==rownames(filt_ld[[2]])) # TRUE
all(rownames(ses2)==rownames(filt_ld[[2]])) # TRUE
eKLF14_aKLF14=hyprcoloc(as.matrix(betas2),as.matrix(ses2),
trait.names=colnames(betas2),snp.id=rownames(betas2),ld.matrix = filt_ld[[2]],
trait.subset = c("cis-e_KLF14","cis-a_KLF14"),snpscores = T)
# KLF14 eQTL and aQTL colocalize with a PP=0.9094 that is best explained by rs4731702.
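# To inspect the full output, the summary table (traits, posterior_prob, candidate_snp, etc.) should be
# in eKLF14_aKLF14$results, with the per-SNP scores stored alongside it since snpscores=T was set.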
# Now let's do the same sort of analysis for NR2F1 and AGT.
filt_trans_hdl_eqtl$SE=filt_trans_hdl_eqtl$beta/filt_trans_hdl_eqtl$statistic
filt_trans_hdl_aqtl$SE=filt_trans_hdl_aqtl$beta/filt_trans_hdl_aqtl$statistic
temp_e=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$chr==7 & filt_trans_hdl_eqtl$gene=="NR2F1",]
temp_a=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$chr==7 & filt_trans_hdl_aqtl$gene=="NR2F1",]
all(rownames(betas2)==temp_e$snps) # TRUE
all(rownames(betas2)==temp_a$snps) # TRUE
betas2=cbind(betas2,"trans-e_NR2F1"=temp_e$beta,"trans-a_NR2F1"=temp_a$beta)
ses2=cbind(ses2,"trans-e_NR2F1"=temp_e$SE,"trans-a_NR2F1"=temp_a$SE)
filt_trans_triG_eqtl$SE=filt_trans_triG_eqtl$beta/filt_trans_triG_eqtl$statistic
filt_trans_triG_aqtl$SE=filt_trans_triG_aqtl$beta/filt_trans_triG_aqtl$statistic
temp_e=filt_trans_triG_eqtl[filt_trans_triG_eqtl$chr==7 & filt_trans_triG_eqtl$gene=="AGT",]
temp_a=filt_trans_triG_aqtl[filt_trans_triG_aqtl$chr==7 & filt_trans_triG_aqtl$gene=="AGT",]
all(rownames(betas2)==temp_e$snps) # TRUE
all(rownames(betas2)==temp_a$snps) # FALSE
temp_a=temp_a[match(rownames(betas2),temp_a$snps),]
all(rownames(betas2)==temp_a$snps) # TRUE
betas2=cbind(betas2,"trans-e_AGT"=temp_e$beta,"trans-a_AGT"=temp_a$beta)
ses2=cbind(ses2,"trans-e_AGT"=temp_e$SE,"trans-a_AGT"=temp_a$SE)
eNR2F1_aNR2F1=hyprcoloc(as.matrix(betas2),as.matrix(ses2),
trait.names=colnames(betas2),snp.id=rownames(betas2),ld.matrix = filt_ld[[2]],
trait.subset = c("trans-e_NR2F1","trans-a_NR2F1"),snpscores = T)
# NR2F1 eQTL and aQTL colocalize with a PP=0.8700 that is best explained by rs738134.
eAGT_aAGT=hyprcoloc(as.matrix(betas2),as.matrix(ses2),
trait.names=colnames(betas2),snp.id=rownames(betas2),ld.matrix = filt_ld[[2]],
trait.subset = c("trans-e_AGT","trans-a_AGT"),snpscores = T)
# AGT eQTL and aQTL colocalize with a PP=0.6451 that is best explained by rs11765979.
# Let's write some to PDFs
pdf("BMI_T2D_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("BMI_T2D_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("BMI_T2D_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=t2d2,title1 = "BMI GWAS", title2 = "T2D GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
dev.off()
pdf("BMI_HDL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("BMI_HDL_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("BMI_HDL_7q32_LocusCompare_rs11765979.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=hdl2,title1 = "BMI GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
dev.off()
pdf("BMI_TriG_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("BMI_TriG_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=triG2,title1 = "BMI GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("T2D_HDL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("T2D_HDL_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
dev.off()
pdf("T2D_HDL_7q32_LocusCompare_rs11765979.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=hdl2,title1 = "T2D GWAS", title2 = "HDL GWAS",snp = "rs11765979") # Top HDL GWAS SNP
dev.off()
pdf("T2D_TriG_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("T2D_TriG_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs738134") # Near top T2D GWAS SNP (top SNP did not overlap other GWAS)
dev.off()
pdf("T2D_TriG_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=triG2,title1 = "T2D GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("HDL_TriG_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("HDL_TriG_7q32_LocusCompare_rs11765979.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs11765979") # Top HDL GWAS SNP
dev.off()
pdf("HDL_TriG_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=triG2,title1 = "HDL GWAS", title2 = "Triglycerides GWAS",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./BMI/BMI_e_LINC-PINT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eLINC,title1 = "BMI GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_LINC-PINT_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eLINC,title1 = "T2D GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_LINC-PINT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eLINC,title1 = "HDL GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_LINC-PINT_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eLINC,title1 = "Triglycerides GWAS", title2 = "LINC-PINT cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./LINC-PINT_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eLINC,in_fn2=loc2_aLINC,title1 = "LINC-PINT cis-eQTL", title2 = "LINC-PINT cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./BMI/BMI_e_AC016831.7_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eAC,title1 = "BMI GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_AC016831.7_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eAC,title1 = "T2D GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_AC016831.7_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eAC,title1 = "HDL GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_AC016831.7_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eAC,title1 = "Triglycerides GWAS", title2 = "AC016831.7 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./BMI/BMI_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_KLF14_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_KLF14_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./BMI/BMI_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_aKLF14,title1 = "BMI GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_aKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_a_KLF14_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_aKLF14,title1 = "T2D GWAS", title2 = "KLF14 cis-aQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aKLF14,title1 = "HDL GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_a_KLF14_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_aKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_a_KLF14_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_aKLF14,title1 = "Triglycerides GWAS", title2 = "KLF14 cis-aQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./KLF14_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eKLF14,in_fn2=loc2_aKLF14,title1 = "KLF14 cis-eQTL", title2 = "KLF14 cis-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./BMI/BMI_e_TBX4_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=bmi2,in_fn2=loc2_eTBX4,title1 = "BMI GWAS", title2 = "TBX4 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./T2D/T2D_e_TBX4_7q32_LocusCompare_rs738134.pdf",width = 10)
locuscompare(in_fn1=t2d2,in_fn2=loc2_eTBX4,title1 = "T2D GWAS", title2 = "TBX4 trans-eQTL",snp = "rs738134") # Near top T2D GWAS SNP
dev.off()
pdf("./HDL/HDL_e_TBX4_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eTBX4,title1 = "HDL GWAS", title2 = "TBX4 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_TBX4_7q32_LocusCompare_rs287621.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eTBX4,title1 = "Triglycerides GWAS", title2 = "TBX4 trans-eQTL",snp = "rs287621") # Top TriG GWAS SNP
dev.off()
pdf("./TBX4_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eTBX4,in_fn2=loc2_aTBX4,title1 = "TBX4 trans-eQTL", title2 = "TBX4 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./HDL/HDL_e_NR2F1_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_eNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-eQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./HDL/HDL_a_NR2F1_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=hdl2,in_fn2=loc2_aNR2F1,title1 = "HDL GWAS", title2 = "NR2F1 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./NR2F1_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eNR2F1,in_fn2=loc2_aNR2F1,title1 = "NR2F1 trans-eQTL", title2 = "NR2F1 trans-aQTL",snp = "rs972283") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_e_AGT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_eAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-eQTL",snp = "rs287621") # Top BMI GWAS SNP
dev.off()
pdf("./Triglycerides/TriG_a_AGT_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=triG2,in_fn2=loc2_aAGT,title1 = "Triglycerides GWAS", title2 = "AGT trans-aQTL",snp = "rs287621") # Top BMI GWAS SNP
dev.off()
pdf("./AGT_eQTL_aQTL_7q32_LocusCompare_rs972283.pdf",width = 10)
locuscompare(in_fn1=loc2_eAGT,in_fn2=loc2_aAGT,title1 = "AGT trans-eQTL", title2 = "AGT trans-aQTL",snp = "rs287621") # Top BMI GWAS SNP
dev.off()
## 12p13.1
# First, grab the necessary P-values for the SNPs used in the HyPrColoc analyses for the traits of interest
bmi4=filt_bmi[match(rownames(filt_ld[[4]]),filt_bmi$SNP),c(3,9)]
loc4_eANG=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="ANG",c(1,4)]
loc4_aANG=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="ANG",c(1,4)]
loc4_eID2=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="ID2",c(1,4)]
loc4_aID2=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="ID2",c(1,4)]
loc4_ePTPRJ=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="PTPRJ",c(1,4)]
loc4_aPTPRJ=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="PTPRJ",c(1,4)]
loc4_eTENM4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="TENM4",c(1,4)]
loc4_aTENM4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="TENM4",c(1,4)]
loc4_eEPHB2=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$gene=="EPHB2",c(1,4)]
loc4_aEPHB2=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$gene=="EPHB2",c(1,4)]
colnames(bmi4)=c("rsid","pval")
colnames(loc4_eANG)=c("rsid","pval")
colnames(loc4_aANG)=c("rsid","pval")
colnames(loc4_eID2)=c("rsid","pval")
colnames(loc4_aID2)=c("rsid","pval")
colnames(loc4_ePTPRJ)=c("rsid","pval")
colnames(loc4_aPTPRJ)=c("rsid","pval")
colnames(loc4_eTENM4)=c("rsid","pval")
colnames(loc4_aTENM4)=c("rsid","pval")
colnames(loc4_eEPHB2)=c("rsid","pval")
colnames(loc4_aEPHB2)=c("rsid","pval")
rownames(bmi4)=bmi4$rsid
rownames(loc4_eANG)=loc4_eANG$rsid
rownames(loc4_aANG)=loc4_aANG$rsid
rownames(loc4_eID2)=loc4_eID2$rsid
rownames(loc4_aID2)=loc4_aID2$rsid
rownames(loc4_ePTPRJ)=loc4_ePTPRJ$rsid
rownames(loc4_aPTPRJ)=loc4_aPTPRJ$rsid
rownames(loc4_eTENM4)=loc4_eTENM4$rsid
rownames(loc4_aTENM4)=loc4_aTENM4$rsid
rownames(loc4_eEPHB2)=loc4_eEPHB2$rsid
rownames(loc4_aEPHB2)=loc4_aEPHB2$rsid
# Check out some relevant LocusCompare plots before picking which to write to file
locuscompare(in_fn1=bmi4,in_fn2=loc4_eEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aEPHB2,title1 = "BMI GWAS", title2 = "EPHB2 trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_eANG,title1 = "BMI GWAS", title2 = "ANG trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aANG,title1 = "BMI GWAS", title2 = "ANG trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_eID2,title1 = "BMI GWAS", title2 = "ID2 trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aID2,title1 = "BMI GWAS", title2 = "ID2 trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_ePTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aPTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ trans-aQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_eTENM4,title1 = "BMI GWAS", title2 = "TENM4 trans-eQTL",snp = "rs12422552") # Top GWAS SNP
locuscompare(in_fn1=bmi4,in_fn2=loc4_aTENM4,title1 = "BMI GWAS", title2 = "TENM4 trans-aQTL",snp = "rs12422552") # Top GWAS SNP
# Let's write some to PDFs
pdf("./BMI/rs12422552-ANG_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_eANG,title1 = "BMI GWAS", title2 = "ANG trans-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-ANG_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aANG,title1 = "BMI GWAS", title2 = "ANG trans-aQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-ID2_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_eID2,title1 = "BMI GWAS", title2 = "ID2 trans-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-ID2_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aID2,title1 = "BMI GWAS", title2 = "ID2 trans-aQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-PTPRJ_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_ePTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ trans-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-PTPRJ_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aPTPRJ,title1 = "BMI GWAS", title2 = "PTPRJ trans-aQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-TENM4_eQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_eTENM4,title1 = "BMI GWAS", title2 = "TENM4 trans-eQTL",snp = "rs12422552")
dev.off()
pdf("./BMI/rs12422552-TENM4_aQTL_and_BMI_12p13.1_LocusCompare.pdf",width = 10)
locuscompare(in_fn1=bmi4,in_fn2=loc4_aTENM4,title1 = "BMI GWAS", title2 = "TENM4 trans-aQTL",snp = "rs12422552")
dev.off()
### Let's switch to pulling out data for Cytoscape network visualizations
# Read in data
bmi_pairColoc=read.table("./BMI/Pairwise_HyPrColoc_between_BMI_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
t2d_pairColoc=read.table("./T2D/Pairwise_HyPrColoc_between_BMIadjT2D_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
hdl_pairColoc=read.table("./HDL/Pairwise_HyPrColoc_between_HDL_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
triG_pairColoc=read.table("./Triglycerides/Pairwise_HyPrColoc_between_TriG_and_each_QTL_for_select_loci.txt",sep = "\t",header = T)
ephb2_pairColoc=read.table("./BMI/Pairwise_HyPrColoc_between_EPHB2_aQTL_and_each_other_QTL_for_1p36.txt",sep = "\t",header = T)
bmi_mrs=read.table("./BMI/Eurobats_adipose_time-matched_BMI_MRs_from_RF_modeling.txt",header = F)
homair_mrs=read.table("./HOMA-IR/Eurobats_adipose_time-matched_HOMA-IR_MRs_from_RF_modeling.txt",header = F)
hdl_mrs=read.table("./HDL/Eurobats_adipose_time-matched_HDL_MRs_from_RF_modeling.txt",header = F)
triG_mrs=read.table("./Triglycerides/Eurobats_adipose_time-matched_Triglycerides_MRs_from_RF_modeling.txt",header = F)
interactome=read.table("../Adipose expression data/FINAL_logTPMs_and_activities/Eurobats_adipose_900boots_regulon_with_LINC-PINT.txt",sep = "\t",header = T)
tpm=read.table("../Adipose expression data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_qnorm_INT_logTPMs_for_all_expressed_genes.txt",
sep = "\t",header = T,row.names = 1)
vip=read.table("../Adipose expression data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_unnormalized_activities_from_logTPM_for_4213_regulators.txt",
sep = "\t",header = T,row.names = 1)
phenos=read.table("../Eurobats phenotypes/Amendment_time-matched_phenotypes_E886_02082019_with_HOMA.txt",sep="\t",header = T,row.names = 1)
filt_pheno=phenos[na.omit(match(colnames(vip),rownames(phenos))),]
all(colnames(vip)==colnames(tpm)) # TRUE
all(colnames(vip)==rownames(filt_pheno)) # TRUE
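# With the samples confirmed to align across the expression, activity and phenotype tables, the correlations below are valid.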
sig_bmi=filt_bmi[filt_bmi$P<=5E-8,]
sig_t2d=filt_t2d[filt_t2d$Pvalue<=5E-8,]
sig_hdl=filt_hdl[filt_hdl$P<=5E-8,]
sig_triG=filt_triG[filt_triG$P<=5E-8,]
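# (P<=5E-8 is the conventional genome-wide significance threshold.)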
# Grab relevant sub-interactomes
bmi_MRregs=interactome[interactome$Target %in% bmi_mrs[,1],]
bmi_MRMR=bmi_MRregs[bmi_MRregs$Regulator %in% bmi_mrs[,1],]
homair_MRregs=interactome[interactome$Target %in% homair_mrs[,1],]
homair_MRMR=homair_MRregs[homair_MRregs$Regulator %in% homair_mrs[,1],]
hdl_MRregs=interactome[interactome$Target %in% hdl_mrs[,1],]
hdl_MRMR=hdl_MRregs[hdl_MRregs$Regulator %in% hdl_mrs[,1],]
triG_MRregs=interactome[interactome$Target %in% triG_mrs[,1],]
triG_MRMR=triG_MRregs[triG_MRregs$Regulator %in% triG_mrs[,1],]
# 1p36
# Grab interactions between EPHB2 and MRs in adipose interactome
EPHB2mrs=bmi_MRregs[bmi_MRregs$Regulator=="EPHB2",]
mrsEPHB2=interactome[(interactome$Regulator %in% bmi_mrs[,1]) & (interactome$Target=="EPHB2"),]
interactome1p36=rbind(bmi_MRMR,EPHB2mrs,mrsEPHB2)
# Grab pairwise colocalizations with PP>0.5 between BMI and QTLs and EPHB2 aQTL and trans-QTLs
bmi_pairColoc1p36=bmi_pairColoc[bmi_pairColoc$locus=="1p36.1" & bmi_pairColoc$posterior_prob>0.5,c(2,3,5)]
bmi_pairColoc1p36$traits=gsub("BMI, ","",bmi_pairColoc1p36$traits)
bmi_pairColoc1p36=cbind("trait1"=rep("BMI",dim(bmi_pairColoc1p36)[1]),bmi_pairColoc1p36)
bmi_e_pairColoc1=bmi_pairColoc1p36[grepl("-e_",bmi_pairColoc1p36$traits),]
bmi_a_pairColoc1=bmi_pairColoc1p36[grepl("-a_",bmi_pairColoc1p36$traits),]
bmi_e_pairColoc1$traits=gsub(".*_","",bmi_e_pairColoc1$traits)
bmi_a_pairColoc1$traits=gsub(".*_","",bmi_a_pairColoc1$traits)
bmi_netPair1p36=rbind(bmi_e_pairColoc1,bmi_a_pairColoc1)
bmi_netPair1p36=bmi_netPair1p36[!duplicated(bmi_netPair1p36$traits),-c(3,4)]
bmi_netPair1p36$eQTL_PP=bmi_e_pairColoc1[match(bmi_netPair1p36$traits,bmi_e_pairColoc1$traits),3]
bmi_netPair1p36$eQTL_SNP=bmi_e_pairColoc1[match(bmi_netPair1p36$traits,bmi_e_pairColoc1$traits),4]
bmi_netPair1p36$aQTL_PP=bmi_a_pairColoc1[match(bmi_netPair1p36$traits,bmi_a_pairColoc1$traits),3]
bmi_netPair1p36$aQTL_SNP=bmi_a_pairColoc1[match(bmi_netPair1p36$traits,bmi_a_pairColoc1$traits),4]
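# Each colocalizing trait now has one row with separate eQTL/aQTL PP and candidate-SNP columns, filled by matching back into the split tables.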
ephb2Coloc1=ephb2_pairColoc[!is.na(ephb2_pairColoc$candidate_snp),c(2,3,5)]
ephb2Coloc1=ephb2Coloc1[ephb2Coloc1$posterior_prob>0.5,]
ephb2Coloc1$traits=gsub("cis-a_EPHB2, ","",ephb2Coloc1$traits)
ephb2Coloc1=cbind("trait1"=rep("EPHB2",dim(ephb2Coloc1)[1]),ephb2Coloc1)
e_ephb2Coloc1=ephb2Coloc1[grepl("-e_",ephb2Coloc1$traits),]
a_ephb2Coloc1=ephb2Coloc1[grepl("-a_",ephb2Coloc1$traits),]
e_ephb2Coloc1$traits=gsub(".*_","",e_ephb2Coloc1$traits)
a_ephb2Coloc1$traits=gsub(".*_","",a_ephb2Coloc1$traits)
netEPHB2=rbind(e_ephb2Coloc1,a_ephb2Coloc1)
netEPHB2=netEPHB2[!duplicated(netEPHB2$traits),-c(3,4)]
netEPHB2$eQTL_PP=e_ephb2Coloc1[match(netEPHB2$traits,e_ephb2Coloc1$traits),3]
netEPHB2$eQTL_SNP=e_ephb2Coloc1[match(netEPHB2$traits,e_ephb2Coloc1$traits),4]
netEPHB2$aQTL_PP=a_ephb2Coloc1[match(netEPHB2$traits,a_ephb2Coloc1$traits),3]
netEPHB2$aQTL_SNP=a_ephb2Coloc1[match(netEPHB2$traits,a_ephb2Coloc1$traits),4]
colocNet1p36=rbind(bmi_netPair1p36,netEPHB2)
colocNet1p36[is.na(colocNet1p36)]=0
# Grab the -log10(Pmin) and betas for the eQTLs and aQTLs among BMI GWAS-significant SNPs at the 1p36.1 locus.
# Actually, though I originally made networks with nodes shaded according to their minP QTLs, I've since decided
# that for the manuscript I need to stick to a single SNP for all QTLs to avoid allele-switching issues and to
# facilitate discussion in the manuscript. Therefore, for this locus I will focus on rs4654828 since it tends to
# be among the top SNPs for the EPHB2 cis-aQTL and all BMI MR trans-aQTLs. However, since subsequent lines of code
# refer to the variables as min_cisE1, etc., I will keep that naming even though it is no longer an accurate description.
cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==1,]
cisE1=cisE1[cisE1$snps %in% sig_bmi$SNP,]
cisE1=cisE1[order(cisE1$pvalue),]
#min_cisE1=cisE1[!duplicated(cisE1$gene),]
min_cisE1=cisE1[cisE1$snps=="rs4654828",]
cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==1,]
cisA1=cisA1[cisA1$snps %in% sig_bmi$SNP,]
cisA1=cisA1[order(cisA1$pvalue),]
#min_cisA1=cisA1[!duplicated(cisA1$gene),]
min_cisA1=cisA1[cisA1$snps=="rs4654828",]
transE1=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$chr==1,]
transE1=transE1[transE1$snps %in% sig_bmi$SNP,]
transE1=transE1[order(transE1$pvalue),]
#min_transE1=transE1[!duplicated(transE1$gene),]
min_transE1=transE1[transE1$snps=="rs4654828",]
transA1=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$chr==1,]
transA1=transA1[transA1$snps %in% sig_bmi$SNP,]
transA1=transA1[order(transA1$pvalue),]
#min_transA1=transA1[!duplicated(transA1$gene),]
min_transA1=transA1[transA1$snps=="rs4654828",]
# Make node tables for 1p36 networks
inter_nodes1p36=data.frame("Node"=as.character(unique(interactome1p36$Regulator)),"BMI_exp_cor"=rep(0,length(unique(interactome1p36$Regulator))),
"BMI_act_cor"=rep(0,length(unique(interactome1p36$Regulator))),"rs4654828_eQTL_Beta"=rep(0,length(unique(interactome1p36$Regulator))),
"rs4654828_eQTL_logP"=rep(0,length(unique(interactome1p36$Regulator))),"rs4654828_aQTL_Beta"=rep(0,length(unique(interactome1p36$Regulator))),
"rs4654828_aQTL_logP"=rep(0,length(unique(interactome1p36$Regulator))))
for(i in 1:dim(inter_nodes1p36)[1]){
inter_nodes1p36$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(inter_nodes1p36$Node[i]),]),filt_pheno$BMI)
inter_nodes1p36$BMI_act_cor[i]=cor(as.numeric(vip[as.character(inter_nodes1p36$Node[i]),]),filt_pheno$BMI)
inter_nodes1p36$rs4654828_eQTL_Beta[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transE1$gene,
min_transE1[min_transE1$gene==as.character(inter_nodes1p36$Node[i]),"beta"],
0)
inter_nodes1p36$rs4654828_eQTL_logP[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transE1$gene,
-log10(min_transE1[min_transE1$gene==as.character(inter_nodes1p36$Node[i]),"pvalue"]),
0)
inter_nodes1p36$rs4654828_aQTL_Beta[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transA1$gene,
min_transA1[min_transA1$gene==as.character(inter_nodes1p36$Node[i]),"beta"],
0)
inter_nodes1p36$rs4654828_aQTL_logP[i]=ifelse(inter_nodes1p36$Node[i] %in% min_transA1$gene,
-log10(min_transA1[min_transA1$gene==as.character(inter_nodes1p36$Node[i]),"pvalue"]),
0)
}
coloc_nodes1p36=data.frame("Node"=as.character(unique(colocNet1p36$traits)),"BMI_exp_cor"=rep(0,length(unique(colocNet1p36$traits))),
"BMI_act_cor"=rep(0,length(unique(colocNet1p36$traits))),"rs4654828_eQTL_Beta"=rep(0,length(unique(colocNet1p36$traits))),
"rs4654828_eQTL_logP"=rep(0,length(unique(colocNet1p36$traits))),"rs4654828_aQTL_Beta"=rep(0,length(unique(colocNet1p36$traits))),
"rs4654828_aQTL_logP"=rep(0,length(unique(colocNet1p36$traits))))
for(i in 1:dim(coloc_nodes1p36)[1]){
coloc_nodes1p36$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(coloc_nodes1p36$Node[i]),]),filt_pheno$BMI)
coloc_nodes1p36$BMI_act_cor[i]=cor(as.numeric(vip[as.character(coloc_nodes1p36$Node[i]),]),filt_pheno$BMI)
coloc_nodes1p36$rs4654828_eQTL_Beta[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transE1$gene,
min_transE1[min_transE1$gene==as.character(coloc_nodes1p36$Node[i]),"beta"],
0)
coloc_nodes1p36$rs4654828_eQTL_logP[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transE1$gene,
-log10(min_transE1[min_transE1$gene==as.character(coloc_nodes1p36$Node[i]),"pvalue"]),
0)
coloc_nodes1p36$rs4654828_aQTL_Beta[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transA1$gene,
min_transA1[min_transA1$gene==as.character(coloc_nodes1p36$Node[i]),"beta"],
0)
coloc_nodes1p36$rs4654828_aQTL_logP[i]=ifelse(coloc_nodes1p36$Node[i] %in% min_transA1$gene,
-log10(min_transA1[min_transA1$gene==as.character(coloc_nodes1p36$Node[i]),"pvalue"]),
0)
}
# Since EPHB2 is the only cis gene here, I'll just deal with it manually
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_eQTL_Beta"]=min_cisE1[min_cisE1$gene=="EPHB2","beta"]
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_eQTL_logP"]=-log10(min_cisE1[min_cisE1$gene=="EPHB2","pvalue"])
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_aQTL_Beta"]=min_cisA1[min_cisA1$gene=="EPHB2","beta"]
inter_nodes1p36[inter_nodes1p36$Node=="EPHB2","rs4654828_aQTL_logP"]=-log10(min_cisA1[min_cisA1$gene=="EPHB2","pvalue"])
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_eQTL_Beta"]=min_cisE1[min_cisE1$gene=="EPHB2","beta"]
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_eQTL_logP"]=-log10(min_cisE1[min_cisE1$gene=="EPHB2","pvalue"])
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_aQTL_Beta"]=min_cisA1[min_cisA1$gene=="EPHB2","beta"]
coloc_nodes1p36[coloc_nodes1p36$Node=="EPHB2","rs4654828_aQTL_logP"]=-log10(min_cisA1[min_cisA1$gene=="EPHB2","pvalue"])
# I think it may be more convenient to merge the networks into one and then just change which attributes I visualize in Cytoscape
# Start with 2 temporary columns concatenating the regulator-target and target-regulator pairs for easier matching.
interactome1p36$temp1=paste(interactome1p36$Regulator,interactome1p36$Target)
interactome1p36$temp2=paste(interactome1p36$Target,interactome1p36$Regulator)
colocNet1p36$temp1=paste(colocNet1p36$trait1,colocNet1p36$traits)
colocNet1p36$temp2=paste(colocNet1p36$traits,colocNet1p36$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(interactome1p36)[1],ncol = 4))
for(i in 1:dim(interactome1p36)[1]){
temp[i,1:4]=colocNet1p36[ifelse(is.na(match(interactome1p36$temp1[i],colocNet1p36$temp1)),
match(interactome1p36$temp1[i],colocNet1p36$temp2),
match(interactome1p36$temp1[i],colocNet1p36$temp1)),3:6]
}
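# (Hedged) an equivalent vectorized lookup for the loop above: take the temp1
# match where available, otherwise fall back to the reversed pair; the NAs for
# unmatched pairs are zeroed just below, as before.
# idx=match(interactome1p36$temp1,colocNet1p36$temp1)
# idx[is.na(idx)]=match(interactome1p36$temp1,colocNet1p36$temp2)[is.na(idx)]
# temp=colocNet1p36[idx,3:6]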
temp[is.na(temp)]=0
# Then combine with the BMI colocalizations
colnames(temp)=colnames(colocNet1p36)[3:6]
temp=rbind(temp,colocNet1p36[colocNet1p36$trait1=="BMI",3:6])
# Then add rows for BMI-Gene connections with 0 for MoA and likelihood
full1p36=interactome1p36[,1:4]
temp2=colocNet1p36[colocNet1p36$trait1=="BMI",1:4]
colnames(temp2)=colnames(interactome1p36)[1:4]
temp2[,3:4]=0
full1p36=rbind(full1p36,temp2)
# Finally, combine the colocalization columns with the interactome columns
full1p36=cbind(full1p36,temp)
# The nodes data also needs to be combined and duplicate rows removed
full1p36_nodes=rbind(inter_nodes1p36,coloc_nodes1p36)
full1p36_nodes=full1p36_nodes[!duplicated(full1p36_nodes$Node),]
# Write networks and node data to file for Cytoscape visualizations
write.table(interactome1p36,"Chr1p36_EPHB2_and_BMI_MRs_interactome.txt",sep = "\t",quote = F,row.names = F)
write.table(inter_nodes1p36,"Chr1p36_EPHB2_and_BMI_MRs_interactome_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(colocNet1p36,"Chr1p36_BMI_EPHB2_and_BMI_MRs_pairwise_colocalization_network.txt",sep = "\t",quote = F,row.names = F)
write.table(coloc_nodes1p36,"Chr1p36_BMI_EPHB2_and_BMI_MRs_pairwise_colocalization_network_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(full1p36,"Chr1p36_EPHB2_and_BMI_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(full1p36_nodes,"Chr1p36_EPHB2_and_BMI_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
# 7q32
# Grab interactions between LINC-PINT, KLF14 and MRs in adipose interactome
linc_bmi_mrs=bmi_MRregs[bmi_MRregs$Regulator=="LINC-PINT",]
linc_homair_mrs=homair_MRregs[homair_MRregs$Regulator=="LINC-PINT",]
linc_hdl_mrs=hdl_MRregs[hdl_MRregs$Regulator=="LINC-PINT",]
linc_triG_mrs=triG_MRregs[triG_MRregs$Regulator=="LINC-PINT",]
bmi_mrsLINC=interactome[(interactome$Regulator %in% bmi_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
homair_mrsLINC=interactome[(interactome$Regulator %in% homair_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
hdl_mrsLINC=interactome[(interactome$Regulator %in% hdl_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
triG_mrsLINC=interactome[(interactome$Regulator %in% triG_mrs[,1]) & (interactome$Target=="LINC-PINT"),]
klf14_bmi_mrs=bmi_MRregs[bmi_MRregs$Regulator=="KLF14",]
klf14_homair_mrs=homair_MRregs[homair_MRregs$Regulator=="KLF14",]
klf14_hdl_mrs=hdl_MRregs[hdl_MRregs$Regulator=="KLF14",]
klf14_triG_mrs=triG_MRregs[triG_MRregs$Regulator=="KLF14",]
bmi_mrsKLF14=interactome[(interactome$Regulator %in% bmi_mrs[,1]) & (interactome$Target=="KLF14"),]
homair_mrsKLF14=interactome[(interactome$Regulator %in% homair_mrs[,1]) & (interactome$Target=="KLF14"),]
hdl_mrsKLF14=interactome[(interactome$Regulator %in% hdl_mrs[,1]) & (interactome$Target=="KLF14"),]
triG_mrsKLF14=interactome[(interactome$Regulator %in% triG_mrs[,1]) & (interactome$Target=="KLF14"),]
bmi_interactome7q32=rbind(bmi_MRMR,linc_bmi_mrs,bmi_mrsLINC,klf14_bmi_mrs,bmi_mrsKLF14)
homair_interactome7q32=rbind(homair_MRMR,linc_homair_mrs,homair_mrsLINC,klf14_homair_mrs,homair_mrsKLF14)
hdl_interactome7q32=rbind(hdl_MRMR,linc_hdl_mrs,hdl_mrsLINC,klf14_hdl_mrs,hdl_mrsKLF14)
triG_interactome7q32=rbind(triG_MRMR,linc_triG_mrs,triG_mrsLINC,klf14_triG_mrs,triG_mrsKLF14)
# Grab pairwise colocalizations with PP>0.5 between each GWAS and QTLs. I did not run pairwise colocalization analyses for LINC-PINT or KLF14 yet.
bmi_pairColoc7q32=bmi_pairColoc[bmi_pairColoc$locus=="7q32" & bmi_pairColoc$posterior_prob>0.5,c(2,3,5)]
bmi_pairColoc7q32$traits=gsub("BMI, ","",bmi_pairColoc7q32$traits)
bmi_pairColoc7q32=cbind("trait1"=rep("BMI",dim(bmi_pairColoc7q32)[1]),bmi_pairColoc7q32)
bmi_e_pairColoc1=bmi_pairColoc7q32[grepl("-e_",bmi_pairColoc7q32$traits),]
bmi_a_pairColoc1=bmi_pairColoc7q32[grepl("-a_",bmi_pairColoc7q32$traits),]
bmi_e_pairColoc1$traits=gsub(".*_","",bmi_e_pairColoc1$traits)
bmi_a_pairColoc1$traits=gsub(".*_","",bmi_a_pairColoc1$traits)
bmi_netPair7q32=rbind(bmi_e_pairColoc1,bmi_a_pairColoc1)
bmi_netPair7q32=bmi_netPair7q32[!duplicated(bmi_netPair7q32$traits),-c(3,4)]
bmi_netPair7q32$eQTL_PP=bmi_e_pairColoc1[match(bmi_netPair7q32$traits,bmi_e_pairColoc1$traits),3]
bmi_netPair7q32$eQTL_SNP=bmi_e_pairColoc1[match(bmi_netPair7q32$traits,bmi_e_pairColoc1$traits),4]
bmi_netPair7q32$aQTL_PP=bmi_a_pairColoc1[match(bmi_netPair7q32$traits,bmi_a_pairColoc1$traits),3]
bmi_netPair7q32$aQTL_SNP=bmi_a_pairColoc1[match(bmi_netPair7q32$traits,bmi_a_pairColoc1$traits),4]
bmi_netPair7q32[is.na(bmi_netPair7q32)]=0
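# The same pairwise-colocalization wrangling repeats below for T2D, HDL and TriG.
# A hedged refactor (sketch, not used here), assuming every *_pairColoc table
# shares the column layout used above:
make_netPair=function(pairColoc,trait,locus){
  pc=pairColoc[pairColoc$locus==locus & pairColoc$posterior_prob>0.5,c(2,3,5)]
  pc$traits=gsub(paste0(trait,", "),"",pc$traits)
  pc=cbind("trait1"=rep(trait,dim(pc)[1]),pc)
  e=pc[grepl("-e_",pc$traits),]
  a=pc[grepl("-a_",pc$traits),]
  e$traits=gsub(".*_","",e$traits)
  a$traits=gsub(".*_","",a$traits)
  np=rbind(e,a)
  np=np[!duplicated(np$traits),-c(3,4)]
  np$eQTL_PP=e[match(np$traits,e$traits),3]
  np$eQTL_SNP=e[match(np$traits,e$traits),4]
  np$aQTL_PP=a[match(np$traits,a$traits),3]
  np$aQTL_SNP=a[match(np$traits,a$traits),4]
  np[is.na(np)]=0
  np
}
# e.g. t2d_netPair7q32 below could equivalently be make_netPair(t2d_pairColoc,"T2D","7q32")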
t2d_pairColoc7q32=t2d_pairColoc[t2d_pairColoc$locus=="7q32" & t2d_pairColoc$posterior_prob>0.5,c(2,3,5)]
t2d_pairColoc7q32$traits=gsub("T2D, ","",t2d_pairColoc7q32$traits)
t2d_pairColoc7q32=cbind("trait1"=rep("T2D",dim(t2d_pairColoc7q32)[1]),t2d_pairColoc7q32)
t2d_e_pairColoc1=t2d_pairColoc7q32[grepl("-e_",t2d_pairColoc7q32$traits),]
t2d_a_pairColoc1=t2d_pairColoc7q32[grepl("-a_",t2d_pairColoc7q32$traits),]
t2d_e_pairColoc1$traits=gsub(".*_","",t2d_e_pairColoc1$traits)
t2d_a_pairColoc1$traits=gsub(".*_","",t2d_a_pairColoc1$traits)
t2d_netPair7q32=rbind(t2d_e_pairColoc1,t2d_a_pairColoc1)
t2d_netPair7q32=t2d_netPair7q32[!duplicated(t2d_netPair7q32$traits),-c(3,4)]
t2d_netPair7q32$eQTL_PP=t2d_e_pairColoc1[match(t2d_netPair7q32$traits,t2d_e_pairColoc1$traits),3]
t2d_netPair7q32$eQTL_SNP=t2d_e_pairColoc1[match(t2d_netPair7q32$traits,t2d_e_pairColoc1$traits),4]
t2d_netPair7q32$aQTL_PP=t2d_a_pairColoc1[match(t2d_netPair7q32$traits,t2d_a_pairColoc1$traits),3]
t2d_netPair7q32$aQTL_SNP=t2d_a_pairColoc1[match(t2d_netPair7q32$traits,t2d_a_pairColoc1$traits),4]
t2d_netPair7q32[is.na(t2d_netPair7q32)]=0
hdl_pairColoc7q32=hdl_pairColoc[hdl_pairColoc$locus=="7q32" & hdl_pairColoc$posterior_prob>0.5,c(2,3,5)]
hdl_pairColoc7q32$traits=gsub("HDL, ","",hdl_pairColoc7q32$traits)
hdl_pairColoc7q32=cbind("trait1"=rep("HDL",dim(hdl_pairColoc7q32)[1]),hdl_pairColoc7q32)
hdl_e_pairColoc1=hdl_pairColoc7q32[grepl("-e_",hdl_pairColoc7q32$traits),]
hdl_a_pairColoc1=hdl_pairColoc7q32[grepl("-a_",hdl_pairColoc7q32$traits),]
hdl_e_pairColoc1$traits=gsub(".*_","",hdl_e_pairColoc1$traits)
hdl_a_pairColoc1$traits=gsub(".*_","",hdl_a_pairColoc1$traits)
hdl_netPair7q32=rbind(hdl_e_pairColoc1,hdl_a_pairColoc1)
hdl_netPair7q32=hdl_netPair7q32[!duplicated(hdl_netPair7q32$traits),-c(3,4)]
hdl_netPair7q32$eQTL_PP=hdl_e_pairColoc1[match(hdl_netPair7q32$traits,hdl_e_pairColoc1$traits),3]
hdl_netPair7q32$eQTL_SNP=hdl_e_pairColoc1[match(hdl_netPair7q32$traits,hdl_e_pairColoc1$traits),4]
hdl_netPair7q32$aQTL_PP=hdl_a_pairColoc1[match(hdl_netPair7q32$traits,hdl_a_pairColoc1$traits),3]
hdl_netPair7q32$aQTL_SNP=hdl_a_pairColoc1[match(hdl_netPair7q32$traits,hdl_a_pairColoc1$traits),4]
hdl_netPair7q32[is.na(hdl_netPair7q32)]=0
triG_pairColoc7q32=triG_pairColoc[triG_pairColoc$locus=="7q32" & triG_pairColoc$posterior_prob>0.5,c(2,3,5)]
triG_pairColoc7q32$traits=gsub("TriG, ","",triG_pairColoc7q32$traits)
triG_pairColoc7q32=cbind("trait1"=rep("TriG",dim(triG_pairColoc7q32)[1]),triG_pairColoc7q32)
triG_e_pairColoc1=triG_pairColoc7q32[grepl("-e_",triG_pairColoc7q32$traits),]
triG_a_pairColoc1=triG_pairColoc7q32[grepl("-a_",triG_pairColoc7q32$traits),]
triG_e_pairColoc1$traits=gsub(".*_","",triG_e_pairColoc1$traits)
triG_a_pairColoc1$traits=gsub(".*_","",triG_a_pairColoc1$traits)
triG_netPair7q32=rbind(triG_e_pairColoc1,triG_a_pairColoc1)
triG_netPair7q32=triG_netPair7q32[!duplicated(triG_netPair7q32$traits),-c(3,4)]
triG_netPair7q32$eQTL_PP=triG_e_pairColoc1[match(triG_netPair7q32$traits,triG_e_pairColoc1$traits),3]
triG_netPair7q32$eQTL_SNP=triG_e_pairColoc1[match(triG_netPair7q32$traits,triG_e_pairColoc1$traits),4]
triG_netPair7q32$aQTL_PP=triG_a_pairColoc1[match(triG_netPair7q32$traits,triG_a_pairColoc1$traits),3]
triG_netPair7q32$aQTL_SNP=triG_a_pairColoc1[match(triG_netPair7q32$traits,triG_a_pairColoc1$traits),4]
triG_netPair7q32[is.na(triG_netPair7q32)]=0
# Grab the -log10(Pmin) and betas for the eQTLs and aQTLs among GWAS significant SNPs at the 7q32 locus
bmi_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
bmi_cisE1=bmi_cisE1[bmi_cisE1$snps %in% sig_bmi$SNP,]
bmi_cisE1=bmi_cisE1[order(bmi_cisE1$pvalue),]
min_bmi_cisE1=bmi_cisE1[!duplicated(bmi_cisE1$gene),]
bmi_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
bmi_cisA1=bmi_cisA1[bmi_cisA1$snps %in% sig_bmi$SNP,]
bmi_cisA1=bmi_cisA1[order(bmi_cisA1$pvalue),]
min_bmi_cisA1=bmi_cisA1[!duplicated(bmi_cisA1$gene),]
bmi_transE1=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$chr==7,]
bmi_transE1=bmi_transE1[bmi_transE1$snps %in% sig_bmi$SNP,]
bmi_transE1=bmi_transE1[order(bmi_transE1$pvalue),]
min_bmi_transE1=bmi_transE1[!duplicated(bmi_transE1$gene),]
bmi_transA1=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$chr==7,]
bmi_transA1=bmi_transA1[bmi_transA1$snps %in% sig_bmi$SNP,]
bmi_transA1=bmi_transA1[order(bmi_transA1$pvalue),]
min_bmi_transA1=bmi_transA1[!duplicated(bmi_transA1$gene),]
t2d_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
t2d_cisE1=t2d_cisE1[t2d_cisE1$snps %in% sig_t2d$rsID,]
t2d_cisE1=t2d_cisE1[order(t2d_cisE1$pvalue),]
min_t2d_cisE1=t2d_cisE1[!duplicated(t2d_cisE1$gene),]
t2d_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
t2d_cisA1=t2d_cisA1[t2d_cisA1$snps %in% sig_t2d$rsID,]
t2d_cisA1=t2d_cisA1[order(t2d_cisA1$pvalue),]
min_t2d_cisA1=t2d_cisA1[!duplicated(t2d_cisA1$gene),]
t2d_transE1=filt_trans_t2d_eqtl[filt_trans_t2d_eqtl$chr==7,]
t2d_transE1=t2d_transE1[t2d_transE1$snps %in% sig_t2d$rsID,]
t2d_transE1=t2d_transE1[order(t2d_transE1$pvalue),]
min_t2d_transE1=t2d_transE1[!duplicated(t2d_transE1$gene),]
t2d_transA1=filt_trans_t2d_aqtl[filt_trans_t2d_aqtl$chr==7,]
t2d_transA1=t2d_transA1[t2d_transA1$snps %in% sig_t2d$rsID,]
t2d_transA1=t2d_transA1[order(t2d_transA1$pvalue),]
min_t2d_transA1=t2d_transA1[!duplicated(t2d_transA1$gene),]
hdl_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
hdl_cisE1=hdl_cisE1[hdl_cisE1$snps %in% sig_hdl$SNP,]
hdl_cisE1=hdl_cisE1[order(hdl_cisE1$pvalue),]
min_hdl_cisE1=hdl_cisE1[!duplicated(hdl_cisE1$gene),]
hdl_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
hdl_cisA1=hdl_cisA1[hdl_cisA1$snps %in% sig_hdl$SNP,]
hdl_cisA1=hdl_cisA1[order(hdl_cisA1$pvalue),]
min_hdl_cisA1=hdl_cisA1[!duplicated(hdl_cisA1$gene),]
hdl_transE1=filt_trans_hdl_eqtl[filt_trans_hdl_eqtl$chr==7,]
hdl_transE1=hdl_transE1[hdl_transE1$snps %in% sig_hdl$SNP,]
hdl_transE1=hdl_transE1[order(hdl_transE1$pvalue),]
min_hdl_transE1=hdl_transE1[!duplicated(hdl_transE1$gene),]
hdl_transA1=filt_trans_hdl_aqtl[filt_trans_hdl_aqtl$chr==7,]
hdl_transA1=hdl_transA1[hdl_transA1$snps %in% sig_hdl$SNP,]
hdl_transA1=hdl_transA1[order(hdl_transA1$pvalue),]
min_hdl_transA1=hdl_transA1[!duplicated(hdl_transA1$gene),]
triG_cisE1=filt_cis_eqtl[filt_cis_eqtl$chr==7,]
triG_cisE1=triG_cisE1[triG_cisE1$snps %in% sig_triG$SNP,]
triG_cisE1=triG_cisE1[order(triG_cisE1$pvalue),]
min_triG_cisE1=triG_cisE1[!duplicated(triG_cisE1$gene),]
triG_cisA1=filt_cis_aqtl[filt_cis_aqtl$chr==7,]
triG_cisA1=triG_cisA1[triG_cisA1$snps %in% sig_triG$SNP,]
triG_cisA1=triG_cisA1[order(triG_cisA1$pvalue),]
min_triG_cisA1=triG_cisA1[!duplicated(triG_cisA1$gene),]
triG_transE1=filt_trans_triG_eqtl[filt_trans_triG_eqtl$chr==7,]
triG_transE1=triG_transE1[triG_transE1$snps %in% sig_triG$SNP,]
triG_transE1=triG_transE1[order(triG_transE1$pvalue),]
min_triG_transE1=triG_transE1[!duplicated(triG_transE1$gene),]
triG_transA1=filt_trans_triG_aqtl[filt_trans_triG_aqtl$chr==7,]
triG_transA1=triG_transA1[triG_transA1$snps %in% sig_triG$SNP,]
triG_transA1=triG_transA1[order(triG_transA1$pvalue),]
min_triG_transA1=triG_transA1[!duplicated(triG_transA1$gene),]
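# All of the cis/trans extractions above follow one pattern; a hedged helper
# (sketch, not used here):
top_per_gene=function(qtl,snps_keep,chrom){
  q=qtl[qtl$chr==chrom,]
  q=q[q$snps %in% snps_keep,]
  q=q[order(q$pvalue),]
  q[!duplicated(q$gene),] # keeps the minimum-pvalue SNP per gene
}
# e.g. min_triG_transA1 could equivalently be top_per_gene(filt_trans_triG_aqtl,sig_triG$SNP,7)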
# Make node tables for 7q32 networks for each GWAS. Note that for some GWAS (T2D and TriG) no MRs connected
# with LINC-PINT or KLF14 at all, so I manually added those genes to the node lists where needed.
# BMI
bmi_inter_nodes7q32=data.frame("Node"=as.character(unique(bmi_interactome7q32$Regulator)),"BMI_exp_cor"=rep(0,length(unique(bmi_interactome7q32$Regulator))),
"BMI_act_cor"=rep(0,length(unique(bmi_interactome7q32$Regulator))),"Best_eQTL_Beta"=rep(0,length(unique(bmi_interactome7q32$Regulator))),
"Best_eQTL_logP"=rep(0,length(unique(bmi_interactome7q32$Regulator))),"Best_aQTL_Beta"=rep(0,length(unique(bmi_interactome7q32$Regulator))),
"Best_aQTL_logP"=rep(0,length(unique(bmi_interactome7q32$Regulator))))
bmi_coloc_nodes7q32=data.frame("Node"=as.character(unique(bmi_netPair7q32$traits)),"BMI_exp_cor"=rep(0,length(unique(bmi_netPair7q32$traits))),
"BMI_act_cor"=rep(0,length(unique(bmi_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(bmi_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(bmi_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(bmi_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(bmi_netPair7q32$traits))))
for(i in 1:dim(bmi_inter_nodes7q32)[1]){
bmi_inter_nodes7q32$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(bmi_inter_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_inter_nodes7q32$BMI_act_cor[i]=cor(as.numeric(vip[as.character(bmi_inter_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"beta"],
0)
bmi_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
-log10(min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"pvalue"]),
0)
bmi_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"beta"],
0)
bmi_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(bmi_inter_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
-log10(min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(bmi_coloc_nodes7q32)[1]){
bmi_coloc_nodes7q32$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(bmi_coloc_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_coloc_nodes7q32$BMI_act_cor[i]=cor(as.numeric(vip[as.character(bmi_coloc_nodes7q32$Node[i]),]),filt_pheno$BMI)
bmi_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"beta"],
0)
bmi_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transE1$gene,
-log10(min_bmi_transE1[min_bmi_transE1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
bmi_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"beta"],
0)
bmi_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(bmi_coloc_nodes7q32$Node[i] %in% min_bmi_transA1$gene,
-log10(min_bmi_transA1[min_bmi_transA1$gene==as.character(bmi_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
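# The node-annotation loops above repeat for each GWAS below; a hedged helper
# (sketch, not used here), assuming tpm/vip rows are gene symbols, filt_pheno
# holds the phenotype column, and columns 2:7 of the node table follow the
# layout above:
annotate_nodes=function(nodes,pheno_col,transE,transA){
  ok=rownames(filt_pheno)[!is.na(filt_pheno[[pheno_col]])]
  for(i in 1:dim(nodes)[1]){
    g=as.character(nodes$Node[i])
    nodes[i,2]=cor(as.numeric(tpm[g,ok]),filt_pheno[ok,pheno_col])
    nodes[i,3]=cor(as.numeric(vip[g,ok]),filt_pheno[ok,pheno_col])
    if(g %in% transE$gene){
      nodes[i,4]=transE[transE$gene==g,"beta"]
      nodes[i,5]=-log10(transE[transE$gene==g,"pvalue"])
    }
    if(g %in% transA$gene){
      nodes[i,6]=transA[transA$gene==g,"beta"]
      nodes[i,7]=-log10(transA[transA$gene==g,"pvalue"])
    }
  }
  nodes
}
# e.g. the BMI loops above could be replaced by
# annotate_nodes(bmi_inter_nodes7q32,"BMI",min_bmi_transE1,min_bmi_transA1)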
# T2D
t2d_inter_nodes7q32=data.frame("Node"=c(as.character(unique(homair_interactome7q32$Regulator)),"LINC-PINT"),"HOMA.IR_exp_cor"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),
"HOMA.IR_act_cor"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),"Best_eQTL_Beta"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),
"Best_eQTL_logP"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),"Best_aQTL_Beta"=rep(0,length(unique(homair_interactome7q32$Regulator))+1),
"Best_aQTL_logP"=rep(0,length(unique(homair_interactome7q32$Regulator))+1))
t2d_coloc_nodes7q32=data.frame("Node"=as.character(unique(t2d_netPair7q32$traits)),"HOMA.IR_exp_cor"=rep(0,length(unique(t2d_netPair7q32$traits))),
"HOMA.IR_act_cor"=rep(0,length(unique(t2d_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(t2d_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(t2d_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(t2d_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(t2d_netPair7q32$traits))))
for(i in 1:dim(t2d_inter_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HOMA.IR)]
t2d_inter_nodes7q32$HOMA.IR_exp_cor[i]=cor(as.numeric(tpm[as.character(t2d_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_inter_nodes7q32$HOMA.IR_act_cor[i]=cor(as.numeric(vip[as.character(t2d_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"beta"],
0)
t2d_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
-log10(min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"pvalue"]),
0)
t2d_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"beta"],
0)
t2d_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(t2d_inter_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
-log10(min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(t2d_coloc_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HOMA.IR)]
t2d_coloc_nodes7q32$HOMA.IR_exp_cor[i]=cor(as.numeric(tpm[as.character(t2d_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_coloc_nodes7q32$HOMA.IR_act_cor[i]=cor(as.numeric(vip[as.character(t2d_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HOMA.IR"])
t2d_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"beta"],
0)
t2d_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transE1$gene,
-log10(min_t2d_transE1[min_t2d_transE1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
t2d_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"beta"],
0)
t2d_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(t2d_coloc_nodes7q32$Node[i] %in% min_t2d_transA1$gene,
-log10(min_t2d_transA1[min_t2d_transA1$gene==as.character(t2d_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
# HDL
hdl_inter_nodes7q32=data.frame("Node"=as.character(unique(hdl_interactome7q32$Regulator)),"HDL_exp_cor"=rep(0,length(unique(hdl_interactome7q32$Regulator))),
"HDL_act_cor"=rep(0,length(unique(hdl_interactome7q32$Regulator))),"Best_eQTL_Beta"=rep(0,length(unique(hdl_interactome7q32$Regulator))),
"Best_eQTL_logP"=rep(0,length(unique(hdl_interactome7q32$Regulator))),"Best_aQTL_Beta"=rep(0,length(unique(hdl_interactome7q32$Regulator))),
"Best_aQTL_logP"=rep(0,length(unique(hdl_interactome7q32$Regulator))))
hdl_coloc_nodes7q32=data.frame("Node"=as.character(unique(hdl_netPair7q32$traits)),"HDL_exp_cor"=rep(0,length(unique(hdl_netPair7q32$traits))),
"HDL_act_cor"=rep(0,length(unique(hdl_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(hdl_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(hdl_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(hdl_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(hdl_netPair7q32$traits))))
for(i in 1:dim(hdl_inter_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HDLcholesterol)]
hdl_inter_nodes7q32$HDL_exp_cor[i]=cor(as.numeric(tpm[as.character(hdl_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_inter_nodes7q32$HDL_act_cor[i]=cor(as.numeric(vip[as.character(hdl_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"beta"],
0)
hdl_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
-log10(min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"pvalue"]),
0)
hdl_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"beta"],
0)
hdl_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(hdl_inter_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
-log10(min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(hdl_coloc_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$HDLcholesterol)]
hdl_coloc_nodes7q32$HDL_exp_cor[i]=cor(as.numeric(tpm[as.character(hdl_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_coloc_nodes7q32$HDL_act_cor[i]=cor(as.numeric(vip[as.character(hdl_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"HDLcholesterol"])
hdl_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"beta"],
0)
hdl_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transE1$gene,
-log10(min_hdl_transE1[min_hdl_transE1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
hdl_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"beta"],
0)
hdl_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(hdl_coloc_nodes7q32$Node[i] %in% min_hdl_transA1$gene,
-log10(min_hdl_transA1[min_hdl_transA1$gene==as.character(hdl_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
# TriG
triG_inter_nodes7q32=data.frame("Node"=c(as.character(unique(triG_interactome7q32$Regulator)),"LINC-PINT","KLF14"),"TriG_exp_cor"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),
"TriG_act_cor"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),"Best_eQTL_Beta"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),
"Best_eQTL_logP"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),"Best_aQTL_Beta"=rep(0,length(unique(triG_interactome7q32$Regulator))+2),
"Best_aQTL_logP"=rep(0,length(unique(triG_interactome7q32$Regulator))+2))
triG_coloc_nodes7q32=data.frame("Node"=as.character(unique(triG_netPair7q32$traits)),"TriG_exp_cor"=rep(0,length(unique(triG_netPair7q32$traits))),
"TriG_act_cor"=rep(0,length(unique(triG_netPair7q32$traits))),"Best_eQTL_Beta"=rep(0,length(unique(triG_netPair7q32$traits))),
"Best_eQTL_logP"=rep(0,length(unique(triG_netPair7q32$traits))),"Best_aQTL_Beta"=rep(0,length(unique(triG_netPair7q32$traits))),
"Best_aQTL_logP"=rep(0,length(unique(triG_netPair7q32$traits))))
for(i in 1:dim(triG_inter_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$TotalTriglycerides)]
triG_inter_nodes7q32$TriG_exp_cor[i]=cor(as.numeric(tpm[as.character(triG_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_inter_nodes7q32$TriG_act_cor[i]=cor(as.numeric(vip[as.character(triG_inter_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_inter_nodes7q32$Best_eQTL_Beta[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transE1$gene,
min_triG_transE1[min_triG_transE1$gene==as.character(triG_inter_nodes7q32$Node[i]),"beta"],
0)
triG_inter_nodes7q32$Best_eQTL_logP[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transE1$gene,
-log10(min_triG_transE1[min_triG_transE1$gene==as.character(triG_inter_nodes7q32$Node[i]),"pvalue"]),
0)
triG_inter_nodes7q32$Best_aQTL_Beta[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transA1$gene,
min_triG_transA1[min_triG_transA1$gene==as.character(triG_inter_nodes7q32$Node[i]),"beta"],
0)
triG_inter_nodes7q32$Best_aQTL_logP[i]=ifelse(triG_inter_nodes7q32$Node[i] %in% min_triG_transA1$gene,
-log10(min_triG_transA1[min_triG_transA1$gene==as.character(triG_inter_nodes7q32$Node[i]),"pvalue"]),
0)
}
for(i in 1:dim(triG_coloc_nodes7q32)[1]){
noNA_samples=rownames(filt_pheno)[!is.na(filt_pheno$TotalTriglycerides)]
triG_coloc_nodes7q32$TriG_exp_cor[i]=cor(as.numeric(tpm[as.character(triG_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_coloc_nodes7q32$TriG_act_cor[i]=cor(as.numeric(vip[as.character(triG_coloc_nodes7q32$Node[i]),noNA_samples]),filt_pheno[noNA_samples,"TotalTriglycerides"])
triG_coloc_nodes7q32$Best_eQTL_Beta[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transE1$gene,
min_triG_transE1[min_triG_transE1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"beta"],
0)
triG_coloc_nodes7q32$Best_eQTL_logP[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transE1$gene,
-log10(min_triG_transE1[min_triG_transE1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
triG_coloc_nodes7q32$Best_aQTL_Beta[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transA1$gene,
min_triG_transA1[min_triG_transA1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"beta"],
0)
triG_coloc_nodes7q32$Best_aQTL_logP[i]=ifelse(triG_coloc_nodes7q32$Node[i] %in% min_triG_transA1$gene,
-log10(min_triG_transA1[min_triG_transA1$gene==as.character(triG_coloc_nodes7q32$Node[i]),"pvalue"]),
0)
}
# I think it may be more convenient to merge the networks into one and then just change which attributes I visualize in Cytoscape
# Start with 2 temporary columns concatenating the regulator-target and target-regulator pairs for easier matching.
# BMI
bmi_interactome7q32$temp1=paste(bmi_interactome7q32$Regulator,bmi_interactome7q32$Target)
bmi_interactome7q32$temp2=paste(bmi_interactome7q32$Target,bmi_interactome7q32$Regulator)
bmi_netPair7q32$temp1=paste(bmi_netPair7q32$trait1,bmi_netPair7q32$traits)
bmi_netPair7q32$temp2=paste(bmi_netPair7q32$traits,bmi_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(bmi_interactome7q32)[1],ncol = 4))
for(i in 1:dim(bmi_interactome7q32)[1]){
temp[i,1:4]=bmi_netPair7q32[ifelse(is.na(match(bmi_interactome7q32$temp1[i],bmi_netPair7q32$temp1)),
match(bmi_interactome7q32$temp1[i],bmi_netPair7q32$temp2),
match(bmi_interactome7q32$temp1[i],bmi_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the BMI colocalizations
colnames(temp)=colnames(bmi_netPair7q32)[3:6]
temp=rbind(temp,bmi_netPair7q32[bmi_netPair7q32$trait1=="BMI",3:6])
# Then add rows for BMI-Gene connections with 0 for MoA and likelihood
bmi_full7q32=bmi_interactome7q32[,1:4]
temp2=bmi_netPair7q32[bmi_netPair7q32$trait1=="BMI",1:4]
colnames(temp2)=colnames(bmi_interactome7q32)[1:4]
temp2[,3:4]=0
bmi_full7q32=rbind(bmi_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
bmi_full7q32=cbind(bmi_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
bmi_full7q32_nodes=rbind(bmi_inter_nodes7q32,bmi_coloc_nodes7q32)
bmi_full7q32_nodes=bmi_full7q32_nodes[!duplicated(bmi_full7q32_nodes$Node),]
# T2D
homair_interactome7q32$temp1=paste(homair_interactome7q32$Regulator,homair_interactome7q32$Target)
homair_interactome7q32$temp2=paste(homair_interactome7q32$Target,homair_interactome7q32$Regulator)
t2d_netPair7q32$temp1=paste(t2d_netPair7q32$trait1,t2d_netPair7q32$traits)
t2d_netPair7q32$temp2=paste(t2d_netPair7q32$traits,t2d_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(homair_interactome7q32)[1],ncol = 4))
for(i in 1:dim(homair_interactome7q32)[1]){
temp[i,1:4]=t2d_netPair7q32[ifelse(is.na(match(homair_interactome7q32$temp1[i],t2d_netPair7q32$temp1)),
match(homair_interactome7q32$temp1[i],t2d_netPair7q32$temp2),
match(homair_interactome7q32$temp1[i],t2d_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the T2D colocalizations
colnames(temp)=colnames(t2d_netPair7q32)[3:6]
temp=rbind(temp,t2d_netPair7q32[t2d_netPair7q32$trait1=="T2D",3:6])
# Then add rows for T2D-Gene connections with 0 for MoA and likelihood
t2d_full7q32=homair_interactome7q32[,1:4]
temp2=t2d_netPair7q32[t2d_netPair7q32$trait1=="T2D",1:4]
colnames(temp2)=colnames(homair_interactome7q32)[1:4]
temp2[,3:4]=0
t2d_full7q32=rbind(t2d_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
t2d_full7q32=cbind(t2d_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
t2d_full7q32_nodes=rbind(t2d_inter_nodes7q32,t2d_coloc_nodes7q32)
t2d_full7q32_nodes=t2d_full7q32_nodes[!duplicated(t2d_full7q32_nodes$Node),]
# HDL
hdl_interactome7q32$temp1=paste(hdl_interactome7q32$Regulator,hdl_interactome7q32$Target)
hdl_interactome7q32$temp2=paste(hdl_interactome7q32$Target,hdl_interactome7q32$Regulator)
hdl_netPair7q32$temp1=paste(hdl_netPair7q32$trait1,hdl_netPair7q32$traits)
hdl_netPair7q32$temp2=paste(hdl_netPair7q32$traits,hdl_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(hdl_interactome7q32)[1],ncol = 4))
for(i in 1:dim(hdl_interactome7q32)[1]){
temp[i,1:4]=hdl_netPair7q32[ifelse(is.na(match(hdl_interactome7q32$temp1[i],hdl_netPair7q32$temp1)),
match(hdl_interactome7q32$temp1[i],hdl_netPair7q32$temp2),
match(hdl_interactome7q32$temp1[i],hdl_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the HDL colocalizations
colnames(temp)=colnames(hdl_netPair7q32)[3:6]
temp=rbind(temp,hdl_netPair7q32[hdl_netPair7q32$trait1=="HDL",3:6])
# Then add rows for HDL-Gene connections with 0 for MoA and likelihood
hdl_full7q32=hdl_interactome7q32[,1:4]
temp2=hdl_netPair7q32[hdl_netPair7q32$trait1=="HDL",1:4]
colnames(temp2)=colnames(hdl_interactome7q32)[1:4]
temp2[,3:4]=0
hdl_full7q32=rbind(hdl_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
hdl_full7q32=cbind(hdl_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
hdl_full7q32_nodes=rbind(hdl_inter_nodes7q32,hdl_coloc_nodes7q32)
hdl_full7q32_nodes=hdl_full7q32_nodes[!duplicated(hdl_full7q32_nodes$Node),]
# TriG
triG_interactome7q32$temp1=paste(triG_interactome7q32$Regulator,triG_interactome7q32$Target)
triG_interactome7q32$temp2=paste(triG_interactome7q32$Target,triG_interactome7q32$Regulator)
triG_netPair7q32$temp1=paste(triG_netPair7q32$trait1,triG_netPair7q32$traits)
triG_netPair7q32$temp2=paste(triG_netPair7q32$traits,triG_netPair7q32$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(triG_interactome7q32)[1],ncol = 4))
for(i in 1:dim(triG_interactome7q32)[1]){
temp[i,1:4]=triG_netPair7q32[ifelse(is.na(match(triG_interactome7q32$temp1[i],triG_netPair7q32$temp1)),
match(triG_interactome7q32$temp1[i],triG_netPair7q32$temp2),
match(triG_interactome7q32$temp1[i],triG_netPair7q32$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the TriG colocalizations
colnames(temp)=colnames(triG_netPair7q32)[3:6]
temp=rbind(temp,triG_netPair7q32[triG_netPair7q32$trait1=="TriG",3:6])
# Then add rows for TriG-Gene connections with 0 for MoA and likelihood
triG_full7q32=triG_interactome7q32[,1:4]
temp2=triG_netPair7q32[triG_netPair7q32$trait1=="TriG",1:4]
colnames(temp2)=colnames(triG_interactome7q32)[1:4]
temp2[,3:4]=0
triG_full7q32=rbind(triG_full7q32,temp2)
# Finally, combine the colocalization columns with the interactome columns
triG_full7q32=cbind(triG_full7q32,temp)
# The nodes data also needs to be combined and duplicate rows removed
triG_full7q32_nodes=rbind(triG_inter_nodes7q32,triG_coloc_nodes7q32)
triG_full7q32_nodes=triG_full7q32_nodes[!duplicated(triG_full7q32_nodes$Node),]
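# The interactome/colocalization merge above was repeated once per GWAS; a
# hedged refactor (sketch, not used here) wrapping those steps:
merge_networks=function(inter,netPair,trait){
  inter$temp1=paste(inter$Regulator,inter$Target)
  inter$temp2=paste(inter$Target,inter$Regulator)
  netPair$temp1=paste(netPair$trait1,netPair$traits)
  netPair$temp2=paste(netPair$traits,netPair$trait1)
  idx=match(inter$temp1,netPair$temp1)
  idx[is.na(idx)]=match(inter$temp1,netPair$temp2)[is.na(idx)]
  temp=netPair[idx,3:6]
  temp[is.na(temp)]=0
  temp=rbind(temp,netPair[netPair$trait1==trait,3:6])
  full=inter[,1:4]
  temp2=netPair[netPair$trait1==trait,1:4]
  colnames(temp2)=colnames(inter)[1:4]
  temp2[,3:4]=0
  full=rbind(full,temp2)
  cbind(full,temp)
}
# e.g. triG_full7q32 above could equivalently be
# merge_networks(triG_interactome7q32,triG_netPair7q32,"TriG")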
# Since LINC-PINT, KLF14 and AC016831.7 are the only cis genes here, I'll just deal with them manually
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_bmi_cisE1[min_bmi_cisE1$gene=="LINC-PINT","beta"]
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_bmi_cisE1[min_bmi_cisE1$gene=="LINC-PINT","pvalue"])
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_bmi_cisE1[min_bmi_cisE1$gene=="KLF14","beta"]
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_bmi_cisE1[min_bmi_cisE1$gene=="KLF14","pvalue"])
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_bmi_cisE1[min_bmi_cisE1$gene=="AC016831.7","beta"]
bmi_full7q32_nodes[bmi_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_bmi_cisE1[min_bmi_cisE1$gene=="AC016831.7","pvalue"])
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_t2d_cisE1[min_t2d_cisE1$gene=="LINC-PINT","beta"]
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_t2d_cisE1[min_t2d_cisE1$gene=="LINC-PINT","pvalue"])
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_t2d_cisE1[min_t2d_cisE1$gene=="KLF14","beta"]
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_t2d_cisE1[min_t2d_cisE1$gene=="KLF14","pvalue"])
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_t2d_cisE1[min_t2d_cisE1$gene=="AC016831.7","beta"]
t2d_full7q32_nodes[t2d_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_t2d_cisE1[min_t2d_cisE1$gene=="AC016831.7","pvalue"])
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_hdl_cisE1[min_hdl_cisE1$gene=="LINC-PINT","beta"]
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_hdl_cisE1[min_hdl_cisE1$gene=="LINC-PINT","pvalue"])
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_hdl_cisE1[min_hdl_cisE1$gene=="KLF14","beta"]
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_hdl_cisE1[min_hdl_cisE1$gene=="KLF14","pvalue"])
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_hdl_cisE1[min_hdl_cisE1$gene=="AC016831.7","beta"]
hdl_full7q32_nodes[hdl_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_hdl_cisE1[min_hdl_cisE1$gene=="AC016831.7","pvalue"])
triG_full7q32_nodes[triG_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_Beta"]=min_triG_cisE1[min_triG_cisE1$gene=="LINC-PINT","beta"]
triG_full7q32_nodes[triG_full7q32_nodes$Node=="LINC-PINT","Best_eQTL_logP"]=-log10(min_triG_cisE1[min_triG_cisE1$gene=="LINC-PINT","pvalue"])
triG_full7q32_nodes[triG_full7q32_nodes$Node=="KLF14","Best_eQTL_Beta"]=min_triG_cisE1[min_triG_cisE1$gene=="KLF14","beta"]
triG_full7q32_nodes[triG_full7q32_nodes$Node=="KLF14","Best_eQTL_logP"]=-log10(min_triG_cisE1[min_triG_cisE1$gene=="KLF14","pvalue"])
triG_full7q32_nodes[triG_full7q32_nodes$Node=="AC016831.7","Best_eQTL_Beta"]=min_triG_cisE1[min_triG_cisE1$gene=="AC016831.7","beta"]
triG_full7q32_nodes[triG_full7q32_nodes$Node=="AC016831.7","Best_eQTL_logP"]=-log10(min_triG_cisE1[min_triG_cisE1$gene=="AC016831.7","pvalue"])
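# (Hedged refactor sketch, not used here) the twelve cis-gene assignments above
# share one pattern; the is.na guard also avoids a length-zero replacement when
# a gene is absent from the cis table:
set_cis_node=function(nodes,gene,cisE){
  if(gene %in% cisE$gene & gene %in% nodes$Node){
    nodes[nodes$Node==gene,"Best_eQTL_Beta"]=cisE[cisE$gene==gene,"beta"]
    nodes[nodes$Node==gene,"Best_eQTL_logP"]=-log10(cisE[cisE$gene==gene,"pvalue"])
  }
  nodes
}
# e.g. bmi_full7q32_nodes could equivalently be set_cis_node(bmi_full7q32_nodes,"LINC-PINT",min_bmi_cisE1)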
# Final touches by replacing NA with 0
bmi_full7q32_nodes[is.na(bmi_full7q32_nodes)]=0
t2d_full7q32_nodes[is.na(t2d_full7q32_nodes)]=0
hdl_full7q32_nodes[is.na(hdl_full7q32_nodes)]=0
triG_full7q32_nodes[is.na(triG_full7q32_nodes)]=0
# Write networks and node data to file for Cytoscape visualizations
write.table(bmi_full7q32,"./BMI/Chr7q32_cis-Genes_and_BMI_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(bmi_full7q32_nodes,"./BMI/Chr7q32_cis-Genes_and_BMI_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(t2d_full7q32,"./T2D/Chr7q32_cis-Genes_and_HOMA-IR_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(t2d_full7q32_nodes,"./T2D/Chr7q32_cis-Genes_and_HOMA-IR_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(hdl_full7q32,"./HDL/Chr7q32_cis-Genes_and_HDL_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(hdl_full7q32_nodes,"./HDL/Chr7q32_cis-Genes_and_HDL_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(triG_full7q32,"./Triglycerides/Chr7q32_cis-Genes_and_Triglycerides_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(triG_full7q32_nodes,"./Triglycerides/Chr7q32_cis-Genes_and_Triglycerides_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
# 12p13.1
interactome12p13=bmi_MRMR
# Grab pairwise colocalizations with PP>0.5 between BMI and QTLs at the 12p13.1 locus
bmi_pairColoc12p13=bmi_pairColoc[bmi_pairColoc$locus=="12p13.1" & bmi_pairColoc$posterior_prob>0.5,c(2,3,5)]
bmi_pairColoc12p13$traits=gsub("BMI, ","",bmi_pairColoc12p13$traits)
bmi_pairColoc12p13=cbind("trait1"=rep("BMI",dim(bmi_pairColoc12p13)[1]),bmi_pairColoc12p13)
bmi_e_pairColoc12p13=bmi_pairColoc12p13[grepl("-e_",bmi_pairColoc12p13$traits),]
bmi_a_pairColoc12p13=bmi_pairColoc12p13[grepl("-a_",bmi_pairColoc12p13$traits),]
bmi_e_pairColoc12p13$traits=gsub(".*_","",bmi_e_pairColoc12p13$traits)
bmi_a_pairColoc12p13$traits=gsub(".*_","",bmi_a_pairColoc12p13$traits)
bmi_netPair12p13=rbind(bmi_e_pairColoc12p13,bmi_a_pairColoc12p13)
bmi_netPair12p13=bmi_netPair12p13[!duplicated(bmi_netPair12p13$traits),-c(3,4)]
bmi_netPair12p13$eQTL_PP=bmi_e_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_e_pairColoc12p13$traits),3]
bmi_netPair12p13$eQTL_SNP=bmi_e_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_e_pairColoc12p13$traits),4]
bmi_netPair12p13$aQTL_PP=bmi_a_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_a_pairColoc12p13$traits),3]
bmi_netPair12p13$aQTL_SNP=bmi_a_pairColoc12p13[match(bmi_netPair12p13$traits,bmi_a_pairColoc12p13$traits),4]
colocNet12p13=bmi_netPair12p13
colocNet12p13[is.na(colocNet12p13)]=0
# Grab the -log10(Pmin) and betas for the eQTLs and aQTLs among BMI GWAS significant SNPs at the 12p13.1 locus
transE4=filt_trans_bmi_eqtl[filt_trans_bmi_eqtl$chr==12 & filt_trans_bmi_eqtl$position>13900000 & filt_trans_bmi_eqtl$position<15000000,]
transE4=transE4[transE4$snps %in% sig_bmi$SNP,]
transE4=transE4[order(transE4$pvalue),]
min_transE4=transE4[!duplicated(transE4$gene),]
transA4=filt_trans_bmi_aqtl[filt_trans_bmi_aqtl$chr==12 & filt_trans_bmi_aqtl$position>13900000 & filt_trans_bmi_aqtl$position<15000000,]
transA4=transA4[transA4$snps %in% sig_bmi$SNP,]
transA4=transA4[order(transA4$pvalue),]
min_transA4=transA4[!duplicated(transA4$gene),]
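# (Hedged) with the top_per_gene sketch from earlier, min_transE4 could
# equivalently be obtained by pre-filtering to the 12p13.1 window:
# win=filt_trans_bmi_eqtl$position>13900000 & filt_trans_bmi_eqtl$position<15000000
# min_transE4=top_per_gene(filt_trans_bmi_eqtl[win,],sig_bmi$SNP,12)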
# Make node tables for 12p13 networks
inter_nodes12p13=data.frame("Node"=as.character(unique(interactome12p13$Regulator)),"BMI_exp_cor"=rep(0,length(unique(interactome12p13$Regulator))),
"BMI_act_cor"=rep(0,length(unique(interactome12p13$Regulator))),"Best_eQTL_Beta"=rep(0,length(unique(interactome12p13$Regulator))),
"Best_eQTL_logP"=rep(0,length(unique(interactome12p13$Regulator))),"Best_aQTL_Beta"=rep(0,length(unique(interactome12p13$Regulator))),
"Best_aQTL_logP"=rep(0,length(unique(interactome12p13$Regulator))))
for(i in 1:dim(inter_nodes12p13)[1]){
inter_nodes12p13$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(inter_nodes12p13$Node[i]),]),filt_pheno$BMI)
inter_nodes12p13$BMI_act_cor[i]=cor(as.numeric(vip[as.character(inter_nodes12p13$Node[i]),]),filt_pheno$BMI)
inter_nodes12p13$Best_eQTL_Beta[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transE4$gene,
min_transE4[min_transE4$gene==as.character(inter_nodes12p13$Node[i]),"beta"],
0)
inter_nodes12p13$Best_eQTL_logP[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transE4$gene,
-log10(min_transE4[min_transE4$gene==as.character(inter_nodes12p13$Node[i]),"pvalue"]),
0)
inter_nodes12p13$Best_aQTL_Beta[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transA4$gene,
min_transA4[min_transA4$gene==as.character(inter_nodes12p13$Node[i]),"beta"],
0)
inter_nodes12p13$Best_aQTL_logP[i]=ifelse(inter_nodes12p13$Node[i] %in% min_transA4$gene,
-log10(min_transA4[min_transA4$gene==as.character(inter_nodes12p13$Node[i]),"pvalue"]),
0)
}
coloc_nodes12p13=data.frame("Node"=as.character(unique(colocNet12p13$traits)),"BMI_exp_cor"=rep(0,length(unique(colocNet12p13$traits))),
"BMI_act_cor"=rep(0,length(unique(colocNet12p13$traits))),"Best_eQTL_Beta"=rep(0,length(unique(colocNet12p13$traits))),
"Best_eQTL_logP"=rep(0,length(unique(colocNet12p13$traits))),"Best_aQTL_Beta"=rep(0,length(unique(colocNet12p13$traits))),
"Best_aQTL_logP"=rep(0,length(unique(colocNet12p13$traits))))
for(i in 1:dim(coloc_nodes12p13)[1]){
coloc_nodes12p13$BMI_exp_cor[i]=cor(as.numeric(tpm[as.character(coloc_nodes12p13$Node[i]),]),filt_pheno$BMI)
coloc_nodes12p13$BMI_act_cor[i]=cor(as.numeric(vip[as.character(coloc_nodes12p13$Node[i]),]),filt_pheno$BMI)
coloc_nodes12p13$Best_eQTL_Beta[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transE4$gene,
min_transE4[min_transE4$gene==as.character(coloc_nodes12p13$Node[i]),"beta"],
0)
coloc_nodes12p13$Best_eQTL_logP[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transE4$gene,
-log10(min_transE4[min_transE4$gene==as.character(coloc_nodes12p13$Node[i]),"pvalue"]),
0)
coloc_nodes12p13$Best_aQTL_Beta[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transA4$gene,
min_transA4[min_transA4$gene==as.character(coloc_nodes12p13$Node[i]),"beta"],
0)
coloc_nodes12p13$Best_aQTL_logP[i]=ifelse(coloc_nodes12p13$Node[i] %in% min_transA4$gene,
-log10(min_transA4[min_transA4$gene==as.character(coloc_nodes12p13$Node[i]),"pvalue"]),
0)
}
# I think it may be more convenient to merge the networks into one and then just change which attributes I visualize in Cytoscape
# Start with 2 temporary columns concatenating the regulator-target and target-regulator pairs for easier matching.
interactome12p13$temp1=paste(interactome12p13$Regulator,interactome12p13$Target)
interactome12p13$temp2=paste(interactome12p13$Target,interactome12p13$Regulator)
colocNet12p13$temp1=paste(colocNet12p13$trait1,colocNet12p13$traits)
colocNet12p13$temp2=paste(colocNet12p13$traits,colocNet12p13$trait1)
# Then grab colocalization data for gene pairs in interactome
temp=as.data.frame(matrix(nrow = dim(interactome12p13)[1],ncol = 4))
for(i in 1:dim(interactome12p13)[1]){
temp[i,1:4]=colocNet12p13[ifelse(is.na(match(interactome12p13$temp1[i],colocNet12p13$temp1)),
match(interactome12p13$temp1[i],colocNet12p13$temp2),
match(interactome12p13$temp1[i],colocNet12p13$temp1)),3:6]
}
temp[is.na(temp)]=0
# Then combine with the BMI colocalizations
colnames(temp)=colnames(colocNet12p13)[3:6]
temp=rbind(temp,colocNet12p13[colocNet12p13$trait1=="BMI",3:6])
# Then add rows for BMI-Gene connections with 0 for MoA and likelihood
full12p13=interactome12p13[,1:4]
temp2=colocNet12p13[colocNet12p13$trait1=="BMI",1:4]
colnames(temp2)=colnames(interactome12p13)[1:4]
temp2[,3:4]=0
full12p13=rbind(full12p13,temp2)
# Finally, combine the colocalization columns with the interactome columns
full12p13=cbind(full12p13,temp)
# The nodes data also needs to be combined and duplicate rows removed
full12p13_nodes=rbind(inter_nodes12p13,coloc_nodes12p13)
full12p13_nodes=full12p13_nodes[!duplicated(full12p13_nodes$Node),]
# Write networks and node data to file for Cytoscape visualizations
write.table(interactome12p13,"Chr12p13_BMI_MRs_interactome.txt",sep = "\t",quote = F,row.names = F)
write.table(inter_nodes12p13,"Chr12p13_BMI_MRs_interactome_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(colocNet12p13,"Chr12p13_BMI_and_BMI_MRs_pairwise_colocalization_network.txt",sep = "\t",quote = F,row.names = F)
write.table(coloc_nodes12p13,"Chr12p13_BMI_and_BMI_MRs_pairwise_colocalization_network_node_info.txt",sep = "\t",quote = F,row.names = F)
write.table(full12p13,"Chr12p13_BMI_MRs_interactome_and_pairwise_colocalization.txt",sep = "\t",quote = F,row.names = F)
write.table(full12p13_nodes,"Chr12p13_BMI_MRs_interactome_and_pairwise_colocalization_node_info.txt",sep = "\t",quote = F,row.names = F)
## Testing the edit family of functions
require(apsimx)
extd.dir <- system.file("extdata", package = "apsimx")
run.test.edit.apsimx.replacement <- get(".run.local.tests", envir = apsimx.options)
tmp.dir <- tempdir()
if(run.test.edit.apsimx.replacement){
## Inspect, edit, inspect
inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir,
node = "Soybean",
node.child = "Leaf",
parm = "Gsmax350")
edit_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir,
wrt.dir = tmp.dir,
node = "Soybean",
node.child = "Leaf",
parm = "Gsmax350", value = 0.009,
verbose = FALSE)
inspect_apsimx_replacement("MaizeSoybean-edited.apsimx", src.dir = tmp.dir,
node = "Soybean",
node.child = "Leaf",
parm = "Gsmax350")
## Example for RUE
## Inspect, edit, inspect
inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir,
node = "Soybean",
node.child = "Leaf",
node.subchild = "Photosynthesis",
node.subsubchild = "RUE",
parm = "FixedValue")
edit_apsimx_replacement("MaizeSoybean.apsimx",
src.dir = extd.dir, wrt.dir = tmp.dir,
node = "Soybean",
node.child = "Leaf",
node.subchild = "Photosynthesis",
node.subsubchild = "RUE",
parm = "FixedValue", value = 1, verbose = FALSE)
inspect_apsimx_replacement("MaizeSoybean-edited.apsimx", src.dir = tmp.dir,
node = "Soybean",
node.child = "Leaf",
node.subchild = "Photosynthesis",
node.subsubchild = "RUE",
parm = "FixedValue")
#### Looking at Soybean
inspect_apsimx_replacement("MaizeSoybean.apsimx", src.dir = extd.dir,
node = "Soybean",
node.child = "Stephens_MG40",
parm = "Vegetative",
print.path = TRUE)
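  ## (Hedged sketch) a stricter, non-visual check could read the edited file
  ## back directly, assuming the .apsimx file is JSON and jsonlite is installed:
  # ed <- jsonlite::read_json(file.path(tmp.dir, "MaizeSoybean-edited.apsimx"))
  # str(ed, max.level = 1)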
# edit_apsim
}
test_that("Handle n when it isn't an integer", {
file <- tempfile()
# write the Educational Attainment GWAS to a temp file for testing
eduAttainOkbay <- readLines(system.file("extdata", "eduAttainOkbay.txt",
package = "MungeSumstats"
))
writeLines(eduAttainOkbay, con = file)
# read it in and make N
sumstats_dt <- data.table::fread(file)
# Add N column and make it not an integer
sumstats_dt[, N := 10 * runif(nrow(sumstats_dt))]
sumstats_dt[, N_fixed := round(N, 0)]
data.table::fwrite(x = sumstats_dt, file = file, sep = "\t")
# Run MungeSumstats code
reformatted <- MungeSumstats::format_sumstats(file,
ref_genome = "GRCh37",
on_ref_genome = FALSE,
strand_ambig_filter = FALSE,
bi_allelic_filter = FALSE,
allele_flip_check = FALSE
)
    # In the results, if N equals N_FIXED then the rounding worked as expected
res_dt <- data.table::fread(reformatted)
expect_equal(res_dt$N, res_dt$N_FIXED)
})
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(4.19867256723183e-140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 3L)))
result <- do.call(distr6:::C_EmpiricalMVCdf,testlist)
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign.tip.colors.R
\name{assign.tip.colors}
\alias{assign.tip.colors}
\title{Assign colors to tips}
\usage{
assign.tip.colors(tree, tip2category, na.col = "black",
unassigned.col = "gray", palette = NULL)
}
\arguments{
\item{tree}{a tree object of class "phylo"}
\item{tip2category}{a named vector. Each entry of the vector is a level and each name is a tip name.}
}
\value{
a list of two entries: colors, the vector of colors, and legend, a vector associating colors to levels. Legend is useful for adding the legend to the plot.
}
\description{
Given a tree of class "phylo" and a named vector with a category, returns a vector of colors to color the tips on the plots
}
\examples{
require(ape)
### From Saitou and Nei (1987, Table 1):
x <- c(7, 8, 11, 13, 16, 13, 17, 5, 8, 10, 13,
10, 14, 5, 7, 10, 7, 11, 8, 11, 8, 12,
5, 6, 10, 9, 13, 8)
M <- matrix(0, 8, 8)
M[lower.tri(M)] <- x
M <- t(M)
M [lower.tri(M)] <- x
dimnames(M) <- list(1:8, 1:8)
tr <- nj(M)
### Suppose that tips 1 to 4 are h.sapiens, 5 and 6 are m.musculus, 7 is NA and 8 is unassigned.
tip2category = c(rep(c("h.sapiens","m.musculus"),c(4,2)), NA)
names(tip2category) = 1:7
colors = assign.tip.colors(tr, tip2category, na.col="black", unassigned.col="gray")[["colors"]]
legenda = assign.tip.colors(tr, tip2category, na.col="black", unassigned.col="gray")[["legend"]]
plot(tr, "u", tip.color=colors, cex=2)
legend("bottomleft", legenda, pch=20, col=names(legenda))
}
|
/man/assign.tip.colors.Rd
|
no_license
|
abrozzi/SplitstRee
|
R
| false | true | 1,519 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign.tip.colors.R
\name{assign.tip.colors}
\alias{assign.tip.colors}
\title{Assign colors to tips}
\usage{
assign.tip.colors(tree, tip2category, na.col = "black",
unassigned.col = "gray", palette = NULL)
}
\arguments{
\item{tree}{a tree object of class "phylo"}
\item{tip2category}{a named vector. Each entry of the vector is a level and each name is a tip name.}
\item{na.col}{color used for tips whose category is \code{NA}.}
\item{unassigned.col}{color used for tips not present in \code{tip2category}.}
\item{palette}{an optional vector of colors, one per level.}
}
\value{
a list of two entries: colors, the vector of colors, and legend, a vector associating colors to levels. The legend entry is useful for adding a legend to the plot.
}
\description{
Given a tree of class "phylo" and a named vector with a category, returns a vector of colors for coloring the tips in plots
}
\examples{
require(ape)
### From Saitou and Nei (1987, Table 1):
x <- c(7, 8, 11, 13, 16, 13, 17, 5, 8, 10, 13,
10, 14, 5, 7, 10, 7, 11, 8, 11, 8, 12,
5, 6, 10, 9, 13, 8)
M <- matrix(0, 8, 8)
M[lower.tri(M)] <- x
M <- t(M)
M[lower.tri(M)] <- x
dimnames(M) <- list(1:8, 1:8)
tr <- nj(M)
### Suppose that tips 1 to 4 are h.sapiens, 5 and 6 are m.musculs, 7 is NA and 8 is unassigned.
tip2category = c(rep(c("h.sapiens","m.musculs"),c(4,2)), NA)
names(tip2category) = 1:7
colors = assign.tip.colors(tr, tip2category, na.col="black", unassigned.col="gray")[["colors"]]
legenda = assign.tip.colors(tr, tip2category, na.col="black", unassigned.col="gray")[["legend"]]
plot(tr, "u", tip.color=colors, cex=2)
legend("bottomleft", legenda, pch=20, col=names(legenda))
}
|
#######################################
# toLongName (code)
# requires the three letter community code as listed in COMM_CODE
# field of the data set
#
# Returns the full name of the community
#
# eg. HPK returns HIGHLAND PARK
toLongName <- function(code) {
code <- as.character(code)
if (exists("rawCommData")) {
longName <- as.character(
rawCommData[rawCommData$COMM_CODE==code,]$NAME[1]
)
} else {
longName <- "No matching community code"
}
longName <- simpleCap(longName)
return (longName)
}
#######################################
# toCommCode (commName)
# requires the full community name as listed in the NAME
# field of the data set
#
# Returns the community code
#
# eg. HIGHLAND PARK returns HPK
toCommCode <- function(commName) {
commName <- toupper(as.character(commName))
if (exists("rawCommData")) {
shortName<- as.character(
rawCommData[rawCommData$NAME==commName,]$COMM_CODE[1]
)
} else {
shortName <- "XXX"
}
return (shortName)
}
#######################################
# simpleCap (x)
#
# Simple function to capitalize the first letter of each word
#
# Copied from: http://stackoverflow.com/questions/6364783/capitalize-the-first-letter-of-both-words-in-a-two-word-string
simpleCap <- function(x) {
x <- tolower(x)
s <- strsplit(x, " ")[[1]]
paste(toupper(substring(s, 1,1)), substring(s, 2),
sep="", collapse=" ")
}
#######################################
# graphName (gName)
#
# Produces the file name for the graph to be output;
# the community short code is appended to the name
# and the file type is assumed to be png
#
graphName <- function (gName) {
gName <- paste (gName,config$communityCode,sep="_")
gName <- paste (gName,"png",sep=".")
gName <- file.path (config$graphDir,gName)
if (config$verbose == TRUE) {print (gName)}
return (gName)
}
#######################################
# getCommunityCodes (df)
#
# returns a list of community codes based on the data frame passed in
#
getCommunityCodes <- function (df=rawCommData) {
codes <- as.vector(unique(df$COMM_CODE))
return(codes)
}
#######################################
# getCommunityNames (df)
#
# returns a list of community names based on the data frame passed in
#
getCommunityNames <- function (df=rawCommData) {
names <- as.vector(unique(df$NAME))
return(names)
}
#######################################
# getCommunityClasses (df)
#
# returns a list of community classes based on the data frame passed in
#
getCommunityClasses <- function (df=rawCommData) {
classes <- as.vector(unique(df$CLASS))
return(classes)
}
########################################
# getCensusYears (df)
#
# returns a list of census years based on the data frame passed in
#
getCensusYears <- function (df=rawCommData) {
years <- as.vector(unique(df$CNSS_YR))
return(years)
}
#######################################
# savePlot (p,pName="MyPlot)
savePlot <- function (p,pName="MyPlot") {
png(filename=graphName(pName),
width = config$plotWidth,
height = config$plotHeight)
plot(p)
dev.off()
}
|
/lib/helpers.R
|
permissive
|
pengler/YYC_census
|
R
| false | false | 3,069 |
r
|
#######################################
# toLongName (code)
# requires the three letter community code as listed in COMM_CODE
# field of the data set
#
# Returns the full name of the community
#
# eg. HPK returns HIGHLAND PARK
toLongName <- function(code) {
code <- as.character(code)
if (exists("rawCommData")) {
longName <- as.character(
rawCommData[rawCommData$COMM_CODE==code,]$NAME[1]
)
} else {
longName <- "No matching community code"
}
longName <- simpleCap(longName)
return (longName)
}
#######################################
# toCommCode (commName)
# requires the full community name as listed in the NAME
# field of the data set
#
# Returns the community code
#
# eg. HIGHLAND PARK returns HPK
toCommCode <- function(commName) {
commName <- toupper(as.character(commName))
if (exists("rawCommData")) {
shortName<- as.character(
rawCommData[rawCommData$NAME==commName,]$COMM_CODE[1]
)
} else {
shortName <- "XXX"
}
return (shortName)
}
#######################################
# simpleCap (x)
#
# Simple function to capitalize the first letter of each word
#
# Copied from: http://stackoverflow.com/questions/6364783/capitalize-the-first-letter-of-both-words-in-a-two-word-string
simpleCap <- function(x) {
x <- tolower(x)
s <- strsplit(x, " ")[[1]]
paste(toupper(substring(s, 1,1)), substring(s, 2),
sep="", collapse=" ")
}
#######################################
# graphName (gName)
#
# Produces the file name for the graph to be output;
# the community short code is appended to the name
# and the file type is assumed to be png
#
graphName <- function (gName) {
gName <- paste (gName,config$communityCode,sep="_")
gName <- paste (gName,"png",sep=".")
gName <- file.path (config$graphDir,gName)
if (config$verbose == TRUE) {print (gName)}
return (gName)
}
#######################################
# getCommunityCodes (df)
#
# returns a list of community codes based on the data frame passed in
#
getCommunityCodes <- function (df=rawCommData) {
codes <- as.vector(unique(df$COMM_CODE))
return(codes)
}
#######################################
# getCommunityNames (df)
#
# returns a list of community names based on the data frame passed in
#
getCommunityNames <- function (df=rawCommData) {
names <- as.vector(unique(df$NAME))
return(names)
}
#######################################
# getCommunityClasses (df)
#
# returns a list of community classes based on the data frame passed in
#
getCommunityClasses <- function (df=rawCommData) {
classes <- as.vector(unique(df$CLASS))
return(classes)
}
########################################
# getCensusYears (df)
#
# returns a list of census years based on the data frame passed in
#
getCensusYears <- function (df=rawCommData) {
years <- as.vector(unique(df$CNSS_YR))
return(years)
}
#######################################
# savePlot (p,pName="MyPlot)
savePlot <- function (p,pName="MyPlot") {
png(filename=graphName(pName),
width = config$plotWidth,
height = config$plotHeight)
plot(p)
dev.off()
}
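# These helpers rely on two global objects defined elsewhere in the project:
# a `rawCommData` data frame and a `config` list. A minimal sketch of their
# assumed shapes (illustrative values, not taken from the original repo):
config <- list(
  communityCode = "HPK",    # community currently being processed
  graphDir      = "graphs", # output directory for png files
  plotWidth     = 800,
  plotHeight    = 600,
  verbose       = TRUE
)
rawCommData <- data.frame(
  COMM_CODE = c("HPK", "TUX"),
  NAME      = c("HIGHLAND PARK", "TUXEDO PARK"),
  CLASS     = c("Residential", "Residential"),
  CNSS_YR   = c(2016, 2016)
)
toLongName("HPK")  # "Highland Park"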
|
# title: Fairness functions
# created: 07/17/2018
# updated: 01/25/2019
# description: Functions used to run fairness analysis
# Net compensation penalty - penalize models with rev less than cost
penalty<-function(beta){
avg_rev_mh<-((t(grp) %*% (X_scale %*% beta))/n_grp)
fair<-(mhsud_cost_scale - avg_rev_mh)
}
# Mean residual difference penalty
penalty2<-function(beta){
avg_rev_mh<-((t(grp) %*% (X_scale %*% beta))/n_grp)
avg_rev_ref<-((t(ref) %*% (X_scale %*% beta))/n_ref)
fair<-( (mhsud_cost_scale-ref_cost_scale) - (avg_rev_mh - avg_rev_ref) )^2
}
# rescale y values
rescale<-function(y_scale,pred){
newpred<-pred*attr(y_scale,'scaled:scale')+attr(y_scale,'scaled:center')
}
# get predictions for test dataset and rescale them
get_preds<-function(beta){
pred_scaled<-as.matrix(X_test_scale) %*% beta
pred<-as.data.frame(rescale(y_scale, pred_scaled))
return(pred$V1)
}
# r-squared
rsquared<-function(y,predy){
SSR = sum((y-predy)^2)
SST = sum((y-mean(y))^2)
R2 = 1-SSR/SST
return(R2)
}
# mse
mse<-function(y,predy){
SSR = sum((y-predy)^2)
MSE = SSR/length(y)
return(MSE)
}
# calculate average revenue for the group
grp_rev<-function(predy){
tmp<-as.data.frame(cbind(flag_mh, predy))
names(tmp)<-c('mh','pred')
grp_rev<-mean(tmp[tmp$mh==1,'pred'])
ref_rev<-mean(tmp[tmp$mh==0,'pred'])
rev_list<-list("grp"=grp_rev, "ref"=ref_rev)
return(rev_list)
}
# net compensation
overunder<-function(predy){
rev<-grp_rev(predy)
mhsud_rev<-rev$grp
ref_rev<-rev$ref
grp_ou<-mhsud_rev - mhsud_cost
ref_ou<-ref_rev - ref_cost
ou_list<-list("grp"=grp_ou, "ref"=ref_ou)
return(ou_list)
}
# predicted ratio
predratio<-function(y,predy) {
rev<-grp_rev(predy)
mhsud_rev<-rev$grp
ref_rev<-rev$ref
grp_pr<-mhsud_rev/mhsud_cost
ref_pr<-ref_rev/ref_cost
pr_list<-list("grp"=grp_pr, "ref"=ref_pr)
return(pr_list)
}
# correlation between group indicator and prediction error
grpcorr<-function(y,predy){
cor(flag_mh, y-predy)
}
# covariance between group indicator and prediction error
grpcov<-function(y,predy){
cov(flag_mh, y-predy)
}
# call evaluation metrics and return df with metrics
all_metrics<-function(y,ypred,model){
r2<-round(rsquared(y,ypred),3)
mse<-round(mse(y,ypred),3)
ou<-overunder(ypred)
ou_grp<-round(ou$grp,3)
ou_ref<-round(ou$ref,3)
pr<-predratio(y,ypred)
pr_grp<-round(pr$grp,3)
pr_ref<-round(pr$ref,3)
gc<-round(grpcorr(y,ypred),3)
gcov<-round(grpcov(y,ypred),3)
name<-model
# combine metrics into a matrix to return (can be rbind-ed across models for printing)
df<-cbind(model, r2, mse, ou_grp, ou_ref, pr_grp, pr_ref, gc, gcov)
return(df)
}
|
/fairness_functions.R
|
no_license
|
wangzilongri/MarketScan-Fair
|
R
| false | false | 2,591 |
r
|
# title: Fairness functions
# created: 07/17/2018
# updated: 01/25/2019
# description: Functions used to run fairness analysis
# Net compensation penalty - penalize models with rev less than cost
penalty<-function(beta){
avg_rev_mh<-((t(grp) %*% (X_scale %*% beta))/n_grp)
fair<-(mhsud_cost_scale - avg_rev_mh)
}
# Mean residual difference penalty
penalty2<-function(beta){
avg_rev_mh<-((t(grp) %*% (X_scale %*% beta))/n_grp)
avg_rev_ref<-((t(ref) %*% (X_scale %*% beta))/n_ref)
fair<-( (mhsud_cost_scale-ref_cost_scale) - (avg_rev_mh - avg_rev_ref) )^2
}
# rescale y values
rescale<-function(y_scale,pred){
newpred<-pred*attr(y_scale,'scaled:scale')+attr(y_scale,'scaled:center')
}
# get predictions for test dataset and rescale them
get_preds<-function(beta){
pred_scaled<-as.matrix(X_test_scale) %*% beta
pred<-as.data.frame(rescale(y_scale, pred_scaled))
return(pred$V1)
}
# r-squared
rsquared<-function(y,predy){
SSR = sum((y-predy)^2)
SST = sum((y-mean(y))^2)
R2 = 1-SSR/SST
return(R2)
}
# mse
mse<-function(y,predy){
SSR = sum((y-predy)^2)
MSE = SSR/length(y)
return(MSE)
}
# calculate average revenue for the group
grp_rev<-function(predy){
tmp<-as.data.frame(cbind(flag_mh, predy))
names(tmp)<-c('mh','pred')
grp_rev<-mean(tmp[tmp$mh==1,'pred'])
ref_rev<-mean(tmp[tmp$mh==0,'pred'])
rev_list<-list("grp"=grp_rev, "ref"=ref_rev)
return(rev_list)
}
# net compensation
overunder<-function(predy){
rev<-grp_rev(predy)
mhsud_rev<-rev$grp
ref_rev<-rev$ref
grp_ou<-mhsud_rev - mhsud_cost
ref_ou<-ref_rev - ref_cost
ou_list<-list("grp"=grp_ou, "ref"=ref_ou)
return(ou_list)
}
# predicted ratio
predratio<-function(y,predy) {
rev<-grp_rev(predy)
mhsud_rev<-rev$grp
ref_rev<-rev$ref
grp_pr<-mhsud_rev/mhsud_cost
ref_pr<-ref_rev/ref_cost
pr_list<-list("grp"=grp_pr, "ref"=ref_pr)
return(pr_list)
}
# correlation between group indicator and prediction error
grpcorr<-function(y,predy){
cor(flag_mh, y-predy)
}
# covariance between group indicator and prediction error
grpcov<-function(y,predy){
cov(flag_mh, y-predy)
}
# call evaluation metrics and return df with metrics
all_metrics<-function(y,ypred,model){
r2<-round(rsquared(y,ypred),3)
mse<-round(mse(y,ypred),3)
ou<-overunder(ypred)
ou_grp<-round(ou$grp,3)
ou_ref<-round(ou$ref,3)
pr<-predratio(y,ypred)
pr_grp<-round(pr$grp,3)
pr_ref<-round(pr$ref,3)
gc<-round(grpcorr(y,ypred),3)
gcov<-round(grpcov(y,ypred),3)
name<-model
# combine metrics into a matrix to return (can be rbind-ed across models for printing)
df<-cbind(model, r2, mse, ou_grp, ou_ref, pr_grp, pr_ref, gc, gcov)
return(df)
}
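# A toy demonstration of the metrics above -- a hedged sketch, since the real
# script defines flag_mh, mhsud_cost, and ref_cost from MarketScan data; the
# values below are simulated stand-ins:
set.seed(1)
flag_mh <- rbinom(100, 1, 0.3)        # group indicator (1 = MH/SUD)
y       <- rnorm(100, mean = 5)       # observed cost
ypred   <- y + rnorm(100, sd = 0.5)   # predicted cost with noise
mhsud_cost <- mean(y[flag_mh == 1])   # average observed cost, MH/SUD group
ref_cost   <- mean(y[flag_mh == 0])   # average observed cost, reference group
all_metrics(y, ypred, model = "toy")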
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/item_upload_files.R
\name{item_publish_cloud}
\alias{item_publish_cloud}
\title{Publish file to public cloud S3 bucket}
\usage{
item_publish_cloud(sb_id, files, ..., session = current_session())
}
\arguments{
\item{sb_id}{An \code{\link{sbitem}} object or a character ScienceBase ID corresponding to the item}
\item{files}{A string vector of paths to files to be uploaded}
\item{...}{Additional parameters are passed on to \code{\link[httr]{GET}}, \code{\link[httr]{POST}},
\code{\link[httr]{HEAD}}, \code{\link[httr]{PUT}}, or \code{\link[httr]{DELETE}}}
\item{session}{Session object from \code{\link{authenticate_sb}}. Defaults to anonymous or
last authenticated session}
}
\value{
web service response invisibly.
}
\description{
Moves a cloud file from the S3 bucket that is only available via
ScienceBase authenticated services to a public S3 bucket.
}
\examples{
\dontrun{
res <- item_create(user_id(), "testing 123")
cat("foo bar", file = "foobar.txt")
item_upload_cloud(res$id, "foobar.txt")
item_publish_cloud(res$id, "foobar.txt")
}
}
|
/man/item_publish_cloud.Rd
|
permissive
|
dblodgett-usgs/sbtools
|
R
| false | true | 1,121 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/item_upload_files.R
\name{item_publish_cloud}
\alias{item_publish_cloud}
\title{Publish file to public cloud S3 bucket}
\usage{
item_publish_cloud(sb_id, files, ..., session = current_session())
}
\arguments{
\item{sb_id}{An \code{\link{sbitem}} object or a character ScienceBase ID corresponding to the item}
\item{files}{A string vector of paths to files to be uploaded}
\item{...}{Additional parameters are passed on to \code{\link[httr]{GET}}, \code{\link[httr]{POST}},
\code{\link[httr]{HEAD}}, \code{\link[httr]{PUT}}, or \code{\link[httr]{DELETE}}}
\item{session}{Session object from \code{\link{authenticate_sb}}. Defaults to anonymous or
last authenticated session}
}
\value{
web service response invisibly.
}
\description{
Moves a cloud file from the S3 bucket that is only available via
ScienceBase authenticated services to a public S3 bucket.
}
\examples{
\dontrun{
res <- item_create(user_id(), "testing 123")
cat("foo bar", file = "foobar.txt")
item_upload_cloud(res$id, "foobar.txt")
item_publish_cloud(res$id, "foobar.txt")
}
}
|
"glm.diag" <-
function (glmfit)
{
if (is.null(glmfit$prior.weights))
w <- rep(1, length(glmfit$residuals))
else w <- glmfit$prior.weights
sd <- sqrt(summary(glmfit)$dispersion)
dev <- residuals(glmfit, type = "deviance")/sd
pear <- residuals(glmfit, type = "pearson")/sd
h <- rep(0, length(w))
h[w != 0] <- lm.influence(glmfit)$hat
p <- glmfit$rank
rp <- pear/sqrt(1 - h)
rd <- dev/sqrt(1 - h)
cook <- (h * rp^2)/((1 - h) * p)
res <- sign(dev) * sqrt(dev^2 + h * rp^2)
list(res = res, rd = rd, rp = rp, cook = cook, h = h, sd = sd)
}
|
/R/glm.diag.R
|
no_license
|
cran/SMPracticals
|
R
| false | false | 597 |
r
|
"glm.diag" <-
function (glmfit)
{
if (is.null(glmfit$prior.weights))
w <- rep(1, length(glmfit$residuals))
else w <- glmfit$prior.weights
sd <- sqrt(summary(glmfit)$dispersion)
dev <- residuals(glmfit, type = "deviance")/sd
pear <- residuals(glmfit, type = "pearson")/sd
h <- rep(0, length(w))
h[w != 0] <- lm.influence(glmfit)$hat
p <- glmfit$rank
rp <- pear/sqrt(1 - h)
rd <- dev/sqrt(1 - h)
cook <- (h * rp^2)/((1 - h) * p)
res <- sign(dev) * sqrt(dev^2 + h * rp^2)
list(res = res, rd = rd, rp = rp, cook = cook, h = h, sd = sd)
}
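# A hedged usage sketch: fitting the classic Poisson example from ?glm and
# inspecting the diagnostics returned by glm.diag:
counts    <- c(18, 17, 15, 20, 10, 20, 25, 13, 12)
outcome   <- gl(3, 1, 9)
treatment <- gl(3, 3)
fit <- glm(counts ~ outcome + treatment, family = poisson())
gd <- glm.diag(fit)
str(gd)  # standardized residuals, Cook statistics, leverages, dispersion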
|
myapp <- function(){
x<-list(a=1:5,b=rnorm(10))
lapply(x,mean)
}
myapp1 <- function(){
x<-list(a=1:5,b=rnorm(10),c=rnorm(10),d=rnorm(10))
lapply(x,mean)
}
myapp2 <- function(){
x<-1:5
lapply(x,runif)
}
myapp3 <- function(){
x<-1:4
lapply(x,runif,min=0,max=10)
}
myapp4 <- function(){
x<-list(a=matrix(1:4,2,2), matrix(1:6,3,2))
x
}
myapp5 <- function(){
lapply(myapp4(), function(elt) elt[,1])
}
myapp6 <- function(){
lapply(myapp4(), function(elt) elt[,1])
}
myapp7 <- function(){
x<-list(a=1:5,b=rnorm(10),c=rnorm(10),d=rnorm(10))
sapply(x,mean)
}
myapp8 <- function(){
x<-list(a=1:5,b=rnorm(10),c=rnorm(10),d=rnorm(10))
sapply(x,mean)
}
library(datasets)
data(iris)
data(mtcars)
q1<-function(){
mean( subset(iris,Species == "virginica")$Sepal.Length, na.rm=TRUE)
}
q11 <-function(){
with(iris,Sepal.Length[Sepal.Width== max(Sepal.Width[Species=="setosa"])])
#with(df, d[v== max(v[c=="foo"])])
}
q2<-function(){
apply(iris[,1:4],2,mean,na.rm=TRUE)
}
q3<-function(){
#tapply(mtcars$mpg, mtcars$cyl,mean)
with(mtcars, tapply(mpg, cyl,mean))
#sapply(mtcars, cyl,mean)
#lapply(mtcars, mean)
#mean(mtcars$mpg,mtcars$cyl)
}
q4<-function(){
tapply(mtcars$hp, mtcars$cyl,mean)
#sapply(mtcars, cyl,mean)
#lapply(mtcars, mean)
#mean(mtcars$mpg,mtcars$cyl)
#abs(x[1]-x[3])
}
myapp9 <-function(){
x<- matrix(rnorm(200), 20,10)
    # MARGIN = 2: keep columns, collapse rows (column means)
apply(x,2,mean)
}
myapp10 <-function()
{
x<- matrix(rnorm(200), 20,10)
colMeans(x)
colSums(x)
rowMeans(x)
rowSums(x)
}
myapp11 <-function()
{
x<- matrix(rnorm(200), 20,10)
apply(x,1,quantile, probs = c(0.25,0.75))
}
myapp12 <- function()
{
a <- array(rnorm(2*2*10), c(2,2,10))
apply(a,c(1,2),mean)
rowMeans(a, dims=2)
}
myapp13 <- function()
{
a <- array(rnorm(2*2*10), c(2,2,10))
apply(a,c(1,2),mean)
rowMeans(a, dims=2)
}
myapp14 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
tapply(a,f,mean, simplify=FALSE)
}
myapp15 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
tapply(a,f,range, simplify=FALSE)
}
myapp16 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
tapply(a,f,range, simplify=FALSE)
}
myapp17 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
split(a,f)
}
myapp18 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
lapply(split(a,f),mean)
}
myapp19 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
lapply(split(a,f),mean)
}
myapp20 <- function()
{
s <- split(airquality, airquality$Month)
lapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")]))
}
myapp21 <- function()
{
s <- split(airquality, airquality$Month)
sapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")]))
}
myapp22 <- function()
{
s <- split(airquality, airquality$Month)
sapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")],na.rm=TRUE))
}
myapp23 <- function()
{
x <- rnorm(10)
f1<-gl(2,5)
f2 <- gl(5,2)
interaction(f1,f2)
str(split(x,list(f1,f2),drop=TRUE))
f1
f2
interaction(f1,f2)
}
myapp24 <- function()
{
list(rep(1,4),rep(2,3),rep(3,2),rep(4,1))
mapply(rep,1:4,4:1)
}
noise <- function(n,mean,sd)
{
rnorm(n,mean,sd)
}
myapp25 <- function()
{
noise(5,1,2)
mapply(noise,1:5,1:5,2)
list(noise(1,1,2),noise(2,2,2),noise(3,3,2),noise(4,5,2),noise(5,5,2))
}
pmsg <- function(x){
if (is.na(x))
print ("x is a missing value")
else if (x > 0){
print("x is greater than zero")
print(x)
}
else
print ("x is less than or equal zero")
invisible(x)
}
myapp26 <- function()
{
# colMins/rowMins/colRanges are not in base R; they come from matrixStats
library(matrixStats)
x <- matrix(1:12, 4)
colMins(x)
rowMins(x)
colRanges(x)
}
|
/lect3.R
|
no_license
|
kennethchung/HopkinsDataScience
|
R
| false | false | 3,702 |
r
|
myapp <- function(){
x<-list(a=1:5,b=rnorm(10))
lapply(x,mean)
}
myapp1 <- function(){
x<-list(a=1:5,b=rnorm(10),c=rnorm(10),d=rnorm(10))
lapply(x,mean)
}
myapp2 <- function(){
x<-1:5
lapply(x,runif)
}
myapp3 <- function(){
x<-1:4
lapply(x,runif,min=0,max=10)
}
myapp4 <- function(){
x<-list(a=matrix(1:4,2,2), matrix(1:6,3,2))
x
}
myapp5 <- function(){
lapply(myapp4(), function(elt) elt[,1])
}
myapp6 <- function(){
lapply(myapp4(), function(elt) elt[,1])
}
myapp7 <- function(){
x<-list(a=1:5,b=rnorm(10),c=rnorm(10),d=rnorm(10))
sapply(x,mean)
}
myapp8 <- function(){
x<-list(a=1:5,b=rnorm(10),c=rnorm(10),d=rnorm(10))
sapply(x,mean)
}
library(datasets)
data(iris)
data(mtcars)
q1<-function(){
mean( subset(iris,Species == "virginica")$Sepal.Length, na.rm=TRUE)
}
q11 <-function(){
with(iris,Sepal.Length[Sepal.Width== max(Sepal.Width[Species=="setosa"])])
#with(df, d[v== max(v[c=="foo"])])
}
q2<-function(){
apply(iris[,1:4],2,mean,na.rm=TRUE)
}
q3<-function(){
#tapply(mtcars$mpg, mtcars$cyl,mean)
with(mtcars, tapply(mpg, cyl,mean))
#sapply(mtcars, cyl,mean)
#lapply(mtcars, mean)
#mean(mtcars$mpg,mtcars$cyl)
}
q4<-function(){
tapply(mtcars$hp, mtcars$cyl,mean)
#sapply(mtcars, cyl,mean)
#lapply(mtcars, mean)
#mean(mtcars$mpg,mtcars$cyl)
#abs(x[1]-x[3])
}
myapp9 <-function(){
x<- matrix(rnorm(200), 20,10)
    # MARGIN = 2: keep columns, collapse rows (column means)
apply(x,2,mean)
}
myapp10 <-function()
{
x<- matrix(rnorm(200), 20,10)
colMeans(x)
colSums(x)
rowMeans(x)
rowSums(x)
}
myapp11 <-function()
{
x<- matrix(rnorm(200), 20,10)
apply(x,1,quantile, probs = c(0.25,0.75))
}
myapp12 <- function()
{
a <- array(rnorm(2*2*10), c(2,2,10))
apply(a,c(1,2),mean)
rowMeans(a, dims=2)
}
myapp13 <- function()
{
a <- array(rnorm(2*2*10), c(2,2,10))
apply(a,c(1,2),mean)
rowMeans(a, dims=2)
}
myapp14 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
tapply(a,f,mean, simplify=FALSE)
}
myapp15 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
tapply(a,f,range, simplify=FALSE)
}
myapp16 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
tapply(a,f,range, simplify=FALSE)
}
myapp17 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
split(a,f)
}
myapp18 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
lapply(split(a,f),mean)
}
myapp19 <- function()
{
a <- c(rnorm(10),runif(10),rnorm(10,1))
f<-gl(3,10)
lapply(split(a,f),mean)
}
myapp20 <- function()
{
s <- split(airquality, airquality$Month)
lapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")]))
}
myapp21 <- function()
{
s <- split(airquality, airquality$Month)
sapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")]))
}
myapp22 <- function()
{
s <- split(airquality, airquality$Month)
sapply(s, function(x) colMeans(x[, c("Ozone", "Solar.R", "Wind")],na.rm=TRUE))
}
myapp23 <- function()
{
x <- rnorm(10)
f1<-gl(2,5)
f2 <- gl(5,2)
interaction(f1,f2)
str(split(x,list(f1,f2),drop=TRUE))
f1
f2
interaction(f1,f2)
}
myapp24 <- function()
{
list(rep(1,4),rep(2,3),rep(3,2),rep(4,1))
mapply(rep,1:4,4:1)
}
noise <- function(n,mean,sd)
{
rnorm(n,mean,sd)
}
myapp25 <- function()
{
noise(5,1,2)
mapply(noise,1:5,1:5,2)
list(noise(1,1,2),noise(2,2,2),noise(3,3,2),noise(4,5,2),noise(5,5,2))
}
pmsg <- function(x){
if (is.na(x))
print ("x is a missing value")
else if (x > 0){
print("x is greater than zero")
print(x)
}
else
print ("x is less than or equal zero")
invisible(x)
}
myapp26 <- function()
{
# colMins/rowMins/colRanges are not in base R; they come from matrixStats
library(matrixStats)
x <- matrix(1:12, 4)
colMins(x)
rowMins(x)
colRanges(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s1_proc_inputs.R
\name{as.dets}
\alias{as.dets}
\title{Coerce a \code{data.frame} to class \code{dets}}
\usage{
as.dets(x, crs = 4326)
}
\arguments{
\item{x}{A \code{data.frame} to coerce to a \code{dets} object.}
\item{crs}{Coordinate Reference System to use for the detections. Passed
to \code{\link[sf:st_crs]{sf::st_crs}()} to set CRS for sf object.
Defaults to \code{4326}, longitude/latitude on the WGS84 spheroid.}
}
\description{
Coerces a \code{data.frame} to a \code{dets} object
}
\examples{
#Load a CSV of already processed detections
proc.det.csv <- read.csv(
system.file("extdata", "processed_detections.csv",
package = "ADePTR"))
#Coerce to dets
proc.det2 <- as.dets(proc.det.csv)
}
|
/man/as.dets.Rd
|
no_license
|
bsmity13/ADePTR
|
R
| false | true | 827 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s1_proc_inputs.R
\name{as.dets}
\alias{as.dets}
\title{Coerce a \code{data.frame} to class \code{dets}}
\usage{
as.dets(x, crs = 4326)
}
\arguments{
\item{x}{A \code{data.frame} to coerce to a \code{dets} object.}
\item{crs}{Coordinate Reference System to use for the detections. Passed
to \code{\link[sf:st_crs]{sf::st_crs}()} to set CRS for sf object.
Defaults to \code{4326}, longitude/latitude on the WGS84 spheroid.}
}
\description{
Coerces a \code{data.frame} to a \code{dets} object
}
\examples{
#Load a CSV of already processed detections
proc.det.csv <- read.csv(
system.file("extdata", "processed_detections.csv",
package = "ADePTR"))
#Coerce to dets
proc.det2 <- as.dets(proc.det.csv)
}
|
### Load data objects, rename and set names (if necessary)
# Load GenomicRanges library
library(GenomicRanges)
## GENES
load("summarized_overlaps_hg19_genes_abyzov_ipsc_and_parental.R")
# Rename object
genes <- overlaps
## EXONS
# Load exon data object
load("summarized_overlaps_hg19_exons_abyzov_ipsc_and_parental.R")
# Rename object
exons <- overlaps
# Set names attribute
names(rowData(exons)) <- rowData(exons)$name
###
### Define sample groups
parental <- c(1,5,9,10,14,18,22)
iPSC <- c(2:4,6:8,11:13,15:17,19:21,23:25)
###
### Prepare count tables
## Extract counts
counts_genes_parental <- assays(genes)$counts[,parental]
counts_genes_iPSC <- assays(genes)$counts[,iPSC]
counts_exons_parental <- assays(exons)$counts[,parental]
counts_exons_iPSC <- assays(exons)$counts[,iPSC]
## Shorten column names
colnames(counts_genes_parental) <- sub("segemehl_", "", basename(colnames(counts_genes_parental)))
colnames(counts_genes_iPSC) <- sub("segemehl_", "", basename(colnames(counts_genes_iPSC)))
colnames(counts_exons_parental) <- sub("segemehl_", "", basename(colnames(counts_exons_parental)))
colnames(counts_exons_iPSC) <- sub("segemehl_", "", basename(colnames(counts_exons_iPSC)))
###
# Remove unused objects
rm(overlaps, exons, genes, parental, iPSC)
## Save count tables
save(counts_genes_parental, counts_genes_iPSC, counts_exons_parental, counts_exons_iPSC, file="count_tables_abyzov.R")
write.table(counts_genes_parental, file = "counts_genes_parental", quote=FALSE, sep = "\t")
write.table(counts_genes_iPSC, file = "counts_genes_iPSC", quote=FALSE, sep = "\t")
write.table(counts_exons_parental, file = "counts_exons_parental", quote=FALSE, sep = "\t")
write.table(counts_exons_iPSC, file = "counts_exons_iPSC", quote=FALSE, sep = "\t")
## Load count tables
load("count_tables_abyzov.R")
counts_genes_parental <- as.matrix(read.table("counts_genes_parental"))
counts_genes_iPSC <- as.matrix(read.table("counts_genes_iPSC"))
counts_exons_parental <- as.matrix(read.table("counts_exons_parental"))
counts_exons_iPSC <- as.matrix(read.table("counts_exons_iPSC"))
### Prepare DGE list objects and save
# Load edgeR library
library(edgeR)
## Genes
counts_genes <- cbind(counts_genes_parental, counts_genes_iPSC)
genes_dge_l <- DGEList(counts=counts_genes, group=c(rep("parental",7),rep("iPSC",18)))
save(genes_dge_l, file="genes_dge_list_abyzov.R")
## Exons
counts_exons <- cbind(counts_exons_parental, counts_exons_iPSC)
exons_dge_l <- DGEList(counts=counts_exons, group=c(rep("parental",7),rep("iPSC",18)))
save(exons_dge_l, file="exons_dge_list_abyzov.R")
###
### STARTING POINT: Load library and objects
library(edgeR)
load("genes_dge_list_abyzov.R")
load("exons_dge_list_abyzov.R")
###
### Differential gene expression analysis (edgeR)
## GENES
genes_norm_fact <- calcNormFactors(genes_dge_l)
# Calculate common dispersion
genes_comm_disp <- estimateCommonDisp(genes_norm_fact)
# Calculate tagwise dispersion
genes_tag_wise_disp <- estimateTagwiseDisp(genes_comm_disp)
# Exact negative binomial tagwise tests
genes_exact_test <- exactTest(genes_tag_wise_disp)
# Count differentially expressed genes (down / not significant / up)
summ_de_genes <- summary(decideTestsDGE(genes_exact_test))
# Subset top tags (FDR < 0.05)
tags_de_genes <- topTags(genes_exact_test, n=sum(summ_de_genes[c(1,3)]))
# Get count table normalized to counts per million
cpm_de_genes <- cpm(genes_tag_wise_disp)[rownames(tags_de_genes),]
## Write tables
write.table(genes_dge_l$counts, file="counts_raw_genes.tsv", quote=FALSE, sep="\t")
write.table(genes_comm_disp$pseudo.counts, file="counts_norm_genes.tsv", quote=FALSE, sep="\t")
write.table(cpm_de_genes, file="counts_norm_cpm_genes.tsv", quote=FALSE, sep="\t")
write.table(genes_exact_test$table, file="diff_expr_all_genes.tsv", quote=FALSE, sep="\t")
write.table(tags_de_genes$table, file="diff_exp_fdr_cutoff_genes.tsv", quote=FALSE, sep="\t")
## Tagwise dispersion vs log2(cpm)
pdf(file="BCV_plot_genes.pdf", width = 6, height = 6)
plotBCV(genes_tag_wise_disp, cex=0.4)
dev.off()
## Tagwise log2(FC) vs log2(cpm) (~MA plot)
pdf(file="smear_plot_genes.pdf", width = 6, height = 6)
detags <- rownames(genes_tag_wise_disp)[as.logical(decideTestsDGE(genes_exact_test))]
plotSmear(genes_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue")
dev.off()
## EXONS
exons_norm_fact <- calcNormFactors(exons_dge_l)
# Calculate common dispersion
exons_comm_disp <- estimateCommonDisp(exons_norm_fact)
# Calculate tagwise dispersion
exons_tag_wise_disp <- estimateTagwiseDisp(exons_comm_disp)
# Exact negative binomial tagwise tests
exons_exact_test <- exactTest(exons_tag_wise_disp)
# Count differentially expressed exons (down / not significant / up)
summ_de_exons <- summary(decideTestsDGE(exons_exact_test))
# Subset top tags (FDR < 0.05)
tags_de_exons <- topTags(exons_exact_test, n=sum(summ_de_exons[c(1,3)]))
# Get count table normalized to counts per million
cpm_de_exons <- cpm(exons_tag_wise_disp)[rownames(tags_de_exons),]
## Sample comparison
pdf(file="MDS_plot_exons.pdf", width = 6, height = 6)
plotMDS(exons_comm_disp)
dev.off()
## Write tables
write.table(exons_dge_l$counts, file="counts_raw_exons.tsv", quote=FALSE, sep="\t")
write.table(exons_comm_disp$pseudo.counts, file="counts_norm_exons.tsv", quote=FALSE, sep="\t")
write.table(cpm_de_exons, file="counts_norm_cpm_exons.tsv", quote=FALSE, sep="\t")
write.table(exons_exact_test$table, file="diff_expr_all_exons.tsv", quote=FALSE, sep="\t")
write.table(tags_de_exons$table, file="diff_exp_fdr_cutoff_exons.tsv", quote=FALSE, sep="\t")
## Tagwise dispersion vs log2(cpm)
pdf(file="BCV_plot_exons.pdf", width = 6, height = 6)
plotBCV(exons_tag_wise_disp, cex=0.4)
dev.off()
## Tagwise log2(FC) vs log2(cpm) (~MA plot)
pdf(file="smear_plot_exons.pdf", width = 6, height = 6)
detags <- rownames(exons_tag_wise_disp)[as.logical(decideTestsDGE(exons_exact_test))]
plotSmear(exons_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue")
dev.off()
###
#### LEFTOVERS ####
## Sample comparison
pdf(file="MDS_plot_genes.pdf", width = 6, height = 6)
plotMDS(genes_comm_disp)
dev.off()
mean_counts_genes_parental <- rowMeans(counts_genes_parental)
mean_counts_genes_iPSC <- rowMeans(counts_genes_iPSC)
mean_counts_exons_parental <- rowMeans(counts_exons_parental)
mean_counts_exons_iPSC <- rowMeans(counts_exons_iPSC)
sd_counts_genes_parental <- apply(counts_genes_parental, 1, sd)
sd_counts_genes_iPSC <- apply(counts_genes_iPSC, 1, sd)
sd_counts_exons_parental <- apply(counts_exons_parental, 1, sd)
sd_counts_exons_iPSC <- apply(counts_exons_iPSC, 1, sd)
####
|
/scripts/UNFINISHED_count_tables_AS.R
|
permissive
|
uniqueg/scripts
|
R
| false | false | 6,582 |
r
|
### Load data objects, rename and set names (if necessary)
# Load GenomicRanges library
library(GenomicRanges)
## GENES
load("summarized_overlaps_hg19_genes_abyzov_ipsc_and_parental.R")
# Rename object
genes <- overlaps
## EXONS
# Load exon data object
load("summarized_overlaps_hg19_exons_abyzov_ipsc_and_parental.R")
# Rename object
exons <- overlaps
# Set names attribute
names(rowData(exons)) <- rowData(exons)$name
###
### Define sample groups
parental <- c(1,5,9,10,14,18,22)
iPSC <- c(2:4,6:8,11:13,15:17,19:21,23:25)
###
### Prepare count tables
## Extract counts
counts_genes_parental <- assays(genes)$counts[,parental]
counts_genes_iPSC <- assays(genes)$counts[,iPSC]
counts_exons_parental <- assays(exons)$counts[,parental]
counts_exons_iPSC <- assays(exons)$counts[,iPSC]
## Shorten column names
colnames(counts_genes_parental) <- sub("segemehl_", "", basename(colnames(counts_genes_parental)))
colnames(counts_genes_iPSC) <- sub("segemehl_", "", basename(colnames(counts_genes_iPSC)))
colnames(counts_exons_parental) <- sub("segemehl_", "", basename(colnames(counts_exons_parental)))
colnames(counts_exons_iPSC) <- sub("segemehl_", "", basename(colnames(counts_exons_iPSC)))
###
# Remove unused objects
rm(overlaps, exons, genes, parental, iPSC)
## Save count tables
save(counts_genes_parental, counts_genes_iPSC, counts_exons_parental, counts_exons_iPSC, file="count_tables_abyzov.R")
write.table(counts_genes_parental, file = "counts_genes_parental", quote=FALSE, sep = "\t")
write.table(counts_genes_iPSC, file = "counts_genes_iPSC", quote=FALSE, sep = "\t")
write.table(counts_exons_parental, file = "counts_exons_parental", quote=FALSE, sep = "\t")
write.table(counts_exons_iPSC, file = "counts_exons_iPSC", quote=FALSE, sep = "\t")
## Load count tables
load("count_tables_abyzov.R")
counts_genes_parental <- as.matrix(read.table("counts_genes_parental"))
counts_genes_iPSC <- as.matrix(read.table("counts_genes_iPSC"))
counts_exons_parental <- as.matrix(read.table("counts_exons_parental"))
counts_exons_iPSC <- as.matrix(read.table("counts_exons_iPSC"))
### Prepare DGE list objects and save
# Load edgeR library
library(edgeR)
## Genes
counts_genes <- cbind(counts_genes_parental, counts_genes_iPSC)
genes_dge_l <- DGEList(counts=counts_genes, group=c(rep("parental",7),rep("iPSC",18)))
save(genes_dge_l, file="genes_dge_list_abyzov.R")
## Exons
counts_exons <- cbind(counts_exons_parental, counts_exons_iPSC)
exons_dge_l <- DGEList(counts=counts_exons, group=c(rep("parental",7),rep("iPSC",18)))
save(exons_dge_l, file="exons_dge_list_abyzov.R")
###
### STARTING POINT: Load library and objects
library(edgeR)
load("genes_dge_list_abyzov.R")
load("exons_dge_list_abyzov.R")
###
### Differential gene expression analysis (edgeR)
## GENES
genes_norm_fact <- calcNormFactors(genes_dge_l)
# Calculate common dispersion
genes_comm_disp <- estimateCommonDisp(genes_norm_fact)
# Calculate tagwise dispersion
genes_tag_wise_disp <- estimateTagwiseDisp(genes_comm_disp)
# Exact negative binomial tagwise tests
genes_exact_test <- exactTest(genes_tag_wise_disp)
# Count differentially expressed genes (down / not significant / up)
summ_de_genes <- summary(decideTestsDGE(genes_exact_test))
# Subset top tags (FDR < 0.05)
tags_de_genes <- topTags(genes_exact_test, n=sum(summ_de_genes[c(1,3)]))
# Get count table normalized to counts per million
cpm_de_genes <- cpm(genes_tag_wise_disp)[rownames(tags_de_genes),]
## Write tables
write.table(genes_dge_l$counts, file="counts_raw_genes.tsv", quote=FALSE, sep="\t")
write.table(genes_comm_disp$pseudo.counts, file="counts_norm_genes.tsv", quote=FALSE, sep="\t")
write.table(cpm_de_genes, file="counts_norm_cpm_genes.tsv", quote=FALSE, sep="\t")
write.table(genes_exact_test$table, file="diff_expr_all_genes.tsv", quote=FALSE, sep="\t")
write.table(tags_de_genes$table, file="diff_exp_fdr_cutoff_genes.tsv", quote=FALSE, sep="\t")
## Tagwise dispersion vs log2(cpm)
pdf(file="BCV_plot_genes.pdf", width = 6, height = 6)
plotBCV(genes_tag_wise_disp, cex=0.4)
dev.off()
## Tagwise log2(FC) vs log2(cpm) (~MA plot)
pdf(file="smear_plot_genes.pdf", width = 6, height = 6)
detags <- rownames(genes_tag_wise_disp)[as.logical(decideTestsDGE(genes_exact_test))]
plotSmear(genes_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue")
dev.off()
## EXONS
exons_norm_fact <- calcNormFactors(exons_dge_l)
# Calculate common dispersion
exons_comm_disp <- estimateCommonDisp(exons_norm_fact)
# Calculate tagwise dispersion
exons_tag_wise_disp <- estimateTagwiseDisp(exons_comm_disp)
# Exact negative binomial tagwise tests
exons_exact_test <- exactTest(exons_tag_wise_disp)
# Count differentially expressed exons (down / not significant / up)
summ_de_exons <- summary(decideTestsDGE(exons_exact_test))
# Subset top tags (FDR < 0.05)
tags_de_exons <- topTags(exons_exact_test, n=sum(summ_de_exons[c(1,3)]))
# Get count table normalized to counts per million
cpm_de_exons <- cpm(exons_tag_wise_disp)[rownames(tags_de_exons),]
## Sample comparison
pdf(file="MDS_plot_exons.pdf", width = 6, height = 6)
plotMDS(exons_comm_disp)
dev.off()
## Write tables
write.table(exons_dge_l$counts, file="counts_raw_exons.tsv", quote=FALSE, sep="\t")
write.table(exons_comm_disp$pseudo.counts, file="counts_norm_exons.tsv", quote=FALSE, sep="\t")
write.table(cpm_de_exons, file="counts_norm_cpm_exons.tsv", quote=FALSE, sep="\t")
write.table(exons_exact_test$table, file="diff_expr_all_exons.tsv", quote=FALSE, sep="\t")
write.table(tags_de_exons$table, file="diff_exp_fdr_cutoff_exons.tsv", quote=FALSE, sep="\t")
## Tagwise dispersion vs log2(cpm)
pdf(file="BCV_plot_exons.pdf", width = 6, height = 6)
plotBCV(exons_tag_wise_disp, cex=0.4)
dev.off()
## Tagwise log2(FC) vs log2(cpm) (~MA plot)
pdf(file="smear_plot_exons.pdf", width = 6, height = 6)
detags <- rownames(exons_tag_wise_disp)[as.logical(decideTestsDGE(exons_exact_test))]
plotSmear(exons_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue")
dev.off()
###
#### LEFTOVERS ####
## Sample comparison
pdf(file="MDS_plot_genes.pdf", width = 6, height = 6)
plotMDS(genes_comm_disp)
dev.off()
mean_counts_genes_parental <- rowMeans(counts_genes_parental)
mean_counts_genes_iPSC <- rowMeans(counts_genes_iPSC)
mean_counts_exons_parental <- rowMeans(counts_exons_parental)
mean_counts_exons_iPSC <- rowMeans(counts_exons_iPSC)
sd_counts_genes_parental <- apply(counts_genes_parental, 1, sd)
sd_counts_genes_iPSC <- apply(counts_genes_iPSC, 1, sd)
sd_counts_exons_parental <- apply(counts_exons_parental, 1, sd)
sd_counts_exons_iPSC <- apply(counts_exons_iPSC, 1, sd)
####
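# The gene and exon analyses above repeat the same classic edgeR pipeline; a
# hedged sketch of a wrapper that factors out the shared steps (the function
# name is illustrative, not from the original script):
run_classic_edger <- function(dge) {
  dge <- calcNormFactors(dge)
  dge <- estimateCommonDisp(dge)
  dge <- estimateTagwiseDisp(dge)
  et  <- exactTest(dge)
  n_de <- sum(summary(decideTestsDGE(et))[c(1, 3)])  # down- + up-regulated
  list(fit = dge, test = et,
       top = topTags(et, n = n_de),
       cpm = cpm(dge))
}
# e.g. res_genes <- run_classic_edger(genes_dge_l)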
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{bathy.arctic}
\alias{bathy.arctic}
\title{Arctic Bathymetry
A Matrix containing elevation data taken from NOAA via marmap.}
\format{
A matrix containing 2160 x 1080 values of elevation. Depths are given as negative values. Resolution is 10 arc minutes (1/6th degree).
}
\source{
NOAA Bathymetric Database
}
\usage{
bathy.arctic
}
\description{
Arctic Bathymetry
A Matrix containing elevation data taken from NOAA via marmap.
}
\keyword{datasets}
|
/man/bathy.arctic.Rd
|
no_license
|
tbrycekelly/TheSource
|
R
| false | true | 543 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{bathy.arctic}
\alias{bathy.arctic}
\title{Arctic Bathymetry
A Matrix containing elevation data taken from NOAA via marmap.}
\format{
A matrix containing 2160 x 1080 values of elevation. Depths are given as negative values. Resolution is 10 arc minutes (1/6th degree).
}
\source{
NOAA Bathymetric Database
}
\usage{
bathy.arctic
}
\description{
Arctic Bathymetry
A Matrix containing elevation data taken from NOAA via marmap.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covariance.R
\name{grl}
\alias{grl}
\title{Approximating effective-counts as proposed by Greenland & Longnecker}
\usage{
grl(y, v, cases, n, type, data, tol = 1e-05)
}
\arguments{
\item{y}{a vector, defining the (reported) log relative risks.}
\item{v}{a vector, defining the variances of the reported log relative risks.}
\item{cases}{a vector, defining the number of cases for each exposure level.}
\item{n}{a vector, defining the total number of subjects for each exposure level. For incidence-rate data \code{n} indicates the amount of person-time within
each exposure level.}
\item{type}{a vector (or a character string), specifying the design of the study. Options are
\code{cc}, \code{ir}, and \code{ci}, for case-control, incidence-rate, and cumulative incidence data, respectively.}
\item{data}{an optional data frame (or object coercible by \code{\link{as.data.frame}} to a data frame) containing the variables in the previous arguments.}
\item{tol}{defines the tolerance.}
}
\value{
The results are returned structured in a matrix
\tabular{ll}{
\code{A} \tab approximated number of effective cases. \cr
\code{N} \tab approximated total number of effective subjects. \cr
}
}
\description{
Reconstructs the set of pseudo-numbers (or 'effective' numbers) of cases and non-cases consistent
with the input data (log relative risks). The method was first proposed in 1992 by Greenland and Longnecker.
}
\details{
The function reconstructs the effective counts corresponding to the multivariable adjusted log relative risks as well as their standard errors.
A unique solution is guaranteed by keeping the margins of the table of pseudo-counts equal to the margins of the crude or unadjusted data
(Greenland and Longnecker 1992). See the referenced article for a complete description of the algorithm implementation.
}
\examples{
## Loading data
data("alcohol_cvd")
## Obtaining pseudo-counts for the first study (id = 1)
grl(y = logrr, v = I(se^2), cases = cases, n = n, type = type,
data = subset(alcohol_cvd, id == 1))
## Obtaining pseudo-counts for all studies
by(alcohol_cvd, alcohol_cvd$id, function(x)
grl(y = logrr, v = I(se^2), cases = cases, n = n, type = type, data = x))
## Restructuring the previous results in a matrix
do.call("rbind", by(alcohol_cvd, alcohol_cvd$id, function(x)
grl(y = logrr, v = I(se^2), cases = cases, n = n, type = type, data = x)))
}
\references{
Greenland, S., Longnecker, M. P. (1992). Methods for trend estimation from summarized dose-response data, with applications to meta-analysis. American journal of epidemiology, 135(11), 1301-1309.
Orsini, N., Li, R., Wolk, A., Khudyakov, P., Spiegelman, D. (2012). Meta-analysis for linear and nonlinear dose-response relations: examples, an evaluation of approximations, and software.
American journal of epidemiology, 175(1), 66-73.
}
\seealso{
\code{\link{covar.logrr}}, \code{\link{hamling}}
}
\author{
Alessio Crippa, \email{alessio.crippa@ki.se}
}
|
/man/grl.Rd
|
no_license
|
alecri/dosresmeta
|
R
| false | true | 3,042 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covariance.R
\name{grl}
\alias{grl}
\title{Approximating effective-counts as proposed by Greenland & Longnecker}
\usage{
grl(y, v, cases, n, type, data, tol = 1e-05)
}
\arguments{
\item{y}{a vector, defining the (reported) log relative risks.}
\item{v}{a vector, defining the variances of the reported log relative risks.}
\item{cases}{a vector, defining the number of cases for each exposure level.}
\item{n}{a vector, defining the total number of subjects for each exposure level. For incidence-rate data \code{n} indicates the amount of person-time within
each exposure level.}
\item{type}{a vector (or a character string), specifying the design of the study. Options are
\code{cc}, \code{ir}, and \code{ci}, for case-control, incidence-rate, and cumulative incidence data, respectively.}
\item{data}{an optional data frame (or object coercible by \code{\link{as.data.frame}} to a data frame) containing the variables in the previous arguments.}
\item{tol}{defines the tolerance.}
}
\value{
The results are returned structured in a matrix
\tabular{ll}{
\code{A} \tab approximated number of effective cases. \cr
\code{N} \tab approximated total number of effective subjects. \cr
}
}
\description{
Reconstructs the set of pseudo-numbers (or 'effective' numbers) of cases and non-cases consistent
with the input data (log relative risks). The method was first proposed in 1992 by Greenland and Longnecker.
}
\details{
The function reconstructs the effective counts corresponding to the multivariable adjusted log relative risks as well as their standard errors.
A unique solution is guaranteed by keeping the margins of the table of pseudo-counts equal to the margins of the crude or unadjusted data
(Greenland and Longnecker 1992). See the referenced article for a complete description of the algorithm implementation.
}
\examples{
## Loading data
data("alcohol_cvd")
## Obtaining pseudo-counts for the first study (id = 1)
grl(y = logrr, v = I(se^2), cases = cases, n = n, type = type,
data = subset(alcohol_cvd, id == 1))
## Obtaining pseudo-counts for all studies
by(alcohol_cvd, alcohol_cvd$id, function(x)
grl(y = logrr, v = I(se^2), cases = cases, n = n, type = type, data = x))
## Restructuring the previous results in a matrix
do.call("rbind", by(alcohol_cvd, alcohol_cvd$id, function(x)
grl(y = logrr, v = I(se^2), cases = cases, n = n, type = type, data = x)))
}
\references{
Greenland, S., Longnecker, M. P. (1992). Methods for trend estimation from summarized dose-response data, with applications to meta-analysis. American journal of epidemiology, 135(11), 1301-1309.
Orsini, N., Li, R., Wolk, A., Khudyakov, P., Spiegelman, D. (2012). Meta-analysis for linear and nonlinear dose-response relations: examples, an evaluation of approximations, and software.
American journal of epidemiology, 175(1), 66-73.
}
\seealso{
\code{\link{covar.logrr}}, \code{\link{hamling}}
}
\author{
Alessio Crippa, \email{alessio.crippa@ki.se}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/audit_output_spec.R
\name{is_audit_output_spec}
\alias{is_audit_output_spec}
\title{Test if the object is an audit_output_spec}
\usage{
is_audit_output_spec(x)
}
\arguments{
\item{x}{An object}
}
\value{
`TRUE` if the object inherits from the `audit_output_spec`
class.
}
\description{
This function returns `TRUE` for audit_output_spec objects.
}
|
/man/is_audit_output_spec.Rd
|
no_license
|
md0u80c9/SSNAPStats
|
R
| false | true | 418 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/audit_output_spec.R
\name{is_audit_output_spec}
\alias{is_audit_output_spec}
\title{Test if the object is an audit_output_spec}
\usage{
is_audit_output_spec(x)
}
\arguments{
\item{x}{An object}
}
\value{
`TRUE` if the object inherits from the `audit_output_spec`
class.
}
\description{
This function returns `TRUE` for audit_output_spec objects.
}
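# A minimal sketch of how such an S3 class predicate is typically implemented
# (an assumption for illustration; the package's actual definition may differ):
is_audit_output_spec <- function(x) {
  inherits(x, "audit_output_spec")
}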
|
##############################################################################################################
################################ NEW DATASETS ################################################################
###############################################################################################################
load("../../koen2010/koen2010_orig.eqn-Koen_2010_pure.csv.RData")
results1_2 <- results
load("../../koen2010/koen2010_qrest.eqn-Koen_2010_pure.csv.RData")
results2_2 <- results
load("../../koen2010/koen2010_rrest.eqn-Koen_2010_pure.csv.RData")
results3_2 <- results
load("../../koen2011/koen2011_orig.eqn-Koen_2011.csv.RData")
results1_3 <- results
load("../../koen2011/koen2011_qrest.eqn-Koen_2011.csv.RData")
results2_3 <- results
load("../../koen2011/koen2011_rrest.eqn-Koen_2011.csv.RData")
results3_3 <- results
load("../../koen2013full/koen2013f_orig.eqn-Koen-2013_full.csv.RData")
results1_4 <- results
load("../../koen2013full/koen2013f_qrest.eqn-Koen-2013_full.csv.RData")
results2_4 <- results
load("../../koen2013full/koen2013f_rrest.eqn-Koen-2013_full.csv.RData")
results3_4 <- results
load("../../pratte2010/pratte_orig.eqn-Pratte_2010.csv.RData")
results1_5 <- results
load("../../pratte2010/pratte_qrest.eqn-Pratte_2010.csv.RData")
results2_5 <- results
load("../../pratte2010/pratte_rrest.eqn-Pratte_2010.csv.RData")
results3_5 <- results
load("../../smith2004/smith_orig.eqn-Smith_2004.csv.RData")
results1_6 <- results
load("../../smith2004/smith_qrest.eqn-Smith_2004.csv.RData")
results2_6 <- results
load("../../smith2004/smith_rrest.eqn-Smith_2004.csv.RData")
results3_6 <- results
load("../../jang2009/jang_orig.eqn-Jang_2009.csv.RData")
results1_7 <- results
load("../../jang2009/jang_qrest.eqn-Jang_2009.csv.RData")
results2_7 <- results
load("../../jang2009/jang_rrest.eqn-Jang_2009.csv.RData")
results3_7 <- results
gof_a <- bind_rows(
unnest(results1_2, gof),
unnest(results2_2, gof),
unnest(results3_2, gof),
unnest(results1_3, gof),
unnest(results2_3, gof),
unnest(results3_3, gof),
unnest(results1_4, gof),
unnest(results2_4, gof),
unnest(results3_4, gof),
unnest(results1_5, gof),
unnest(results2_5, gof),
unnest(results3_5, gof),
unnest(results1_6, gof),
unnest(results2_6, gof),
unnest(results3_6, gof),
unnest(results1_7, gof),
unnest(results2_7, gof),
unnest(results3_7, gof)
)
gof_a$pooling <- factor(gof_a$pooling, levels = c("no", "complete", "partial"),
labels = c("No", "Comp", "PP"))
gof_a$package <- factor(gof_a$package, levels = c("MPTinR", "TreeBUGS"),
labels = c("MR", "TB"))
gof_a$method <- factor(gof_a$method, levels = c("PB/MLE", "asymptotic", "simple",
"trait", "trait_uncorrelated","beta"),
labels = c("PB", "asy", "ss", "trait", "trait_u","beta"))
gof_a$inter <- with(gof_a, interaction(method, pooling, package, drop = TRUE, sep = " "))
levels(gof_a$inter) <- c("No.PB", "No.asy", "Comp.asy", "No.Bayes",
"Comp.Bayes", "Trait.PP", "Trait_u.PP","Beta.PP")
gof_a$focus <- factor(gof_a$focus, levels = c('cov', 'mean'),
labels = c('Covariance', 'Mean'))
gof_a$model <- ifelse(gof_a$model=="jang_orig.eqn"|gof_a$model=="koen2010_orig.eqn"|gof_a$model=="koen2011_orig.eqn"|gof_a$model=="koen2013f_orig.eqn"|gof_a$model=="pratte_orig.eqn"|gof_a$model=="smith_orig.eqn",
'Q & R Restr.',
ifelse(gof_a$model=="jang_qrest.eqn"|gof_a$model=="koen2010_qrest.eqn"|gof_a$model=="koen2011_qrest.eqn"|gof_a$model=="koen2013f_qrest.eqn"|gof_a$model=="pratte_qrest.eqn"|gof_a$model=="smith_qrest.eqn",
'Q Restricted',
ifelse(gof_a$model=="jang_rrest.eqn"|gof_a$model=="koen2010_rrest.eqn"|gof_a$model=="koen2011_rrest.eqn"|gof_a$model=="koen2013f_rrest.eqn"|gof_a$model=="pratte_rrest.eqn"|gof_a$model=="smith_rrest.eqn",
'R Restricted',
gof_a$model)))
gof_a$dataset <- factor(gof_a$dataset, levels = c("Jang_2009.csv", "Koen_2010_pure.csv", "Koen_2011.csv", "Koen-2013_full.csv", "Pratte_2010.csv", "Smith_2004.csv"),
labels = c("Jang et al. (2009)", "Koen & Yonelinas (2010)", "Koen & Yonelinas (2011)","Koen et al. (2013, F)","Pratte et al. (2010)","Smith & Duncan (2004, Exp. 2)"))
gof_all <- filter(gof_a, focus %in% c("Mean"))
ggplot(gof_all, aes(y = p,
x = inter, col=dataset)) +
geom_point(size=5) +
geom_hline(yintercept = .05, lty = 2)+
theme_bw() + coord_flip() +
facet_wrap(~model,ncol = 3) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0", "0.5", "1")) +
labs(x='Analysis approach',y= expression(italic(p)),
color='Dataset', title='Goodness of fit')+
theme(text=element_text(size = 22))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))
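# NOTE: `dd` and `shapes` are used by the parameter plots below but are
# defined earlier in the full script; hedged stand-in values so this excerpt
# runs (both values are assumptions, not taken from the original):
dd <- ggplot2::position_dodge(width = 0.75)  # dodge overlapping points/error bars
shapes <- c(16, 17, 15, 3, 7, 8, 5, 6)       # one plotting symbol per analysis approach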
params <- bind_rows(
unnest(results1_2, est_group),
unnest(results2_2, est_group),
unnest(results3_2, est_group),
unnest(results1_3, est_group),
unnest(results2_3, est_group),
unnest(results3_3, est_group),
unnest(results1_4, est_group),
unnest(results2_4, est_group),
unnest(results3_4, est_group),
unnest(results1_5, est_group),
unnest(results2_5, est_group),
unnest(results3_5, est_group),
unnest(results1_6, est_group),
unnest(results2_6, est_group),
unnest(results3_6, est_group),
unnest(results1_7, est_group),
unnest(results2_7, est_group),
unnest(results3_7, est_group)
)
params$pooling <- factor(params$pooling, levels = c("no", "complete", "partial"),
labels = c("No", "Comp", "PP"))
params$package <- factor(params$package, levels = c("MPTinR", "TreeBUGS"),
labels = c("MR", "TB"))
params$method <- factor(params$method, levels = c("PB/MLE", "asymptotic", "simple",
"trait", "trait_uncorrelated","beta"),
labels = c("PB", "asy", "ss", "trait", "trait_u","beta"))
params$inter <- with(params, interaction(method, pooling, package, drop = TRUE, sep = " "))
levels(params$inter) <- c("No.PB", "No.asy", "Comp.asy", "No.Bayes",
"Comp.Bayes", "Trait.PP", "Trait_u.PP","Beta.PP")
params$model <- ifelse(params$model=="jang_orig.eqn"|params$model=="koen2010_orig.eqn"|params$model=="koen2011_orig.eqn"|params$model=="koen2013f_orig.eqn"|params$model=="pratte_orig.eqn"|params$model=="smith_orig.eqn",
'Q & R Restr.',
ifelse(params$model=="jang_qrest.eqn"|params$model=="koen2010_qrest.eqn"|params$model=="koen2011_qrest.eqn"|params$model=="koen2013f_qrest.eqn"|params$model=="pratte_qrest.eqn"|params$model=="smith_qrest.eqn",
'Q Restricted',
ifelse(params$model=="jang_rrest.eqn"|params$model=="koen2010_rrest.eqn"|params$model=="koen2011_rrest.eqn"|params$model=="koen2013f_rrest.eqn"|params$model=="pratte_rrest.eqn"|params$model=="smith_rrest.eqn",
'R Restricted',
params$model)))
params$dataset <- factor(params$dataset, levels = c("Jang_2009.csv", "Koen_2010_pure.csv", "Koen_2011.csv", "Koen-2013_full.csv", "Pratte_2010.csv", "Smith_2004.csv"),
labels = c("Jang et al. (2009)", "Koen & Yonelinas (2010)", "Koen & Yonelinas (2011)","Koen et al. (2013, F)","Pratte et al. (2010)","Smith & Duncan (2004, Exp. 2)"))
Core_all <- filter(params, parameter %in% c("Dn","Do","g") & model %in% c('R Restricted'))
ggplot(Core_all, aes(y = est, x = inter,
color=dataset)) +
facet_wrap( ~parameter, ncol=3) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Analysis approach', y='Estimate', color='Dataset', title='Core Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
Core_all <- filter(params, parameter %in% c("Dn","Do","g") & model %in% c('R Restricted'))
ggplot(Core_all, aes(y = est, x = dataset,
color=inter, shape=inter)) +
facet_wrap( ~parameter, ncol=3) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Dataset', y='Estimate', color='Analysis approach', shape='Analysis approach', title='Core Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
Q_all <- filter(params, parameter %in% c("q_1", "q_2", "q_5", "q_6") & model %in% c('R Restricted'))
ggplot(Q_all, aes(y = est, x = inter,
color=dataset)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Analysis approach', y='Estimate', color='Dataset', title='Q Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
Q_all <- filter(params, parameter %in% c("q_1", "q_2", "q_5", "q_6") & model %in% c('R Restricted'))
ggplot(Q_all, aes(y = est, x = dataset,
color=inter, shape=inter)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Dataset', y='Estimate', color='Analysis approach', shape='Analysis approach', title='Q Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 20))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
R_all <- filter(params, parameter %in% c("r_1", "r_2", "r_6") & model %in% c('R Restricted'))
ggplot(R_all, aes(y = est, x = inter,
color=dataset)) +
facet_wrap( ~parameter, ncol=3) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Analysis approach', y='Estimate', color='Dataset', title='R Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
R_all <- filter(params, parameter %in% c("r_1", "r_2", "r_6") & model %in% c('R Restricted'))
ggplot(R_all, aes(y = est, x = dataset,
color=inter, shape=inter)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Dataset', y='Estimate', color='Analysis approach', shape='Analysis approach', title='R Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 20))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
####################################################################################################################
######################### ALL DATASETS #############################################################################
####################################################################################################################
#####################################################################################################################
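## This section repeats the NEW-DATASETS analysis for the full set of 13
## recognition-memory datasets. Each .RData file below is assumed to hold a
## tibble named `results` (one row per analysis approach), presumably produced
## by the mptmultiverse package; `orig` = both Q and R restricted, `qrest` =
## Q restricted, `rrest` = R restricted variants of the 2HTM .eqn files.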
load("../../koen2010/koen2010_orig.eqn-Koen_2010_pure.csv.RData")
results1_2 <- results
load("../../koen2010/koen2010_qrest.eqn-Koen_2010_pure.csv.RData")
results2_2 <- results
load("../../koen2010/koen2010_rrest.eqn-Koen_2010_pure.csv.RData")
results3_2 <- results
load("../../koen2011/koen2011_orig.eqn-Koen_2011.csv.RData")
results1_3 <- results
load("../../koen2011/koen2011_qrest.eqn-Koen_2011.csv.RData")
results2_3 <- results
load("../../koen2011/koen2011_rrest.eqn-Koen_2011.csv.RData")
results3_3 <- results
load("../../koen2013full/koen2013f_orig.eqn-Koen-2013_full.csv.RData")
results1_4 <- results
load("../../koen2013full/koen2013f_qrest.eqn-Koen-2013_full.csv.RData")
results2_4 <- results
load("../../koen2013full/koen2013f_rrest.eqn-Koen-2013_full.csv.RData")
results3_4 <- results
load("../../pratte2010/pratte_orig.eqn-Pratte_2010.csv.RData")
results1_5 <- results
load("../../pratte2010/pratte_qrest.eqn-Pratte_2010.csv.RData")
results2_5 <- results
load("../../pratte2010/pratte_rrest.eqn-Pratte_2010.csv.RData")
results3_5 <- results
load("../../smith2004/smith_orig.eqn-Smith_2004.csv.RData")
results1_6 <- results
load("../../smith2004/smith_qrest.eqn-Smith_2004.csv.RData")
results2_6 <- results
load("../../smith2004/smith_rrest.eqn-Smith_2004.csv.RData")
results3_6 <- results
load("../../jang2009/jang_orig.eqn-Jang_2009.csv.RData")
results1_7 <- results
load("../../jang2009/jang_qrest.eqn-Jang_2009.csv.RData")
results2_7 <- results
load("../../jang2009/jang_rrest.eqn-Jang_2009.csv.RData")
results3_7 <- results
load("../../dube2012p/dubep_orig.eqn-Dube_2012-P.csv.RData")
results1_8 <- results
load("../../dube2012p/dubep_qrest.eqn-Dube_2012-P.csv.RData")
results2_8 <- results
load("../../dube2012p/dubep_rrest.eqn-Dube_2012-P.csv.RData")
results3_8 <- results
load("../../dube2012w/dubew_orig.eqn-Dube_2012-W.csv.RData")
results1_9 <- results
load("../../dube2012w/dubeW_qrest.eqn-Dube_2012-W.csv.RData")
results2_9 <- results
load("../../dube2012w/dubeW_rrest.eqn-Dube_2012-W.csv.RData")
results3_9 <- results
load("../../heathcote2006e1/heathcote_orig.eqn-Heathcote_2006_e1.csv.RData")
results1_10 <- results
load("../../heathcote2006e1/heathcote_qrest.eqn-Heathcote_2006_e1.csv.RData")
results2_10 <- results
load("../../heathcote2006e1/heathcote_rrest.eqn-Heathcote_2006_e1.csv.RData")
results3_10 <- results
load("../../heathcote2006e2/heathcote2_orig.eqn-Heathcote_2006_e2.csv.RData")
results1_11 <- results
load("../../heathcote2006e2/heathcote2_qrest.eqn-Heathcote_2006_e2.csv.RData")
results2_11 <- results
load("../../heathcote2006e2/heathcote2_rrest.eqn-Heathcote_2006_e2.csv.RData")
results3_11 <- results
load("../../jaeger2012/jaeger_orig.eqn-Jaeger_2012.csv.RData")
results1_12 <- results
load("../../jaeger2012/jaeger_qrest.eqn-Jaeger_2012.csv.RData")
results2_12 <- results
load("../../jaeger2012/jaeger_rrest.eqn-Jaeger_2012.csv.RData")
results3_12 <- results
load("../../koen2013/koen_orig.eqn-Koen_2013_immediate.csv.RData")
results1_13 <- results
load("../../koen2013/koen_qrest.eqn-Koen_2013_immediate.csv.RData")
results2_13 <- results
load("../../koen2013/koen_rrest.eqn-Koen_2013_immediate.csv.RData")
results3_13 <- results
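## A more compact alternative to the block of load() calls above (a sketch,
## not part of the original pipeline): derive the file names from their
## common pattern <prefix>_<variant>.eqn-<data file>.RData. Note that it
## assumes consistent lower-case prefixes, which one pair of files above
## (dubeW_qrest/dubeW_rrest vs. dubew_orig) does not follow.
load_results <- function(dir, prefix, data_file, index) {
  variants <- c("orig", "qrest", "rrest")
  for (i in seq_along(variants)) {
    env <- new.env()
    load(file.path("../..", dir, paste0(prefix, "_", variants[i], ".eqn-",
                                        data_file, ".RData")), envir = env)
    assign(paste0("results", i, "_", index), env$results, envir = .GlobalEnv)
  }
}
## e.g. load_results("koen2010", "koen2010", "Koen_2010_pure.csv", 2)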
gof_a <- bind_rows(
unnest(results1_2, gof),
unnest(results2_2, gof),
unnest(results3_2, gof),
unnest(results1_3, gof),
unnest(results2_3, gof),
unnest(results3_3, gof),
unnest(results1_4, gof),
unnest(results2_4, gof),
unnest(results3_4, gof),
unnest(results1_5, gof),
unnest(results2_5, gof),
unnest(results3_5, gof),
unnest(results1_6, gof),
unnest(results2_6, gof),
unnest(results3_6, gof),
unnest(results1_7, gof),
unnest(results2_7, gof),
unnest(results3_7, gof),
unnest(results1_8, gof),
unnest(results2_8, gof),
unnest(results3_8, gof),
unnest(results1_9, gof),
unnest(results2_9, gof),
unnest(results3_9, gof),
unnest(results1_10, gof),
unnest(results2_10, gof),
unnest(results3_10, gof),
unnest(results1_11, gof),
unnest(results2_11, gof),
unnest(results3_11, gof),
unnest(results1_12, gof),
unnest(results2_12, gof),
unnest(results3_12, gof),
unnest(results1_13, gof),
unnest(results2_13, gof),
  unnest(results3_13, gof)
)
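## gof_a now stacks one goodness-of-fit row per analysis approach for every
## dataset x model-variant combination (13 datasets x 3 variants here).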
gof_a$pooling <- factor(gof_a$pooling, levels = c("no", "complete", "partial"),
labels = c("No", "Comp", "PP"))
gof_a$package <- factor(gof_a$package, levels = c("MPTinR", "TreeBUGS"),
labels = c("MR", "TB"))
gof_a$method <- factor(gof_a$method, levels = c("PB/MLE", "asymptotic", "simple",
"trait", "trait_uncorrelated","beta"),
labels = c("PB", "asy", "ss", "trait", "trait_u","beta"))
gof_a$inter <- with(gof_a, interaction(method, pooling, package, drop = TRUE, sep = " "))
levels(gof_a$inter) <- c("No.PB", "No.asy", "Comp.asy", "No.Bayes",
"Comp.Bayes", "Trait.PP", "Trait_u.PP","Beta.PP")
gof_a$focus <- factor(gof_a$focus, levels = c('cov', 'mean'),
labels = c('Covariance', 'Mean'))
## All model files follow the naming scheme <prefix>_<variant>.eqn, so the
## suffix identifies the variant; this replaces the (partly duplicated)
## enumeration of every individual file name.
gof_a$model <- ifelse(grepl("_orig\\.eqn$", gof_a$model), 'Q & R Restr.',
                ifelse(grepl("_qrest\\.eqn$", gof_a$model), 'Q Restricted',
                 ifelse(grepl("_rrest\\.eqn$", gof_a$model), 'R Restricted',
                        gof_a$model)))
gof_a$dataset <- factor(gof_a$dataset, levels = c("Jang_2009.csv", "Koen_2010_pure.csv", "Koen_2011.csv", "Koen-2013_full.csv", "Pratte_2010.csv", "Smith_2004.csv", "exp1.txt", "Dube_2012-P.csv", "Dube_2012-W.csv", "Heathcote_2006_e1.csv", "Heathcote_2006_e2.csv", "Jaeger_2012.csv", "Koen_2013_immediate.csv"),
labels = c("Jang et al. (2009)", "Koen & Yonelinas (2010)", "Koen & Yonelinas (2011)","Koen et al. (2013, F)","Pratte et al. (2010)","Smith & Duncan (2004, Exp. 2)","Broeder et al. (2013)", "Dube & Rotello (2012, P)", "Dube & Rotello (2012, W)","Heathcote et al. (2006, 1)","Heathcote et al. (2006, 2)","Jaeger et al. (2012)","Koen et al. (2013)"))
gof_all <- filter(gof_a, focus %in% c("Mean"))
ggplot(gof_all, aes(y = p,
x = inter, col=dataset)) +
geom_point(size=5) +
geom_hline(yintercept = .05, lty = 2)+
theme_bw() + coord_flip() +
facet_wrap(~model,ncol = 3) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0", "0.5", "1")) +
labs(x='Analysis approach',y= expression(italic(p)),
color='Dataset', title='Goodness of fit')+
theme(text=element_text(size = 22))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))
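## In the plot above, points falling below the dashed line (p < .05) flag
## analysis approaches whose mean-focused fit test rejects the model for
## that dataset.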
params <- bind_rows(
unnest(results1_2, est_group),
unnest(results2_2, est_group),
unnest(results3_2, est_group),
unnest(results1_3, est_group),
unnest(results2_3, est_group),
unnest(results3_3, est_group),
unnest(results1_4, est_group),
unnest(results2_4, est_group),
unnest(results3_4, est_group),
unnest(results1_5, est_group),
unnest(results2_5, est_group),
unnest(results3_5, est_group),
unnest(results1_6, est_group),
unnest(results2_6, est_group),
unnest(results3_6, est_group),
unnest(results1_7, est_group),
unnest(results2_7, est_group),
unnest(results3_7, est_group),
unnest(results1_8, est_group),
unnest(results2_8, est_group),
unnest(results3_8, est_group),
unnest(results1_9, est_group),
unnest(results2_9, est_group),
unnest(results3_9, est_group),
unnest(results1_10, est_group),
unnest(results2_10, est_group),
unnest(results3_10, est_group),
unnest(results1_11, est_group),
unnest(results2_11, est_group),
unnest(results3_11, est_group),
unnest(results1_12, est_group),
unnest(results2_12, est_group),
unnest(results3_12, est_group),
unnest(results1_13, est_group),
unnest(results2_13, est_group),
  unnest(results3_13, est_group)
)
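## `est_group` is assumed to hold the group-level parameter estimates (est)
## and standard errors (se) used by all parameter plots below.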
params$pooling <- factor(params$pooling, levels = c("no", "complete", "partial"),
labels = c("No", "Comp", "PP"))
params$package <- factor(params$package, levels = c("MPTinR", "TreeBUGS"),
labels = c("MR", "TB"))
params$method <- factor(params$method, levels = c("PB/MLE", "asymptotic", "simple",
"trait", "trait_uncorrelated","beta"),
labels = c("PB", "asy", "ss", "trait", "trait_u","beta"))
params$inter <- with(params, interaction(method, pooling, package, drop = TRUE, sep = " "))
levels(params$inter) <- c("No.PB", "No.asy", "Comp.asy", "No.Bayes",
"Comp.Bayes", "Trait.PP", "Trait_u.PP","Beta.PP")
params$model <- ifelse(grepl("_orig\\.eqn$", params$model), 'Q & R Restr.',
                 ifelse(grepl("_qrest\\.eqn$", params$model), 'Q Restricted',
                  ifelse(grepl("_rrest\\.eqn$", params$model), 'R Restricted',
                         params$model)))
params$dataset <- factor(params$dataset, levels = c("Jang_2009.csv", "Koen_2010_pure.csv", "Koen_2011.csv", "Koen-2013_full.csv", "Pratte_2010.csv", "Smith_2004.csv", "exp1.txt", "Dube_2012-P.csv", "Dube_2012-W.csv", "Heathcote_2006_e1.csv", "Heathcote_2006_e2.csv", "Jaeger_2012.csv", "Koen_2013_immediate.csv"),
labels = c("Jang et al. (2009)", "Koen & Yonelinas (2010)", "Koen & Yonelinas (2011)","Koen et al. (2013, F)","Pratte et al. (2010)","Smith & Duncan (2004, Exp. 2)","Broeder et al. (2013)", "Dube & Rotello (2012, P)", "Dube & Rotello (2012, W)","Heathcote et al. (2006, 1)","Heathcote et al. (2006, 2)","Jaeger et al. (2012)","Koen et al. (2013)"))
Core_all <- filter(params, parameter %in% c("Dn","Do","g") & model %in% c('R Restricted'))
ggplot(Core_all, aes(y = est, x = inter,
color=dataset)) +
facet_wrap( ~parameter, ncol=3) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Analysis approach', y='Estimate', color='Dataset', title='Core Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
Core_all <- filter(params, parameter %in% c("Dn","Do","g") & model %in% c('R Restricted'))
ggplot(Core_all, aes(y = est, x = dataset,
color=inter, shape=inter)) +
facet_wrap( ~parameter, ncol=3) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Dataset', y='Estimate', color='Analysis approach', shape='Analysis approach', title='Core Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
Q_all <- filter(params, parameter %in% c("q_1", "q_2", "q_5", "q_6") & model %in% c('R Restricted'))
ggplot(Q_all, aes(y = est, x = inter,
color=dataset)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Analysis approach', y='Estimate', color='Dataset', title='Q Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
Q_all <- filter(params, parameter %in% c("q_1", "q_2", "q_5", "q_6") & model %in% c('R Restricted'))
ggplot(Q_all, aes(y = est, x = dataset,
color=inter, shape=inter)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Dataset', y='Estimate', color='Analysis approach', shape='Analysis approach', title='Q Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 20))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
R_all <- filter(params, parameter %in% c("r_1", "r_2","r_5", "r_6") & model %in% c('R Restricted'))
ggplot(R_all, aes(y = est, x = inter,
color=dataset)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Analysis approach', y='Estimate', color='Dataset', title='R Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 24))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
R_all <- filter(params, parameter %in% c("r_1", "r_2", "r_5", "r_6") & model %in% c('R Restricted'))
ggplot(R_all, aes(y = est, x = dataset,
color=inter, shape=inter)) +
facet_wrap( ~parameter, ncol=4) +
geom_errorbar(aes(ymin = est-se, ymax = est+se), position = dd,
width = 0.6)+
geom_point(position = dd, size = 3.5) +
scale_shape_manual(values=shapes) +
scale_y_continuous(breaks=seq(0,1,by=.5),limits=c(0,1),
labels = c("0","0.5", "1")) +
labs(x='Dataset', y='Estimate', color='Analysis approach', shape='Analysis approach', title='R Parameters Across Data sets for R Restricted')+
theme_bw() +
theme(text=element_text(size = 20))+
theme(plot.title=element_text(face = 'bold',size=24, hjust = 0.5))+
coord_flip()
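## The six parameter plots above differ only in the parameter subset and in
## which variable sits on the (flipped) x axis. A helper along the following
## lines (a sketch, not part of the original script; it assumes `params`,
## `dd`, and `shapes` as defined above) would remove the duplication:
plot_params <- function(data, pars, xvar = c("inter", "dataset"), title = "") {
  xvar <- match.arg(xvar)
  colvar <- if (xvar == "inter") "dataset" else "inter"
  dat <- dplyr::filter(data, parameter %in% pars, model == "R Restricted")
  p <- ggplot(dat, aes(y = est, x = .data[[xvar]], color = .data[[colvar]])) +
    facet_wrap(~parameter, ncol = length(pars)) +
    geom_errorbar(aes(ymin = est - se, ymax = est + se),
                  position = dd, width = 0.6) +
    geom_point(position = dd, size = 3.5) +
    scale_y_continuous(breaks = seq(0, 1, by = .5), limits = c(0, 1),
                       labels = c("0", "0.5", "1")) +
    labs(x = if (xvar == "inter") "Analysis approach" else "Dataset",
         y = "Estimate", title = title) +
    theme_bw() +
    theme(text = element_text(size = 24),
          plot.title = element_text(face = "bold", size = 24, hjust = 0.5)) +
    coord_flip()
  if (xvar == "dataset")
    p <- p + aes(shape = inter) + scale_shape_manual(values = shapes)
  p
}
## e.g. plot_params(params, c("Dn", "Do", "g"), "dataset",
##                  title = "Core Parameters Across Data sets for R Restricted")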
/presentations/mpt-meeting-april-2018/not_used_files/new_datasets_results.R | no_license | singmann/mptmultiverse-2htm | R | false | false | 29,843 | r