| column | dtype | stats |
|---|---|---|
| content | large_string | lengths 0-6.46M |
| path | large_string | lengths 3-331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5-125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0-6.46M |
# Options expirations dates between 5/2011:12/2020
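#(assumes a data frame `df` with a Date column `date` already exists in the session)
#d1-d5 build the monthly expiration dates step by step: month-start sequence ->
#align to month start -> align to a Friday -> shift 14 days (the third Friday,
#the standard monthly expiration) -> roll back to the prior NYSE business day.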
library(TimeWarp) #dateSeq(), dateAlign() and dateWarp() are provided by the TimeWarp package
d1 <- dateSeq('2011-05-01', len=115, by='months')
d2 <- dateAlign(d1, by='months', dir=-1)
d3 <- dateAlign(d2, by='weeks', week.align=5)
d4 <- dateWarp(d3, 14, by='days')
d5 <- dateAlign(d4, by='bizdays@NYSEC', dir=-1)
df$OpExDates <- as.Date(d5)
for(i in seq_along(df$date)){
  df$daysUntilOpEx[i] <- as.numeric(max((df$date[i]-df$OpExDates)[df$date[i]-df$OpExDates < 0]))
}
#replace Inf values (dates after the last expiration) with NA, then drop the sign
df$daysUntilOpEx[is.infinite(df$daysUntilOpEx)] <- NA
df$daysUntilOpEx <- abs(df$daysUntilOpEx)
head(df, 30)
tail(df,30)
| /CalculateOpExDays.R | no_license | JakeDennis/MarketForecast | R | false | false | 602 | r |
library(modQR)
### Name: compContourM1/2u
### Title: Directional Regression Quantile Computation
### Aliases: compContourM1u compContourM2u compContourM1/2u
### ** Examples
##computing all directional 0.15-quantiles of 199 random points
##uniformly distributed in the unit square centered at zero
##- preparing the input
Tau <- 0.15
XMat <- matrix(1, 199, 1)
YMat <- matrix(runif(2*199, -0.5, 0.5), 199, 2)
##- Method 1:
COutST <- compContourM1u(Tau, YMat, XMat)
##- Method 2:
COutST <- compContourM2u(Tau, YMat, XMat)
| /data/genthat_extracted_code/modQR/examples/compContour.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 527 | r |
Problem2.45 <- data.frame(
"A" = c(
7.1662,
2.359,
19.9977,
0.9077,
-15.9034,
-6.0722,
9.9501,
-1.0944,
-4.6907,
-6.6929
),
"B" = c(
8.2416,
2.4555,
21.1018,
2.3401,
-15.0013,
-5.5941,
10.691,
-0.1358,
-3.3446,
-5.9303
),
"Delta" = c(
-1.0754,
-0.0965,
-1.1041,
-1.4324,
-0.9021,
-0.4781,
-0.7409,
-0.9586,
-1.3461,
-0.7626
))
| /data/Problem2.45.R | no_license | ehassler/MontgomeryDAE | R | false | false | 393 | r |
#Creating figures of changes in Flow & Temp in urban rivers from around US
#using data from USGS gauges.
#Data available at: https://maps.waterdata.usgs.gov/mapper/index.html
#Dependencies----
#library(googleVis)
#library(devtools)
# Hit an empty line in the Console to skip updates
library(dataRetrieval)
library(lubridate)
library(ggplot2)
library(chron)
library(zoo)
#Flow figures%%%---------
#Have to be careful and check time periods downloaded to ensure there
#are actually solid data available for entire date range
flow <- "00060"
#List of rivers and gauges---------------------------------------
#listing individually & then combining allows record of gauges included
#Cedar River Renton, WA-->PNW
#discharge Dec 1986-Jan 2021
#temp Oct 2007-Jan2021
cedar <- "12119000"
#Red Butte Creek near Salt Lake City, UT-->SW
#discharge Oct 1986-Jan 2021
#temp Feb 2012-Jan 2021
redbt <- "10172200"
#Rouge River Detroit, MI-->Midwest
#discharge Oct 1989-Jan 2021
#temp Oct 2007-Jan 2021
rouge <- "04166500"
#Chattahoochee River Atlanta, GA-->South
#discharge Oct 1989-Jan 2021
#temp Oct 2007-Jan 2021
chatt <- "02336000"
#Accotink Creek Annandale, VA-->Mid-Atlantic
#discharge Oct 1990-Jan 2021
#temp Feb 2015-Jan 2021
acco <- "01654000"
#Was not able to find gauges in urban areas for
#Rocky Mtns or New England
#rollmean(pre_flow_site$meanflow, 10, na.pad=TRUE) #stray test line commented out; pre_flow_site does not exist until the flow loop runs
#Additional Stations, for fun-------------------------------
#Trinity River Dallas, TX-->Because Texas
#discharge Jan 1988-Jan 2021
#temp Oct 2014-Jan 2021
trin <- "08057000"
#American River Fair Oaks, CA-->Because Cali
#discharge Oct 1987-Jan 2021
#temp Oct 2007-Jan 2021
amer <- "08057000"
#USGS KENAI R AT COOPER LANDING AK
#discharge 1987-10-02 2021-01-15
#temp 2007-10-01 2021-01-15
kenai <- "15258000"
#VILLAGE CREEK AT AVENUE W AT ENSLEY, AL
#discharge 1994-10-01 2021-01-13
#temp 2007-10-01 2021-01-13
#Upstream looks like rapid urbanization over past few decades.
#vilcrk <- "02458450"
#This site is too recent for the dates below.
#CO Gages ---------
# SOUTH PLATTE RIVER AT ENGLEWOOD, CO.
#temp 2007-10-01 2021-01-15
#flow 1986-10-24 2021-01-15
splatt <- "06711565"
#ARKANSAS RIVER AT MOFFAT STREET AT PUEBLO, CO
#temp 2007-10-01 2021-01-15
#flow 1988-10-01 2021-01-15
arkpeub <- "07099970"
#FOUNTAIN CREEK AT PUEBLO, CO.
#temp 2007-10-01 2021-01-15
#flow 1988-10-01 2021-01-15
fount <- "7106500" ###Throws errors in flow (IDK why) - didn't try temp.
#ARKANSAS RIVER NEAR AVONDALE, CO.
#temp 2007-10-01 2021-01-15
#flow 1986-11-17 2021-01-15
arkavon <- "07109500"
#ARKANSAS RIVER AT LAS ANIMAS, CO.
#temp 2007-10-01 2021-01-15
#flow 1987-10-04 2021-01-15
arklasa <- "07124000"
#East Coast:-----
#BRANDYWINE CREEK AT WILMINGTON, DE
#temp 2007-10-01 2021-01-15
#flow 1989-06-01 2021-01-15
brandy <- "01481500" #Flow doesn't have enough data for the earlier date
#Create list of gages for loop-----
gage.list <- list(cedar, redbt, rouge, chatt, acco, trin, amer)
#Make sure this is identical to the list prior...probably a more clever way to do this.
gage.names <- list("cedar", "redbt", "rouge", "chatt", "acco", "trin", "amer")
#Running additionals
#CO list
gage.list <- list(splatt,arkpeub,arkavon,arklasa)
#Make sure this is identical to the list prior...probably a more clever way to do this.
gage.names <- list("splatt","arkpeub","arkavon","arklasa")
#DE
gage.list <- list(brandy) #"reedy" was never defined; brandy (Brandywine Creek) is the DE gauge set up above
#Make sure this is identical to the list prior...probably a more clever way to do this.
gage.names <- list("brandy")
#Set working directory to deposit plots before starting loop:
#setwd("C:/Users/wilsonmatt/OneDrive - Susquehanna University/Manuscripts/LTF/plots")
#Save plots to "Figures" folder in repository
#pdf(paste("figures/", "Flow", ".pdf", sep=""))
###Start flow loop ------
for(i in seq_along(gage.list))
{ site <- gage.list[[i]] #use [[ ]] so readNWISuv() gets a character string rather than a one-element list
#Rest of script - sub "site" for the gauge code in the functions#
#1993-95
flow_pre1 <- readNWISuv(site,flow,"1993-01-01","1993-12-31")
flow_pre1$year <- format(flow_pre1$dateTime, "%Y")
flow_pre1 <- subset(flow_pre1, year != "1994")
flow_pre1$jdate <- yday(as.Date(flow_pre1$dateTime))
pre1_day <- aggregate(X_00060_00000 ~ jdate, data = flow_pre1, mean)
flow_pre2 <- readNWISuv(site,flow,"1994-01-01","1994-12-31")
flow_pre2$year <- format(flow_pre2$dateTime, "%Y")
flow_pre2 <- subset(flow_pre2, year != "1995")
flow_pre2$jdate <- yday(as.Date(flow_pre2$dateTime))
pre2_day <- aggregate(X_00060_00000 ~ jdate, data = flow_pre2, mean)
flow_pre3 <- readNWISuv(site,flow,"1995-01-01","1995-12-31")
flow_pre3$year <- format(flow_pre3$dateTime, "%Y")
flow_pre3 <- subset(flow_pre3, year != "1996")
flow_pre3$jdate <- yday(as.Date(flow_pre3$dateTime))
pre3_day <- aggregate(X_00060_00000 ~ jdate, data = flow_pre3, mean)
df_list <- list(pre1_day, pre2_day, pre3_day)
pre_all <- Reduce(function(x, y) merge(x, y, by= "jdate"), df_list, accumulate=FALSE)
pre_flow_site <- cbind(pre_all, meanflow = rowMeans(pre_all[2:4]))
#flow, post site (2017-19)
flow_post1 <- readNWISuv(site,flow,"2017-01-01","2017-12-31")
flow_post1$year <- format(flow_post1$dateTime, "%Y")
flow_post1 <- subset(flow_post1, year != "2018")
flow_post1$jdate <- yday(as.Date(flow_post1$dateTime))
post1_day <- aggregate(X_00060_00000 ~ jdate, data = flow_post1, mean)
flow_post2 <- readNWISuv(site,flow,"2018-01-01","2018-12-31")
flow_post2$year <- format(flow_post2$dateTime, "%Y")
flow_post2 <- subset(flow_post2, year != "2019")
flow_post2$jdate <- yday(as.Date(flow_post2$dateTime))
post2_day <- aggregate(X_00060_00000 ~ jdate, data = flow_post2, mean)
flow_post3 <- readNWISuv(site,flow,"2019-01-01","2019-12-31")
flow_post3$year <- format(flow_post3$dateTime, "%Y")
flow_post3 <- subset(flow_post3, year != "2020")
flow_post3$jdate <- yday(as.Date(flow_post3$dateTime))
post3_day <- aggregate(X_00060_00000 ~ jdate, data = flow_post3, mean)
df_list <- list(post1_day, post2_day, post3_day)
post_all <- Reduce(function(x, y) merge(x, y, by= "jdate"), df_list, accumulate=FALSE)
post_flow_site <- cbind(post_all, meanflow = rowMeans(post_all[2:4]))
#head(pre_flow_site)
#pre_date <- as.data.frame(month.day.year(pre_flow_site$jdate))
#pre_flow_site$monthday <- as.Date(with(pre_date, paste(month, day,sep="-")), "%m-%d")
#pre_flow_site$monthday <- as.Date(pre_flow_site$monthday)
#then just output all of the plots
jpeg(filename=paste("Figures/", "flow.avg", gage.names[i], ".jpg"),
width = 600, height = 600, units = "px", pointsize = 12,
quality = 300)
print(ggplot(pre_flow_site, aes(jdate, meanflow))+
#geom_line(data=pre_flow_site,
# aes(jdate, meanflow, color="1993-95",linetype="dashed")) +
#geom_line(data=post_flow_site,
# aes(jdate, meanflow, color="2017-19",linetype="solid"))+
geom_line(data=pre_flow_site,
aes(y=rollmean(meanflow, 20, na.pad=TRUE), color="1993-95"),
size=1.5, na.rm = TRUE) +
geom_line(data=post_flow_site,
aes(y=rollmean(meanflow, 20, na.pad=TRUE), color="2017-19"),
size=1.5,na.rm = TRUE) +
theme_bw() +
scale_color_manual(values=c("#999999", "#000000"))+
theme(axis.line = element_line(color = "black"),
axis.text = element_text(size = 13),
axis.title.x = element_text(size = 13, face = "bold"),
axis.title.y = element_text(size = 13, face = "bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
ylab(expression(bold(Discharge~(ft^3/s))))+
xlab("Julian Date")+
theme(legend.title=element_blank(),
legend.text=element_text(size=13))+
theme(legend.position = c(0.05, 0.95),legend.justification = c(0, 1)))
dev.off()
}
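#Helper sketch (not wired into the loops above or below): the six per-year blocks in
#each loop are near-identical and could be collapsed into one function like this.
#The "X_<parameterCd>_00000" column naming follows dataRetrieval's default for
#unit-value data; treat this as an illustration rather than a drop-in replacement.
daily_means_for_year <- function(site, parameterCd, year) {
  d <- readNWISuv(site, parameterCd,
                  paste0(year, "-01-01"), paste0(year, "-12-31"))
  #drop any records that spill into the following year, as the loops above do
  d <- subset(d, format(dateTime, "%Y") == as.character(year))
  d$jdate <- yday(as.Date(d$dateTime))
  d$value <- d[[paste0("X_", parameterCd, "_00000")]]
  #one row per Julian day: the daily mean of the unit values
  aggregate(value ~ jdate, data = d, FUN = mean)
}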
###Start temp loop -------------
temp <- "00010"
#Loop is set to only plot sites with temp data starting by 1 Jan 2008.
#Cedar River Renton, WA-->PNW
#discharge Dec 1986-Jan 2021
#temp Oct 2007-Jan2021
cedar <- "12119000"
#Rouge River Detroit, MI-->Midwest
#discharge Oct 1989-Jan 2021
#temp Oct 2007-Jan 2021
rouge <- "04166500"
#Chattahoochee River Atlanta, GA-->South
#discharge Oct 1989-Jan 2021
#temp Oct 2007-Jan 2021
chatt <- "02336000"
#American River Fair Oaks, CA-->Because Cali
#discharge Oct 1987-Jan 2021
#temp Oct 2007-Jan 2021
amer <- "08057000"
#USGS KENAI R AT COOPER LANDING AK
#discharge 1987-10-02 2021-01-15
#temp 2007-10-01 2021-01-15
kenai <- "15258000"
#Rouge, chatt, amer had too many gaps to plot effectively.
#Create list of gages for loop
gage.list <- list(cedar, amer, kenai)
#Make sure this is identical to the list prior...probably a more clever way to do this.
gage.names <- list("cedar", "amer", "kenai")
for(i in seq_along(gage.list))
{ site <- gage.list[[i]] #use [[ ]] so readNWISuv() gets a character string rather than a one-element list
#Rest of script - sub "site" for the gauge code in the functions#
#2008-10
temp_pre1 <- readNWISuv(site,temp,"2008-01-01","2008-12-31")
temp_pre1$year <- format(temp_pre1$dateTime, "%Y")
temp_pre1 <- subset(temp_pre1, year != "2009")
temp_pre1$jdate <- yday(as.Date(temp_pre1$dateTime))
pre1_day <- aggregate(X_00010_00000 ~ jdate, data = temp_pre1, mean)
temp_pre2 <- readNWISuv(site,temp,"2009-01-01","2009-12-31")
temp_pre2$year <- format(temp_pre2$dateTime, "%Y")
temp_pre2 <- subset(temp_pre2, year != "2010")
temp_pre2$jdate <- yday(as.Date(temp_pre2$dateTime))
pre2_day <- aggregate(X_00010_00000 ~ jdate, data = temp_pre2, mean)
temp_pre3 <- readNWISuv(site,temp,"2010-01-01","2010-12-31")
temp_pre3$jdate <- yday(as.Date(temp_pre3$dateTime))
temp_pre3$year <- format(temp_pre3$dateTime, "%Y")
temp_pre3 <- subset(temp_pre3, year != "2011")
pre3_day <- aggregate(X_00010_00000 ~ jdate, data = temp_pre3, mean)
df_list <- list(pre1_day, pre2_day, pre3_day)
pre_all <- Reduce(function(x, y) merge(x, y, by= "jdate"), df_list, accumulate=FALSE)
pre_temp_site <- cbind(pre_all, meantemp = rowMeans(pre_all[2:4]))
#temp, post site: 2017-19
temp_post1 <- readNWISuv(site,temp,"2017-01-01","2017-12-31")
temp_post1$year <- format(temp_post1$dateTime, "%Y")
temp_post1 <- subset(temp_post1, year != "2018")
temp_post1$jdate <- yday(as.Date(temp_post1$dateTime))
post1_day <- aggregate(X_00010_00000 ~ jdate, data = temp_post1, mean)
temp_post2 <- readNWISuv(site,temp,"2018-01-01","2018-12-31")
temp_post2$year <- format(temp_post2$dateTime, "%Y")
temp_post2 <- subset(temp_post2, year != "2019")
temp_post2$jdate <- yday(as.Date(temp_post2$dateTime))
post2_day <- aggregate(X_00010_00000 ~ jdate, data = temp_post2, mean)
temp_post3 <- readNWISuv(site,temp,"2019-01-01","2019-12-31")
temp_post3$year <- format(temp_post3$dateTime, "%Y")
temp_post3 <- subset(temp_post3, year != "2020")
temp_post3$jdate <- yday(as.Date(temp_post3$dateTime))
post3_day <- aggregate(X_00010_00000 ~ jdate, data = temp_post3, mean)
df_list <- list(post1_day, post2_day, post3_day)
post_all <- Reduce(function(x, y) merge(x, y, by= "jdate"), df_list, accumulate=FALSE)
post_temp_site <- cbind(post_all, meantemp = rowMeans(post_all[2:4]))
#head(pre_temp_site)
#pre_date <- as.data.frame(month.day.year(pre_temp_site$jdate))
#pre_temp_site$monthday <- as.Date(with(pre_date, paste(month, day,sep="-")), "%m-%d")
#pre_temp_site$monthday <- as.Date(pre_temp_site$monthday)
#then just output all of the plots
jpeg(filename=paste("Figures/", "temp.avg", gage.names[i], ".jpg"),
width = 600, height = 600, units = "px", pointsize = 12,
quality = 300)
print(ggplot(pre_temp_site, aes(jdate, meantemp))+
#geom_line(data=pre_temp_site, aes(jdate, meantemp, color="2008-10")) +
#geom_line(data=post_temp_site, aes(jdate, meantemp, color="2017-19"))+
geom_line(data=pre_temp_site, aes(y=rollmean(meantemp, 20, na.pad=TRUE),
color="2008-10"), size=1.5, na.rm = TRUE) +
geom_line(data=post_temp_site, aes(y=rollmean(meantemp, 20, na.pad=TRUE),
color="2017-19"),size=1.5,na.rm = TRUE) +
theme_bw() +
scale_color_manual(values=c("#999999", "#000000"))+
theme(axis.line = element_line(color = "black"),
axis.text = element_text(size = 13),
axis.title.x = element_text(size = 13, face = "bold"),
axis.title.y = element_text(size = 13, face = "bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
ylab(expression(bold(Temperature~(degree~C))))+
xlab("Julian Date")+
theme(legend.title=element_blank(),
legend.text=element_text(size=13))+
theme(legend.position = c(0.05, 0.95),legend.justification = c(0, 1)))
dev.off()
}
| /Code/FlowTempPlots.R | no_license | wilson-matt/FTL | R | false | false | 12,762 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_p_spline_prev.R
\name{plot_p_spline_prev}
\alias{plot_p_spline_prev}
\title{Plotting function for the P_spline model}
\usage{
plot_p_spline_prev(
X,
Y,
N,
p_spline_fit,
target_dist_between_knots = 5,
spline_degree = 3,
ylim = 1
)
}
\arguments{
\item{X}{date vector.}
\item{Y}{Numeric vector of number of positive samples}
\item{N}{Numeric vector of total number of samples}
\item{target_dist_between_knots}{sets the number of days between adjacent knots (default = 5)}
\item{spline_degree}{sets the degree of the splines (default = 3)}
\item{ylim}{sets the ylimit of the plot}
\item{p_spline_fit}{fit of the model to the same set of data using reactidd::stan_p_spline()}
}
\value{
A list containing the plot of the model fit, the raw data and CIs used in the plot,
and the raw model-fit data shown in the plot.
}
\description{
Plotting function for the P_spline model
}
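\examples{
## Illustrative sketch only (not run): the stan_p_spline() call and the toy data
## below are assumptions, not part of the documented interface.
\dontrun{
X <- seq(as.Date("2020-05-01"), by = "day", length.out = 50)
Y <- rbinom(50, size = 1000, prob = 0.005)
N <- rep(1000, 50)
p_spline_fit <- reactidd::stan_p_spline(X, Y, N)
out <- plot_p_spline_prev(X, Y, N, p_spline_fit,
                          target_dist_between_knots = 5,
                          spline_degree = 3, ylim = 0.02)
out[[1]]
}
}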
| /man/plot_p_spline_prev.Rd | permissive | mrc-ide/reactidd | R | false | true | 977 | rd |
library(data.table)
library(dplyr)
setwd("C:\\Users\\alexandru.toth\\datasciencecoursera\\DataPlotting-Week3")
SCC <- as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))
NEI <- as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))
data5 <- NEI %>%
filter(fips == "24510") %>%
filter(type == "ON-ROAD") %>%
group_by(year) %>%
summarise(TotalEmissions = sum(Emissions))
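# data5: total PM2.5 emissions (tons) per year from on-road sources in
# Baltimore City (fips 24510)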
png(file='plot5.png')
barplot(data5$TotalEmissions, names=data5$year, main = "Baltimore City, On-ROAD", ylab="PM2.5 Emissions, Tons")
dev.off()
| /plot5.r | no_license | AlexTDataScientist/ExploratoryDataAnalysis_2 | R | false | false | 541 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{splitMat}
\alias{splitMat}
\title{splitMat: split a matrix into a list of matrices}
\usage{
splitMat(m, margin, f)
}
\arguments{
\item{m}{A numeric matrix to be divided into list of matrices.}
\item{margin}{The margin of the matrix to split.}
\item{f}{An integer vector used to split the matrix.}
}
\value{
A list of matrices.
}
\description{
Splits a matrix into a list of matrices along the given margin.
}
\examples{
x <- matrix(rnorm(48), 6, 8)
splitMat(x, 1, rep(1:3, each = 2))
splitMat(x, 2, rep(1:4, each = 2))
}
| /man/splitMat.Rd | permissive | alexchang2017/rfda | R | false | true | 597 | rd |
### Download input files
ensemble_params_file <- path(download_dir, "input-parameters.csv")
params_osf <- "87ku4"
trait_distribution_file <- path(download_dir, "trait-distribution.rds")
td_osf <- "bfyuh"
plan <- bind_plans(plan, drake_plan(
run_params_dl = target(
download.file(osf_url(params_osf), file_out(!!ensemble_params_file)),
trigger = trigger(change = get_timestamp(params_osf))
),
trait_distribution_dl = target(
download.file(osf_url(td_osf), file_out(!!trait_distribution_file)),
trigger = trigger(change = get_timestamp(td_osf))
)
))
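# The OSF download targets above re-run only when get_timestamp() reports a new
# remote timestamp, so each file is re-downloaded only after it changes on OSF.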
### Trait distribution figures
plan <- bind_plans(plan, drake_plan(
trait_distribution = readRDS(file_in(!!trait_distribution_file)) %>%
mutate(pft = factor(pft, pfts("pft"))),
ed2_default_params = ed_default_params() %>%
semi_join(trait_distribution, c("pft", "trait")),
param_dist_gg = trait_distribution %>%
unnest(draws) %>%
ggplot() +
aes(x = pft, y = draws, fill = pft) +
geom_violin(alpha = 0.5) +
geom_point(aes(y = default_value, color = "default", shape = "default"),
data = ed2_default_params, size = 2) +
geom_point(aes(y = Median, color = "median", shape = "median"),
data = trait_distribution, size = 2) +
# Parameter values
## geom_text(aes(y = value, label = label), data = use_param_values) +
facet_wrap(vars(trait), scales = "free_y") +
scale_fill_manual(values = pfts("color")) +
scale_color_manual(values = c("default" = "red1", "median" = "blue1")) +
scale_shape_manual(values = c("default" = 3, "median" = 4)) +
guides(color = guide_legend(title = "param"),
shape = guide_legend(title = "param"),
fill = guide_legend(override.aes = list(size = 0))) +
labs(x = "PFT", fill = "PFT") +
theme_cowplot() +
theme(axis.title = element_blank(),
axis.text.x = element_blank(),
legend.position = c(0.7, 0.1),
legend.box = "horizontal"),
param_dist_png = ggsave(
file_out(!!path(fig_dir, "param-dist.png")),
param_dist_gg,
width = 15.3, height = 9.9
),
param_dist_knit = knitr::include_graphics(file_in(!!path(
fig_dir, "param-dist.png"))),
))
| /analysis/drake/trait-distribution.R | permissive | femeunier/fortebaseline | R | false | false | 2,223 | r |
#' Calculate cluster medians
#'
#' Calculate cluster medians (median expression for each cluster-sample-marker
#' combination)
#'
#' Calculate median marker expression for each cluster and sample (i.e. medians for each
#' cluster-sample-marker combination).
#'
#' The data object is assumed to contain a factor \code{marker_class} in the column
#' meta-data (see \code{\link{prepareData}}), which indicates the protein marker class for
#' each column of data (\code{"type"}, \code{"state"}, or \code{"none"}).
#'
#' The cluster medians are required for testing for differential states within cell
#' populations, and for plotting purposes.
#'
#' Variables \code{id_type_markers} and \code{id_state_markers} are saved in the
#' \code{metadata} slot of the output object. These can be used to identify the 'cell
#' type' and 'cell state' markers in the list of \code{assays} in the output
#' \code{\link{SummarizedExperiment}} object, which is useful in later steps of the
#' 'diffcyt' pipeline.
#'
#' Results are returned as a new \code{\link{SummarizedExperiment}} object, where rows =
#' clusters, columns = samples, sheets (\code{assays} slot) = markers. Note that there is
#' a separate table of values (\code{assay}) for each marker. The \code{metadata} slot
#' also contains variables \code{id_type_markers} and \code{id_state_markers}, which can
#' be used to identify the sets of cell type and cell state markers in the list of
#' \code{assays}.
#'
#'
#' @param d_se Data object from previous steps, in \code{\link{SummarizedExperiment}}
#' format, containing cluster labels as a column in the row meta-data (from
#' \code{\link{generateClusters}}). Column meta-data is assumed to contain a factor
#' \code{marker_class}.
#'
#'
#' @return \code{d_medians}: \code{\link{SummarizedExperiment}} object, where rows =
#' clusters, columns = samples, sheets (\code{assays} slot) = markers. The
#' \code{metadata} slot contains variables \code{id_type_markers} and
#' \code{id_state_markers}, which can be accessed with
#' \code{metadata(d_medians)$id_type_markers} and
#' \code{metadata(d_medians)$id_state_markers}.
#'
#'
#' @importFrom SummarizedExperiment SummarizedExperiment assays rowData colData
#' @importFrom dplyr group_by tally summarize
#' @importFrom tidyr complete
#' @importFrom reshape2 acast
#' @importFrom magrittr '%>%'
#' @importFrom stats median
#' @importFrom methods is
#'
#' @export
#'
#' @examples
#' # For a complete workflow example demonstrating each step in the 'diffcyt' pipeline,
#' # see the package vignette.
#'
#' # Function to create random data (one sample)
#' d_random <- function(n = 20000, mean = 0, sd = 1, ncol = 20, cofactor = 5) {
#' d <- sinh(matrix(rnorm(n, mean, sd), ncol = ncol)) * cofactor
#' colnames(d) <- paste0("marker", sprintf("%02d", 1:ncol))
#' d
#' }
#'
#' # Create random data (without differential signal)
#' set.seed(123)
#' d_input <- list(
#' sample1 = d_random(),
#' sample2 = d_random(),
#' sample3 = d_random(),
#' sample4 = d_random()
#' )
#'
#' experiment_info <- data.frame(
#' sample_id = factor(paste0("sample", 1:4)),
#' group_id = factor(c("group1", "group1", "group2", "group2")),
#' stringsAsFactors = FALSE
#' )
#'
#' marker_info <- data.frame(
#' channel_name = paste0("channel", sprintf("%03d", 1:20)),
#' marker_name = paste0("marker", sprintf("%02d", 1:20)),
#' marker_class = factor(c(rep("type", 10), rep("state", 10)),
#' levels = c("type", "state", "none")),
#' stringsAsFactors = FALSE
#' )
#'
#' # Prepare data
#' d_se <- prepareData(d_input, experiment_info, marker_info)
#'
#' # Transform data
#' d_se <- transformData(d_se)
#'
#' # Generate clusters
#' d_se <- generateClusters(d_se)
#'
#' # Calculate medians
#' d_medians <- calcMedians(d_se)
#'
calcMedians <- function(d_se) {
if (!is(d_se, "SummarizedExperiment")) {
stop("Data object must be a 'SummarizedExperiment'")
}
if (!("cluster_id" %in% (colnames(rowData(d_se))))) {
stop("Data object does not contain cluster labels. Run 'generateClusters' to generate cluster labels.")
}
is_marker <- colData(d_se)$marker_class != "none"
# identify 'cell type' and 'cell state' markers in final list of assays
id_type_markers <- (colData(d_se)$marker_class == "type")[is_marker]
id_state_markers <- (colData(d_se)$marker_class == "state")[is_marker]
# calculate cluster medians for each marker
assaydata_mx <- as.matrix(assays(d_se)[["exprs"]])
medians <- vector("list", sum(is_marker))
marker_names_sub <- as.character(colData(d_se)$marker_name[is_marker])
names(medians) <- marker_names_sub
clus <- rowData(d_se)$cluster_id
smp <- rowData(d_se)$sample_id
for (i in seq_along(medians)) {
assaydata_i <- assaydata_mx[, marker_names_sub[i], drop = FALSE]
assaydata_i <- as.data.frame(assaydata_i)
assaydata_i <- cbind(assaydata_i, sample_id = smp, cluster_id = clus)
colnames(assaydata_i)[1] <- "value"
assaydata_i %>%
group_by(cluster_id, sample_id, .drop = FALSE) %>%
summarize(median = median(value)) ->
med
med <- acast(med, cluster_id ~ sample_id, value.var = "median", fill = NA)
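# med: clusters x samples matrix of medians for this marker (NA where a
# cluster-sample combination has no cells)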
# fill in any missing clusters
if (nrow(med) < nlevels(rowData(d_se)$cluster_id)) {
ix_missing <- which(!(levels(rowData(d_se)$cluster_id) %in% rownames(med)))
med_tmp <- matrix(NA, nrow = length(ix_missing), ncol = ncol(med))
rownames(med_tmp) <- ix_missing
med <- rbind(med, med_tmp)
# re-order rows
med <- med[order(as.numeric(rownames(med))), , drop = FALSE]
}
medians[[i]] <- med
}
# check cluster IDs and sample IDs are identical
for (i in seq_along(medians)) {
if (!all(rownames(medians[[i]]) == rownames(medians[[1]]))) {
stop("Cluster IDs do not match")
}
if (!all(colnames(medians[[i]]) == colnames(medians[[1]]))) {
stop("Sample IDs do not match")
}
}
# create new SummarizedExperiment (rows = clusters, columns = samples)
row_data <- data.frame(
cluster_id = factor(rownames(medians[[1]]), levels = levels(rowData(d_se)$cluster_id)),
stringsAsFactors = FALSE
)
col_data <- metadata(d_se)$experiment_info
# rearrange sample order to match 'experiment_info'
medians <- lapply(medians, function(m) {
m[, match(col_data$sample_id, colnames(m)), drop = FALSE]
})
stopifnot(all(sapply(medians, function(m) {
col_data$sample_id == colnames(m)
})))
metadata <- list(id_type_markers = id_type_markers,
id_state_markers = id_state_markers)
d_medians <- SummarizedExperiment(
assays = medians,
rowData = row_data,
colData = col_data,
metadata = metadata
)
d_medians
}
| /R/calcMedians.R | permissive | lucaslelann/diffcyt | R | false | false | 6,821 | r |
source("scRNA_func.r")
library("Seurat")
library("readxl")
library(ggplot2)
options_table<-read.table(parSampleFile1, sep="\t", header=F, stringsAsFactors = F)
myoptions<-split(options_table$V1, options_table$V2)
#assay=ifelse(myoptions$by_sctransform == "0", "RNA", "SCT")
#use RNA assay for visualization
assay="RNA"
finalList<-readRDS(parFile1)
obj=finalList$obj
assaydata=GetAssayData(obj, assay=assay)
allgenes=rownames(assaydata)
rm(assaydata)
genes <- read_xlsx(parFile4, sheet = 1)
for(idx in c(2:nrow(genes))){
if(is.na(genes[idx,"Cell Type"])){
genes[idx,"Cell Type"]=genes[idx-1,"Cell Type"]
}
}
gene_names=genes$`Marker Gene`
gene_names[gene_names=="PECAM"] = "PECAM1"
gene_names[gene_names=="HGD1B"] = "HGD"
gene_names[gene_names=="EpCAM"] = "EPCAM"
gene_names[gene_names=="CD25"] = "IL2RA"
gene_names[gene_names=="ACTAA2"] = "ACTA2"
gene_names[gene_names=="MTND6"] = "MT-ND6"
gene_names[gene_names=="FOXJ!"] = "FOXJ1"
genes$`Marker Gene`<-gene_names
miss_genes=genes$`Marker Gene`[!(genes$`Marker Gene` %in% allgenes)]
writeLines(miss_genes, con="miss_gene.csv")
genes<-genes[genes$`Marker Gene` %in% allgenes,]
genes$`Cell Type`=factor(genes$`Cell Type`, levels=unique(genes$`Cell Type`))
gene_groups=split(genes$`Marker Gene`, genes$`Cell Type`)
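# gene_groups: named list mapping each cell type to its marker genes (in sheet order)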
cell_type<-read.csv(parFile3, stringsAsFactors=F)
cell_type$cell_type <- cell_type[,myoptions$celltype_name]
cell_type$seurat_celltype_clusters<-cell_type[,myoptions$cluster_name]
sheets=excel_sheets(parFile4)
if(length(sheets) > 1 && myoptions$bubblemap_use_order == "1"){
clusters<-read_xlsx(parFile4, sheet = 2)
cluster_ids<-clusters$`Order of Clusters`
cell_type$seurat_clusters<-factor(cell_type$seurat_clusters, levels=cluster_ids)
cell_type<-cell_type[order(cell_type$seurat_clusters),]
cell_type$seurat_celltype_clusters=factor(cell_type$seurat_celltype_clusters, levels=(cell_type$seurat_celltype_clusters))
rownames(cell_type)=cell_type$seurat_clusters
obj[[myoptions$cluster_name]]=cell_type[as.character(obj$seurat_clusters),"seurat_celltype_clusters"]
group.by=myoptions$cluster_name
}else{
ct_levels<-c("B cells", "Plasma cells", "NK cells", "T cells", "Macrophages", "Dendritic cells", "Monocytes", "Mast cells", "Endothelial cells", "Fibroblasts", "Epithelial cells", "Basal cells", "Olfactory epithelial cells", "Ciliated cells")
ct<-cell_type[!duplicated(cell_type$cell_type),]
missed = ct$cell_type[!(ct$cell_type %in% ct_levels)]
if(length(missed) > 0){
ct_levels = c(ct_levels, missed)
}
ct_levels = ct_levels[ct_levels %in% ct$cell_type]
cell_type$cell_type<-factor(cell_type$cell_type, levels=ct_levels)
cell_type<-cell_type[order(cell_type$cell_type, cell_type$seurat_clusters),]
cell_type$seurat_celltype_clusters=paste0(cell_type$seurat_clusters, " : ", cell_type$cell_type)
cell_type$seurat_celltype_clusters=factor(cell_type$seurat_celltype_clusters, levels=cell_type$seurat_celltype_clusters)
rownames(cell_type)=cell_type$seurat_clusters
obj[[myoptions$cluster_name]]=cell_type[as.character(obj$seurat_clusters),"seurat_celltype_clusters"]
group.by=myoptions$cluster_name
}
genes=unique(unlist(gene_groups))
g<-DotPlot(obj, features=genes, assay="RNA",group.by=group.by)
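# DotPlot() is used only to compute per-cluster pct.exp and avg.exp.scaled;
# the bubble plot itself is rebuilt below with one facet per cell-type gene group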
gdata<-g$data
data.plot<-NULL
gn=names(gene_groups)[1]
for(gn in names(gene_groups)){
gs=gene_groups[[gn]]
gdd<-gdata[gdata$features.plot %in% gs,]
if(nrow(gdd) == 0){
stop(paste0("no DotPlot data found for gene group: ", gn))
}
gdd$feature.groups=gn
data.plot<-rbind(data.plot, gdd)
}
data.plot$feature.groups=factor(data.plot$feature.groups, levels=names(gene_groups))
color.by <- "avg.exp.scaled"
scale.func <- scale_radius
scale.min = NA
scale.max = NA
dot.scale = 6
cols = c("lightgrey", "blue")
library(cowplot)
plot <- ggplot(data = data.plot, mapping = aes_string(x = "features.plot", y = "id")) +
geom_point(mapping = aes_string(size = "pct.exp", color = color.by)) +
scale.func(range = c(0, dot.scale), limits = c(scale.min, scale.max)) +
theme(axis.title.x = element_blank(), axis.title.y = element_blank()) + guides(size = guide_legend(title = "Percent Expressed")) +
labs(x = "Features", y = "Identity") +
theme_cowplot() +
facet_grid(facets = ~feature.groups, scales = "free_x", space = "free_x", switch = "y") +
theme(panel.spacing = unit(x = 1,units = "lines"), strip.background = element_blank()) +
scale_color_gradient(low = cols[1], high = cols[2])
png(paste0(outFile, ".bubblemap.png"), width=5500, height=3000, res=300)
g=plot +
xlab("") + ylab("") + theme_bw() + theme(plot.title = element_text(hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust=1, vjust=0.5),
strip.background = element_blank(),
strip.text.x = element_text(angle=90, hjust=0, vjust=0.5))
print(g)
dev.off()
| /lib/scRNA/seurat_bubblemap.r | permissive | shengqh/ngsperl | R | false | false | 4,826 | r |
source("scRNA_func.r")
library("Seurat")
library("readxl")
library(ggplot2)
options_table<-read.table(parSampleFile1, sep="\t", header=F, stringsAsFactors = F)
myoptions<-split(options_table$V1, options_table$V2)
#assay=ifelse(myoptions$by_sctransform == "0", "RNA", "SCT")
#use RNA assay for visualization
assay="RNA"
finalList<-readRDS(parFile1)
obj=finalList$obj
assaydata=GetAssayData(obj, assay=assay)
allgenes=rownames(assaydata)
rm(assaydata)
genes <- read_xlsx(parFile4, sheet = 1)
for(idx in c(2:nrow(genes))){
if(is.na(genes[idx,"Cell Type"])){
genes[idx,"Cell Type"]=genes[idx-1,"Cell Type"]
}
}
gene_names=genes$`Marker Gene`
gene_names[gene_names=="PECAM"] = "PECAM1"
gene_names[gene_names=="HGD1B"] = "HGD"
gene_names[gene_names=="EpCAM"] = "EPCAM"
gene_names[gene_names=="CD25"] = "IL2RA"
gene_names[gene_names=="ACTAA2"] = "ACTA2"
gene_names[gene_names=="MTND6"] = "MT-ND6"
gene_names[gene_names=="FOXJ!"] = "FOXJ1"
genes$`Marker Gene`<-gene_names
miss_genes=genes$`Marker Gene`[!(genes$`Marker Gene` %in% allgenes)]
writeLines(miss_genes, con="miss_gene.csv")
genes<-genes[genes$`Marker Gene` %in% allgenes,]
genes$`Cell Type`=factor(genes$`Cell Type`, levels=unique(genes$`Cell Type`))
gene_groups=split(genes$`Marker Gene`, genes$`Cell Type`)
cell_type<-read.csv(parFile3, stringsAsFactors=F)
cell_type$cell_type <- cell_type[,myoptions$celltype_name]
cell_type$seurat_celltype_clusters<-cell_type[,myoptions$cluster_name]
sheets=excel_sheets(parFile4)
if(length(sheets) > 1 && myoptions$bubblemap_use_order == "1"){
clusters<-read_xlsx(parFile4, sheet = 2)
cluster_ids<-clusters$`Order of Clusters`
cell_type$seurat_clusters<-factor(cell_type$seurat_clusters, levels=cluster_ids)
cell_type<-cell_type[order(cell_type$seurat_clusters),]
cell_type$seurat_celltype_clusters=factor(cell_type$seurat_celltype_clusters, levels=(cell_type$seurat_celltype_clusters))
rownames(cell_type)=cell_type$seurat_clusters
obj[[myoptions$cluster_name]]=cell_type[as.character(obj$seurat_clusters),"seurat_celltype_clusters"]
group.by=myoptions$cluster_name
}else{
ct_levels<-c("B cells", "Plasma cells", "NK cells", "T cells", "Macrophages", "Dendritic cells", "Monocytes", "Mast cells", "Endothelial cells", "Fibroblasts", "Epithelial cells", "Basal cells", "Olfactory epithelial cells", "Ciliated cells")
ct<-cell_type[!duplicated(cell_type$cell_type),]
missed = ct$cell_type[!(ct$cell_type %in% ct_levels)]
if(length(missed) > 0){
ct_levels = c(ct_levels, missed)
}
ct_levels = ct_levels[ct_levels %in% ct$cell_type]
cell_type$cell_type<-factor(cell_type$cell_type, levels=ct_levels)
cell_type<-cell_type[order(cell_type$cell_type, cell_type$seurat_clusters),]
cell_type$seurat_celltype_clusters=paste0(cell_type$seurat_clusters, " : ", cell_type$cell_type)
cell_type$seurat_celltype_clusters=factor(cell_type$seurat_celltype_clusters, levels=cell_type$seurat_celltype_clusters)
rownames(cell_type)=cell_type$seurat_clusters
obj[[myoptions$cluster_name]]=cell_type[as.character(obj$seurat_clusters),"seurat_celltype_clusters"]
group.by=myoptions$cluster_name
}
genes=unique(unlist(gene_groups))
g<-DotPlot(obj, features=genes, assay="RNA",group.by=group.by)
gdata<-g$data
data.plot<-NULL
gn=names(gene_groups)[1]
for(gn in names(gene_groups)){
gs=gene_groups[[gn]]
gdd<-gdata[gdata$features.plot %in% gs,]
if(nrow(gdd)== 0){
stop(gn)
}
gdd$feature.groups=gn
data.plot<-rbind(data.plot, gdd)
}
data.plot$feature.groups=factor(data.plot$feature.groups, levels=names(gene_groups))
color.by <- "avg.exp.scaled"
scale.func <- scale_radius
scale.min = NA
scale.max = NA
dot.scale = 6
cols = c("lightgrey", "blue")
library(cowplot)
plot <- ggplot(data = data.plot, mapping = aes_string(x = "features.plot", y = "id")) +
geom_point(mapping = aes_string(size = "pct.exp", color = color.by)) +
scale.func(range = c(0, dot.scale), limits = c(scale.min, scale.max)) +
theme(axis.title.x = element_blank(), axis.title.y = element_blank()) + guides(size = guide_legend(title = "Percent Expressed")) +
labs(x = "Features", y = "Identity") +
theme_cowplot() +
facet_grid(facets = ~feature.groups, scales = "free_x", space = "free_x", switch = "y") +
theme(panel.spacing = unit(x = 1,units = "lines"), strip.background = element_blank()) +
scale_color_gradient(low = cols[1], high = cols[2])
png(paste0(outFile, ".bubblemap.png"), width=5500, height=3000, res=300)
g=plot +
xlab("") + ylab("") + theme_bw() + theme(plot.title = element_text(hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust=1, vjust=0.5),
strip.background = element_blank(),
strip.text.x = element_text(angle=90, hjust=0, vjust=0.5))
print(g)
dev.off()
|
## The Editor
vals <- seq(1, 100)
vals <- seq(from = 1,
to = 100)
## Vectors
counts <- c(4, 6, 8, 2)
## Exercise 1
...
## Factors
education <- factor (
c("college", "highschool", "college", "middle"),
levels = c("middle", "highschool", "college")
)
education <- factor (c("college", "highschool", "college", "middle"),
levels = c("middle", "highschool", "college"),
ordered = TRUE)
## Data Frames
df<-data.frame(education, counts)
## Exercise 2
species<- factor(c("fir", "pine", "maple"))
abund<- c(1,2,3)
df2<- data.frame(species, abund)
## Load data into R
plots <- read.csv("C:/Users/Admin/Desktop/SESYNC/data/data/plots.csv")
## Exercise 3
...
## Names
...(df) <- c(...)
## Subsetting ranges
days <- c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")
weekdays <- ...
...
## Exercise 4
...
## Anatomy of a function
function(...) {
...
return(...)
}
## Flow control
if (...) {
...
} else {
...
}
first <- function(...) {
if (...) {
...
} else {
...
}
}
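## Illustrative sketch only (not the exercise answer): a small, complete function
## combining the two skeletons above.
describe_sign <- function(x) {
  if (x >= 0) {
    return("non-negative")
  } else {
    return("negative")
  }
}
describe_sign(-3)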
## Linear models
animals <- read.csv(..., stringsAsFactors = FALSE, na.strings = '')
fit <- lm(
...,
data = ...)
## Exercise 6
...
## Pay attention to factors
animals$species_id <- ...
fit <- lm(
log(weight) ~ ...,
data = animals)
|
/worksheet-2.R
|
no_license
|
kyhap/handouts
|
R
| false | false | 1,379 |
r
|
## The Editor
vals <- seq(1, 100)
vals <- seq(from = 1,
to = 100)
## Vectors
counts <- c(4, 6, 8, 2)
## Exercise 1
...
## Factors
education <- factor (
c("college", "highschool", "college", "middle"),
levels = c("middle", "highschool", "college")
)
education <- factor (c("college", "highschool", "college", "middle"),
levels = c("middle", "highschool", "college"),
ordered = TRUE)
## Data Frames
df<-data.frame(education, counts)
## Exercise 2
species<- factor(c("fir", "pine", "maple"))
abund<- c(1,2,3)
df2<- data.frame(species, abund)
## Load data into R
plots <- read.csv("C:/Users/Admin/Desktop/SESYNC/data/data/plots.csv")
## Exercise 3
...
## Names
...(df) <- c(...)
## Subsetting ranges
days <- c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")
weekdays <- ...
...
## Exercise 4
...
## Anatomy of a function
function(...) {
...
return(...)
}
## Flow control
if (...) {
...
} else {
...
}
first <- function(...) {
if (...) {
...
} else {
...
}
}
## Linear models
animals <- read.csv(..., stringsAsFactors = FALSE, na.strings = '')
fit <- lm(
...,
data = ...)
## Exercise 6
...
## Pay attention to factors
animals$species_id <- ...
fit <- lm(
log(weight) ~ ...,
data = animals)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/futureheatwaves.R
\docType{package}
\name{futureheatwaves}
\alias{futureheatwaves}
\alias{futureheatwaves-package}
\title{Find, Characterize, and Explore Heat Waves in Climate Projections}
\description{
\code{futureheatwaves} takes a directory of climate projection files and,
for each, identifies and characterizes all examples of a specified type of
extreme event. The definition used to identify extreme events can be
customized. Characterizations include several
metrics of event length, intensity, and timing in the year. Extreme events
can be explored by applying custom functions across all generated heat wave
files. This work was supported in part by grants from the National Institute
of Environmental Health Sciences (R00ES022631), the National Science
Foundation (1331399), and the Colorado State University Vice President for
Research.
}
|
/man/futureheatwaves.Rd
|
no_license
|
geanders/futureheatwaves
|
R
| false | true | 929 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/futureheatwaves.R
\docType{package}
\name{futureheatwaves}
\alias{futureheatwaves}
\alias{futureheatwaves-package}
\title{Find, Characterize, and Explore Heat Waves in Climate Projections}
\description{
\code{futureheatwaves} takes a directory of climate projection files and,
for each, identifies and characterizes all examples of a specified type of
extreme event. The definition used to identify extreme events can be
customized. Characterizations include several
metrics of event length, intensity, and timing in the year. Extreme events
can be explored by applying custom functions across all generated heat wave
files. This work was supported in part by grants from the National Institute
of Environmental Health Sciences (R00ES022631), the National Science
Foundation (1331399), and the Colorado State University Vice President for
Research.
}
|
# Sparse PCA
# Library -----------------------------------------------------------------
library(sparsepca)
library(pROC)
library(tidyverse)
library(glmnet)
library(factoextra) # for eigenvalue plots
library(PMA)
library(impute)
library(openxlsx) # read.xlsx() below (AllCUI_Database.xlsx) comes from openxlsx
# Load --------------------------------------------------------------------
CC_comb <- readRDS('modeling_data/train24_3.rds')
CC_comb_val <- readRDS('modeling_data/test24_3.rds')
# Combine data ------------------------------------------------------------
CC_all <- rbind(CC_comb, CC_comb_val)
# Preliminary sparse PCA and get loadings --------------------------------------------------
spca_fit <- sparsepca::spca(CC_comb[,-c(1:9)], k = 20, scale = T, alpha = 1e-02)
loadings <- spca_fit$loadings %>% data.frame()
# Add descriptions to loadings
colnames(loadings) <- paste0('PC', 1:20)
rownames(loadings) <- names(CC_comb[-c(1:9)])
loadings$Desc <- NA
# Add descriptions
# Load keys
wkpath = "raw_data/Box/Boston/MS CLIMB Data/"
ICDPheCode = read.csv(paste0(wkpath,"EHR/MS_AllEncounters_ICD_Data_03282019.csv"),
stringsAsFactors = FALSE);ICDPheCode$phecode[ICDPheCode$concept_cd == "LPA268"] = "335_"
CUIdictAll = read.xlsx(paste0(wkpath,"EHR/AllCUI_Database.xlsx"),
sheet = 1); colnames(CUIdictAll) = c("ConceptCd","Desc")
firstup <- function(x) {substr(x, 1, 1) <- toupper(substr(x, 1, 1))
return(x)}
# CPT Codes
tmp <- strtrim(rownames(loadings), 3) == 'CPT'
loadings$Desc[tmp] <- str_split(rownames(loadings)[tmp], '\\.', simplify = T)[,2] %>%
as.character() %>% firstup()
# ICD PheCodes
tmp <- strtrim(rownames(loadings), 3) == 'Phe'
tmp2 <- str_split(rownames(loadings)[tmp], '\\.', simplify = T)[,2]
tmp3 <- character(length(tmp2))
for (i in 1:length(tmp2)) {
tmp3[i] <- ICDPheCode[ICDPheCode$phecode == tmp2[i], 'phecode_description'] %>% unique()
}
loadings$Desc[tmp] <- tmp3
# CUI
# CUIs
tmp <- strtrim(rownames(loadings), 3) == 'CUI'
tmp2 <- str_split(rownames(loadings)[tmp], '\\.', simplify = T)[,2]
tmp3 <- character(length(tmp2))
for (i in 1:length(tmp2)) {
tmp3[i] <- CUIdictAll[CUIdictAll$ConceptCd == tmp2[i], 'Desc'] %>% unique()
}
loadings$Desc[tmp] <- tmp3
# Clinical vars
tmp <- !(strtrim(rownames(loadings), 3) == 'CPT' | strtrim(rownames(loadings), 3) == 'Phe' | strtrim(rownames(loadings), 3) == 'CUI')
loadings$Desc[tmp] <- rownames(loadings)[tmp]
# Truncate rownames for readability
row.names(loadings) <- strtrim(row.names(loadings), 25)
write.csv(loadings, 'model_output/spca_loadings_tuned.csv') # Loadings using spca
# write.csv(t(summary(spca_fit)), 'coefs/spca_summary.csv')
# Functions ---------------------------------------------------------------
## **_data is assumed to have the same column structure as CC_comb/CC_comb_val
fit_spca <- function(train_data, test_data, k = 20, alpha = 1e-02) {
# Run sparse PCA only on training data, project test data onto orthog basis
spca_fit <- sparsepca::spca(train_data[,-c(1:9)], k = k, scale = T, alpha = alpha)
train_PC <- spca_fit$scores
test_PC <- scale(test_data[,-c(1:9)],
center = spca_fit$center,
scale = spca_fit$scale) %*% spca_fit$transform
# Returns train and test data as PCs in the form of a labeled list
return(list(train_PC = train_PC, test_PC = test_PC))
}
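## Illustrative toy call of fit_spca() (not part of the analysis): fake data shaped
## like CC_comb, i.e. 9 leading metadata columns followed by numeric feature columns.
if (FALSE) {
  toy <- cbind(setNames(data.frame(matrix(0, 50, 9)), paste0("meta", 1:9)),
               setNames(data.frame(matrix(rnorm(50 * 30), 50)), paste0("feat", 1:30)))
  toy_pcs <- fit_spca(toy[1:40, ], toy[41:50, ], k = 5, alpha = 1e-02)
  dim(toy_pcs$train_PC)  # 40 x 5
  dim(toy_pcs$test_PC)   # 10 x 5
}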
fit_spc <- function(train_data, test_data, k = 20) {
# Run PCA only on training data, project test data onto orthog basis
# First scale the data
scaled_train <- scale(train_data[,-c(1:9)])
mean_train <- attr(scaled_train,"scaled:center"); scale_train <- attr(scaled_train,"scaled:scale")
# Tune sparsity parameter
set.seed(12345)
cv_obj <- PMA::SPC.cv(as.matrix(scaled_train), center = FALSE)
# Fit new sparse PCA using tuned parameter
spc_obj <- PMA::SPC(as.matrix(scaled_train), sumabsv = cv_obj$bestsumabsv, K = k,
cnames = paste0('PC', 1:k))
scaled_test <- scale(test_data[,-c(1:9)], center = mean_train, scale = scale_train)
test_PC <- as.matrix(scaled_test) %*% as.matrix(spc_obj$v)
train_PC <- spc_obj$u
# Returns train and test data as PCs in the form of a labeled list
return(list(train_PC = train_PC, test_PC = test_PC))
}
## 'pca_predict' function from pca.R script
pca_predict <- function(train_data, test_data, train_PC, test_PC, n_PCs = 2) {
# Note: n_PCs > 1 (cv.glmnet will not work for 1 predictor)
# Select only first n_PCs
X_pc_train <- train_PC[,c(1:n_PCs)]; X_pc_test <- test_PC[,c(1:n_PCs)]
# Ordinary logistic regression
tmp_df <- cbind(unlist(train_data$CC), data.frame(X_pc_train)); names(tmp_df)[1] <- 'CC'
glm.fit <- glm(CC ~ ., tmp_df, family = binomial)
preds <- predict(glm.fit, newdata = data.frame(X_pc_test), type = 'response')
auc_logreg <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# LASSO, type.measure = 'deviance'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "deviance", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_lasso_dev <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# LASSO, type.measure = 'auc'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "auc", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_lasso_auc <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# Ridge, type.measure = 'deviance'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "deviance", alpha = 0)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_ridge_dev <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# Ridge, type.measure = 'auc'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "auc", alpha = 0)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_ridge_auc <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
return(data.frame(num_pc = n_PCs,
lasso_dev = auc_lasso_dev, lasso_auc = auc_lasso_auc,
ridge_dev = auc_ridge_dev, ridge_auc = auc_ridge_auc,
ord_logreg = auc_logreg))
}
# Manually tune sparsity parameter alpha using spca -----------------------
# Use original train-test split and k = 20
get.test.result = function(alpha) {
spca_data <- fit_spca(CC_comb, CC_comb_val, k = 20, alpha = alpha)
# Fit LASSO using deviance
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(spca_data[[1]]), unlist(CC_comb$CC),
family = "binomial", type.measure = "deviance", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(spca_data[[2]]),
type = "response", s = glmnet.fit$lambda.1se)
return(roc(as.numeric(CC_comb_val$CC), as.numeric(preds))$auc)
}
result = sapply(10^(seq(-4, -1, by = 1)), function(i) get.test.result(i))
df_result = data.frame(alpha = 10^(seq(-4, -1, by = 1)), auc = result)
# Run spca ----------------------------------------------------------------
n_splits <- 6
set.seed(12345)
# Option 1
# k <- 10; pcs_vec <- c(2,3,10)
# Option 2
k <- 20; pcs_vec <- c(2,3,10,20)
alph <- 1e-02 # found from tuning above
# Run loop
all_results <- list()
for (j in 1:n_splits) {
print(paste0('Split #', j))
# Resplit and subset data
nval = floor(length(unique(CC_all$PatientID))*0.3); val = sample(unique(CC_all$PatientID),nval)
CC_val = CC_all[CC_all$PatientID%in%val,]; CC_train = CC_all[!CC_all$PatientID%in%val,]
# Fit Sparse PCA and project validation data
spca_data <- fit_spca(CC_train, CC_val, k = k, alpha = alph)
# Run models
pca_results_list <- list()
for (i in pcs_vec) {
pca_results_list[[i]] <- pca_predict(CC_train, CC_val, spca_data[[1]], spca_data[[2]], i)
}
all_results[[j]] <- do.call(rbind, pca_results_list)
}
print('All done!')
# Average results
spca_auc <- (all_results[[1]]+all_results[[2]]+all_results[[3]]+all_results[[4]]+all_results[[5]]+all_results[[6]])/6
write_csv(spca_auc, 'model_output/spca_auc.csv')
# Run SPC.cv/SPC ----------------------------------------------------------
# Fit tuning parameter sumabsvs (sum of absolute values of elements of v)
# Scale first
scaled_train <- scale(CC_comb[,-c(1:9)])
mean_train <- attr(scaled_train,"scaled:center")
scale_train <- attr(scaled_train,"scaled:scale")
cv_obj <- PMA::SPC.cv(as.matrix(scaled_train), center = FALSE)
# Now fit
k <- 20
spc_obj <- PMA::SPC(as.matrix(scaled_train), sumabsv = cv_obj$bestsumabsv, K = k,
cnames = paste0('PC', 1:k))
# loadings <- spc_obj$v %>% data.frame()
# write.csv(loadings, 'model_output/spca_loadings_tuned_sumabsv.csv') # Loadings using SPC.cv and SPC
# Center test data and project to orthogonal axes
scaled_test <- scale(CC_comb_val[,-c(1:9)], center = mean_train, scale = scale_train)
test_PC <- as.matrix(scaled_test) %*% as.matrix(spc_obj$v)
# Fit model
glmnet.fit <- cv.glmnet(as.matrix(spc_obj$u), unlist(CC_comb$CC), family = "binomial", type.measure = "deviance", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(test_PC), type = "response", s = glmnet.fit$lambda.1se)
roc(as.numeric(CC_comb_val$CC), as.numeric(preds))$auc
k <- 20; pcs_vec <- c(2,3,10,20)
n_splits <- 6
# Run loop
all_results <- list()
for (j in 1:n_splits) {
print(paste0('Split #', j))
# Resplit and subset data
nval = floor(length(unique(CC_all$PatientID))*0.3); val = sample(unique(CC_all$PatientID),nval)
CC_val = CC_all[CC_all$PatientID%in%val,]; CC_train = CC_all[!CC_all$PatientID%in%val,]
# Fit Sparse PCA and project validation data
spca_data <- fit_spc(CC_train, CC_val, k = k)
# Run models
pca_results_list <- list()
for (i in pcs_vec) {
pca_results_list[[i]] <- pca_predict(CC_train, CC_val, spca_data[[1]], spca_data[[2]], i)
}
all_results[[j]] <- do.call(rbind, pca_results_list)
}
print('All done!')
# Average results
spc_auc <- (all_results[[1]]+all_results[[2]]+all_results[[3]]+all_results[[4]]+all_results[[5]]+all_results[[6]])/6
write_csv(spc_auc, 'model_output/spc_cv_auc.csv')
|
/scripts/modeling/sparse_pca.R
|
no_license
|
nkim1322/MS_EHR
|
R
| false | false | 10,577 |
r
|
# Sparse PCA
# Library -----------------------------------------------------------------
library(sparsepca)
library(pROC)
library(tidyverse)
library(glmnet)
library(factoextra) # for eigenvalue plots
library(PMA)
library(impute)
library(openxlsx) # read.xlsx() below (AllCUI_Database.xlsx) comes from openxlsx
# Load --------------------------------------------------------------------
CC_comb <- readRDS('modeling_data/train24_3.rds')
CC_comb_val <- readRDS('modeling_data/test24_3.rds')
# Combine data ------------------------------------------------------------
CC_all <- rbind(CC_comb, CC_comb_val)
# Preliminary sparse PCA and get loadings --------------------------------------------------
spca_fit <- sparsepca::spca(CC_comb[,-c(1:9)], k = 20, scale = T, alpha = 1e-02)
loadings <- spca_fit$loadings %>% data.frame()
# Add descriptions to loadings
colnames(loadings) <- paste0('PC', 1:20)
rownames(loadings) <- names(CC_comb[-c(1:9)])
loadings$Desc <- NA
# Add descriptions
# Load keys
wkpath = "raw_data/Box/Boston/MS CLIMB Data/"
ICDPheCode = read.csv(paste0(wkpath,"EHR/MS_AllEncounters_ICD_Data_03282019.csv"),
stringsAsFactors = FALSE);ICDPheCode$phecode[ICDPheCode$concept_cd == "LPA268"] = "335_"
CUIdictAll = read.xlsx(paste0(wkpath,"EHR/AllCUI_Database.xlsx"),
sheet = 1); colnames(CUIdictAll) = c("ConceptCd","Desc")
firstup <- function(x) {substr(x, 1, 1) <- toupper(substr(x, 1, 1))
return(x)}
# CPT Codes
tmp <- strtrim(rownames(loadings), 3) == 'CPT'
loadings$Desc[tmp] <- str_split(rownames(loadings)[tmp], '\\.', simplify = T)[,2] %>%
as.character() %>% firstup()
# ICD PheCodes
tmp <- strtrim(rownames(loadings), 3) == 'Phe'
tmp2 <- str_split(rownames(loadings)[tmp], '\\.', simplify = T)[,2]
tmp3 <- character(length(tmp2))
for (i in 1:length(tmp2)) {
tmp3[i] <- ICDPheCode[ICDPheCode$phecode == tmp2[i], 'phecode_description'] %>% unique()
}
loadings$Desc[tmp] <- tmp3
# CUI
# CUIs
tmp <- strtrim(rownames(loadings), 3) == 'CUI'
tmp2 <- str_split(rownames(loadings)[tmp], '\\.', simplify = T)[,2]
tmp3 <- character(length(tmp2))
for (i in 1:length(tmp2)) {
tmp3[i] <- CUIdictAll[CUIdictAll$ConceptCd == tmp2[i], 'Desc'] %>% unique()
}
loadings$Desc[tmp] <- tmp3
# Clinical vars
tmp <- !(strtrim(rownames(loadings), 3) == 'CPT' | strtrim(rownames(loadings), 3) == 'Phe' | strtrim(rownames(loadings), 3) == 'CUI')
loadings$Desc[tmp] <- rownames(loadings)[tmp]
# Truncate rownames for readability
row.names(loadings) <- strtrim(row.names(loadings), 25)
write.csv(loadings, 'model_output/spca_loadings_tuned.csv') # Loadings using spca
# write.csv(t(summary(spca_fit)), 'coefs/spca_summary.csv')
# Functions ---------------------------------------------------------------
## **_data is assumed to have the same column structure as CC_comb/CC_comb_val
fit_spca <- function(train_data, test_data, k = 20, alpha = 1e-02) {
# Run sparse PCA only on training data, project test data onto orthog basis
spca_fit <- sparsepca::spca(train_data[,-c(1:9)], k = k, scale = T, alpha = alpha)
train_PC <- spca_fit$scores
test_PC <- scale(test_data[,-c(1:9)],
center = spca_fit$center,
scale = spca_fit$scale) %*% spca_fit$transform
# Returns train and test data as PCs in the form of a labeled list
return(list(train_PC = train_PC, test_PC = test_PC))
}
fit_spc <- function(train_data, test_data, k = 20) {
# Run PCA only on training data, project test data onto orthog basis
# First scale the data
scaled_train <- scale(train_data[,-c(1:9)])
mean_train <- attr(scaled_train,"scaled:center"); scale_train <- attr(scaled_train,"scaled:scale")
# Tune sparsity parameter
set.seed(12345)
cv_obj <- PMA::SPC.cv(as.matrix(scaled_train), center = FALSE)
# Fit new sparse PCA using tuned parameter
spc_obj <- PMA::SPC(as.matrix(scaled_train), sumabsv = cv_obj$bestsumabsv, K = k,
cnames = paste0('PC', 1:k))
scaled_test <- scale(test_data[,-c(1:9)], center = mean_train, scale = scale_train)
test_PC <- as.matrix(scaled_test) %*% as.matrix(spc_obj$v)
train_PC <- spc_obj$u
# Returns train and test data as PCs in the form of a labeled list
return(list(train_PC = train_PC, test_PC = test_PC))
}
## 'pca_predict' function from pca.R script
pca_predict <- function(train_data, test_data, train_PC, test_PC, n_PCs = 2) {
# Note: n_PCs > 1 (cv.glmnet will not work for 1 predictor)
# Select only first n_PCs
X_pc_train <- train_PC[,c(1:n_PCs)]; X_pc_test <- test_PC[,c(1:n_PCs)]
# Ordinary logistic regression
tmp_df <- cbind(unlist(train_data$CC), data.frame(X_pc_train)); names(tmp_df)[1] <- 'CC'
glm.fit <- glm(CC ~ ., tmp_df, family = binomial)
preds <- predict(glm.fit, newdata = data.frame(X_pc_test), type = 'response')
auc_logreg <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# LASSO, type.measure = 'deviance'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "deviance", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_lasso_dev <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# LASSO, type.measure = 'auc'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "auc", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_lasso_auc <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# Ridge, type.measure = 'deviance'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "deviance", alpha = 0)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_ridge_dev <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
# Ridge, type.measure = 'auc'
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(X_pc_train), unlist(train_data$CC),
family = "binomial", type.measure = "auc", alpha = 0)
preds <- predict(glmnet.fit, newx = as.matrix(X_pc_test),
type = "response", s = glmnet.fit$lambda.1se)
auc_ridge_auc <- roc(as.numeric(test_data$CC), as.numeric(preds))$auc
return(data.frame(num_pc = n_PCs,
lasso_dev = auc_lasso_dev, lasso_auc = auc_lasso_auc,
ridge_dev = auc_ridge_dev, ridge_auc = auc_ridge_auc,
ord_logreg = auc_logreg))
}
# Manually tune sparsity parameter alpha using spca -----------------------
# Use original train-test split and k = 20
get.test.result = function(alpha) {
spca_data <- fit_spca(CC_comb, CC_comb_val, k = 20, alpha = alpha)
# Fit LASSO using deviance
set.seed(12345)
glmnet.fit <- cv.glmnet(as.matrix(spca_data[[1]]), unlist(CC_comb$CC),
family = "binomial", type.measure = "deviance", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(spca_data[[2]]),
type = "response", s = glmnet.fit$lambda.1se)
return(roc(as.numeric(CC_comb_val$CC), as.numeric(preds))$auc)
}
result = sapply(10^(seq(-4, -1, by = 1)), function(i) get.test.result(i))
df_result = data.frame(alpha = 10^(seq(-4, -1, by = 1)), auc = result)
# Run spca ----------------------------------------------------------------
n_splits <- 6
set.seed(12345)
# Option 1
# k <- 10; pcs_vec <- c(2,3,10)
# Option 2
k <- 20; pcs_vec <- c(2,3,10,20)
alph <- 1e-02 # found from tuning above
# Run loop
all_results <- list()
for (j in 1:n_splits) {
print(paste0('Split #', j))
# Resplit and subset data
nval = floor(length(unique(CC_all$PatientID))*0.3); val = sample(unique(CC_all$PatientID),nval)
CC_val = CC_all[CC_all$PatientID%in%val,]; CC_train = CC_all[!CC_all$PatientID%in%val,]
# Fit Sparse PCA and project validation data
spca_data <- fit_spca(CC_train, CC_val, k = k, alpha = alph)
# Run models
pca_results_list <- list()
for (i in pcs_vec) {
pca_results_list[[i]] <- pca_predict(CC_train, CC_val, spca_data[[1]], spca_data[[2]], i)
}
all_results[[j]] <- do.call(rbind, pca_results_list)
}
print('All done!')
# Average results
spca_auc <- (all_results[[1]]+all_results[[2]]+all_results[[3]]+all_results[[4]]+all_results[[5]]+all_results[[6]])/6
write_csv(spca_auc, 'model_output/spca_auc.csv')
# Run SPC.cv/SPC ----------------------------------------------------------
# Fit tuning parameter sumabsvs (sum of absolute values of elements of v)
# Scale first
scaled_train <- scale(CC_comb[,-c(1:9)])
mean_train <- attr(scaled_train,"scaled:center")
scale_train <- attr(scaled_train,"scaled:scale")
cv_obj <- PMA::SPC.cv(as.matrix(scaled_train), center = FALSE)
# Now fit
k <- 20
spc_obj <- PMA::SPC(as.matrix(scaled_train), sumabsv = cv_obj$bestsumabsv, K = k,
cnames = paste0('PC', 1:k))
# loadings <- spc_obj$v %>% data.frame()
# write.csv(loadings, 'model_output/spca_loadings_tuned_sumabsv.csv') # Loadings using SPC.cv and SPC
# Center test data and project to orthogonal axes
scaled_test <- scale(CC_comb_val[,-c(1:9)], center = mean_train, scale = scale_train)
test_PC <- as.matrix(scaled_test) %*% as.matrix(spc_obj$v)
# Fit model
glmnet.fit <- cv.glmnet(as.matrix(spc_obj$u), unlist(CC_comb$CC), family = "binomial", type.measure = "deviance", alpha = 1)
preds <- predict(glmnet.fit, newx = as.matrix(test_PC), type = "response", s = glmnet.fit$lambda.1se)
roc(as.numeric(CC_comb_val$CC), as.numeric(preds))$auc
k <- 20; pcs_vec <- c(2,3,10,20)
n_splits <- 6
# Run loop
all_results <- list()
for (j in 1:n_splits) {
print(paste0('Split #', j))
# Resplit and subset data
nval = floor(length(unique(CC_all$PatientID))*0.3); val = sample(unique(CC_all$PatientID),nval)
CC_val = CC_all[CC_all$PatientID%in%val,]; CC_train = CC_all[!CC_all$PatientID%in%val,]
# Fit Sparse PCA and project validation data
spca_data <- fit_spc(CC_train, CC_val, k = k)
# Run models
pca_results_list <- list()
for (i in pcs_vec) {
pca_results_list[[i]] <- pca_predict(CC_train, CC_val, spca_data[[1]], spca_data[[2]], i)
}
all_results[[j]] <- do.call(rbind, pca_results_list)
}
print('All done!')
# Average results
spc_auc <- (all_results[[1]]+all_results[[2]]+all_results[[3]]+all_results[[4]]+all_results[[5]]+all_results[[6]])/6
write_csv(spc_auc, 'model_output/spc_cv_auc.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/portkey.R
\name{twocoin}
\alias{twocoin}
\title{Two-coin algorithm: implements the Portkey algorithm for beta = 1.}
\usage{
twocoin(prop, curr, pf, Cprop, Ccurr, ...)
}
\arguments{
\item{prop}{The proposed value in the MCMC step}
\item{curr}{The current value in the MCMC step}
\item{pf}{A function that produces a Bern(p) realization, with argument \code{value} for the state
of the Markov chain}
\item{Cprop}{Upper bound for the target density at proposed value}
\item{Ccurr}{Upper bound for the target density at the current value}
\item{...}{additional arguments that go into \code{pf}}
}
\value{
a variable \code{x} which is either \code{curr} or \code{prop} and integer \code{loops} that returns the number
of loops the Bernoulli factory took
}
\description{
Two-coin algorithm: implements the Portkey algorithm for beta = 1.
}
|
/man/twocoin.Rd
|
no_license
|
NiVi202/portkey
|
R
| false | true | 916 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/portkey.R
\name{twocoin}
\alias{twocoin}
\title{Two-coin algorithm: implements the Portkey algorithm for beta = 1.}
\usage{
twocoin(prop, curr, pf, Cprop, Ccurr, ...)
}
\arguments{
\item{prop}{The proposed value in the MCMC step}
\item{curr}{The current value in the MCMC step}
\item{pf}{A function that produces a Bern(p) realization, with argument \code{value} for the state
of the Markov chain}
\item{Cprop}{Upper bound for the target density at proposed value}
\item{Ccurr}{Upper bound for the target density at the current value}
\item{...}{additional arguments that go into \code{pf}}
}
\value{
a variable \code{x} which is either \code{curr} or \code{prop} and integer \code{loops} that returns the number
of loops the Bernoulli factory took
}
\description{
Two-coin algorithm: implements the Portkey algorithm for beta = 1.
}
|
library (class)
sum =0
count =0
val_k=3
cat("k",":","Average Accuracy")
for(i in 1:5){
for(i in 1:10){
# loading data
pima_data <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data",sep = ",")
#Assigning column names
colnames(pima_data) <- c("No_of_times_pregnant","Plasma_glucose","D_bp","Triceps","Insulin","BMI","D_pf","Age","Class")
pima_data$Class<- factor(pima_data$Class)
noofrows<- nrow(pima_data)
train <- sort(sample(1:noofrows, floor(0.90*(noofrows))))
#Collecting 90% of data into training data
pima_data_train <- pima_data[train,]
#Collecting 10% of data into testing data
pima_data_test <- pima_data[-train,]
cl = factor(pima_data_train$Class)
cl_test = factor(pima_data_test$Class)
  # exclude the Class label (column 9) from the predictor matrices so it is not used as a feature
  knn_classifier <- knn(pima_data_train[, -9], pima_data_test[, -9], cl, k = val_k, prob=TRUE)
original_values <- pima_data_test$Class
b <- cbind(knn_classifier,original_values)
correct_predictions <- length(b[knn_classifier==original_values])/2
#print(correct_predictions)
total_predictions <- length(b)/2
#print(total_predictions)
accuracy <- (correct_predictions/total_predictions)*100
#print(accuracy)
sum=sum+accuracy
count=count+1
}
average = sum/count
cat(val_k,":",average,"\n")
val_k=val_k+2
}
|
/Proj3/part4/pima_q4_knn.R
|
no_license
|
shashankadidamu/Machine-Learning
|
R
| false | false | 1,390 |
r
|
library (class)
sum =0
count =0
val_k=3
cat("k",":","Average Accuracy")
for(i in 1:5){
for(i in 1:10){
# loading data
pima_data <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data",sep = ",")
#Assigning column names
colnames(pima_data) <- c("No_of_times_pregnant","Plasma_glucose","D_bp","Triceps","Insulin","BMI","D_pf","Age","Class")
pima_data$Class<- factor(pima_data$Class)
noofrows<- nrow(pima_data)
train <- sort(sample(1:noofrows, floor(0.90*(noofrows))))
#Collecting 90% of data into training data
pima_data_train <- pima_data[train,]
#Collecting 10% of data into testing data
pima_data_test <- pima_data[-train,]
cl = factor(pima_data_train$Class)
cl_test = factor(pima_data_test$Class)
  # exclude the Class label (column 9) from the predictor matrices so it is not used as a feature
  knn_classifier <- knn(pima_data_train[, -9], pima_data_test[, -9], cl, k = val_k, prob=TRUE)
original_values <- pima_data_test$Class
b <- cbind(knn_classifier,original_values)
correct_predictions <- length(b[knn_classifier==original_values])/2
#print(correct_predictions)
total_predictions <- length(b)/2
#print(total_predictions)
accuracy <- (correct_predictions/total_predictions)*100
#print(accuracy)
sum=sum+accuracy
count=count+1
}
average = sum/count
cat(val_k,":",average,"\n")
val_k=val_k+2
}
|
################################################
##Read extracted CSV, verify properties
# split into training and test
# Krishna Karthik Gadiraju/kgadira
################################################
rm(list=ls(all=T))
library(rgdal)
library(rgeos)
library(foreign)
data.all <- read.csv('may28TrainingAllBandsFinal.csv') #contains all sampled points generated using
#point sampling tool in QGIS
x2 <- sample(1:nrow(data.all),nrow(data.all),replace=F)
data.all <- data.all[x2,]
#messed up the original id attribute in QGIS, redo it
id <- seq.int(nrow(data.all))
data.all$id <- id
#Split data into training and testing - Assume 60,40 ratio
colnames(data.all)[4] <- 'Class'
data.all$Class <- as.factor(data.all$Class)
data.all.split <- split(data.all,data.all$Class,drop=T)
data.training<- data.frame()
data.testing <- data.frame()
#colnames(data.training) <- colnames(data.all)
for( i in 1:6){
current <- data.frame(data.all.split[i])
colnames(current) <- colnames(data.all)
noSamples <- nrow(current)
noTraining <- ceiling(0.6*noSamples)
x2 <- sample(1:nrow(current),noTraining,replace=F)
data.training <- rbind(data.training,current[x2,])
data.testing <- rbind(data.testing, current[-x2,])
}
#remove X,Y, id attribute - we don't use them
data.training <- data.training[,-c(1:3)]
data.testing <- data.testing[,-c(1:3)]
#Write down CSV and/or arff files
write.csv(x=data.training,'may28-training-AllBands-final.csv',row.names = F)
write.arff(data.training,file='may28-training-AllBands-final.arff',relation='training')
write.csv(x=data.testing,'may28-testing-AllBands-final.csv',row.names = F)
write.arff(data.testing,file='may28-testing-AllBands-final.arff',relation='testing')
|
/SplitTrainingAndTesting.R
|
permissive
|
DrRoad/SatelliteImageClassification
|
R
| false | false | 1,711 |
r
|
################################################
##Read extracted CSV, verify properties
# split into training and test
# Krishna Karthik Gadiraju/kgadira
################################################
rm(list=ls(all=T))
library(rgdal)
library(rgeos)
library(foreign)
data.all <- read.csv('may28TrainingAllBandsFinal.csv') #contains all sampled points generated using
#point sampling tool in QGIS
x2 <- sample(1:nrow(data.all),nrow(data.all),replace=F)
data.all <- data.all[x2,]
#messed up the original id attribute in QGIS, redo it
id <- seq.int(nrow(data.all))
data.all$id <- id
#Split data into training and testing - Assume 60,40 ratio
colnames(data.all)[4] <- 'Class'
data.all$Class <- as.factor(data.all$Class)
data.all.split <- split(data.all,data.all$Class,drop=T)
data.training<- data.frame()
data.testing <- data.frame()
#colnames(data.training) <- colnames(data.all)
for( i in 1:6){
current <- data.frame(data.all.split[i])
colnames(current) <- colnames(data.all)
noSamples <- nrow(current)
noTraining <- ceiling(0.6*noSamples)
x2 <- sample(1:nrow(current),noTraining,replace=F)
data.training <- rbind(data.training,current[x2,])
data.testing <- rbind(data.testing, current[-x2,])
}
#remove X,Y, id attribute - we don't use them
data.training <- data.training[,-c(1:3)]
data.testing <- data.testing[,-c(1:3)]
#Write down CSV and/or arff files
write.csv(x=data.training,'may28-training-AllBands-final.csv',row.names = F)
write.arff(data.training,file='may28-training-AllBands-final.arff',relation='training')
write.csv(x=data.testing,'may28-testing-AllBands-final.csv',row.names = F)
write.arff(data.testing,file='may28-testing-AllBands-final.arff',relation='testing')
|
testlist <- list(Beta = 0, CVLinf = 0, FM = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 3.09902922427763e-312, SL95 = 0, nage = 0L, nlen = 0L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615830409-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 337 |
r
|
testlist <- list(Beta = 0, CVLinf = 0, FM = 0, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 3.09902922427763e-312, SL95 = 0, nage = 0L, nlen = 0L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
library(class)  # provides knn(), used below
BoW <- read.table("itziar/dokumentuetahitzak.txt")
BoW <- as.matrix(BoW)
BoW <- t(BoW)
colnames(BoW) <- c("Human", "Interface", "Computer",
"User", "System", "Response",
"Time", "EPS", "Survey",
"Trees", "Graph", "Minors")
row.names(BoW) <- c("D1", "D2", "D3",
"D4", "D5", "D6",
"D7", "D8", "D9")
pr = prcomp(BoW)
PC = pr$rotation[,1:2]
Y = pr$x[,1:2]
# cl: 1 <- human-computer interaction, 2 <- graph theory
cl <- c(1, 1, 1, 1, 1, 2, 2, 2, 2)
plot(Y[,1], Y[,2], col=c("green", "cyan")[cl], bg=c("green", "cyan")[cl])
# New doc = "Graph theory with applications to engineering and computer science"
doc0_BoW <- matrix(c(0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0), nrow=1)
pr$center
doc0 <- (doc0_BoW - pr$center) %*% PC
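# Equivalent projection via predict() (illustrative check only): prcomp's predict
# method needs column names on the new matrix matching the training columns.
if (FALSE) {
  colnames(doc0_BoW) <- colnames(BoW)
  doc0_check <- predict(pr, newdata = doc0_BoW)[, 1:2, drop = FALSE]
  all.equal(unname(doc0_check), unname(doc0))  # should be TRUE
}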
points(doc0[,1], doc0[,2], col="red")
knn(Y, doc0, cl=cl, k=2)
# It cannot be decided: only two of the words in the new sentence exist in our
# vocabulary, and each of those words appears only in documents belonging to one
# class or the other.
|
/itziar/PCA_BOW_ejercicio.R
|
no_license
|
enpinzolas/ead-project
|
R
| false | false | 1,108 |
r
|
library(class)  # provides knn(), used below
BoW <- read.table("itziar/dokumentuetahitzak.txt")
BoW <- as.matrix(BoW)
BoW <- t(BoW)
colnames(BoW) <- c("Human", "Interface", "Computer",
"User", "System", "Response",
"Time", "EPS", "Survey",
"Trees", "Graph", "Minors")
row.names(BoW) <- c("D1", "D2", "D3",
"D4", "D5", "D6",
"D7", "D8", "D9")
pr = prcomp(BoW)
PC = pr$rotation[,1:2]
Y = pr$x[,1:2]
# cl: 1 <- human-computer interaction, 2 <- graph theory
cl <- c(1, 1, 1, 1, 1, 2, 2, 2, 2)
plot(Y[,1], Y[,2], col=c("green", "cyan")[cl], bg=c("green", "cyan")[cl])
# New doc = "Graph theory with applications to engineering and computer science"
doc0_BoW <- matrix(c(0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0), nrow=1)
pr$center
doc0 <- (doc0_BoW - pr$center) %*% PC
points(doc0[,1], doc0[,2], col="red")
knn(Y, doc0, cl=cl, k=2)
# It cannot be decided: only two of the words in the new sentence exist in our
# vocabulary, and each of those words appears only in documents belonging to one
# class or the other.
|
{
source( file="scripts/reference.R" );
weatherData = read.csv( file="data/LansingNOAA2016-3.csv",
stringsAsFactors = FALSE );
dates = as.Date(weatherData$dateYr); # save the date column to a vector
months = format(dates, format="%b"); # extract the month -- save to vector
weatherData$month = months; # save months to data frame as new column
# this plot is overlaying the stacked blue values on the stacked red values
thePlot = ggplot(data=weatherData) +
geom_col(mapping=aes(x=month, y=heatDays),
fill = "red", # background color
width=0.4) +
geom_col(mapping=aes(x=month, y=coolDays),
color = "blue", # outline color
alpha = 0, # transparent background
width=0.4) +
scale_x_discrete(limits = month.abb) + # month.abb = c("Jan", "Feb"...)
theme_bw() +
labs(title = "Heating and Cooling Days",
subtitle = "without position_nudge",
x = "Month",
y = "Cumulative Heat/Cool Days");
plot(thePlot);
# this plot is overlaying values within each color -- it is not stacking them...
# what you see is the highest value for each month
thePlot = ggplot(data=weatherData) +
geom_col(mapping=aes(x=month, y=heatDays),
position=position_nudge(x=-0.2), # don't need after_stat()!
fill = "red", # background color
width=0.4) +
geom_col(mapping=aes(x=month, y=coolDays),
position=position_nudge(x=0.2),
color = "blue", # outline color
alpha = 0, # transparent background
width=0.4) +
scale_x_discrete(limits = month.abb) + # month.abb = c("Jan", "Feb"...)
theme_bw() +
labs(title = "Heating and Cooling Days",
subtitle = "with position_nudge",
x = "Month",
y = "Cumulative Heat/Cool Days");
plot(thePlot);
}
|
/8-3 Class Material/app08 - wrong answer.R
|
no_license
|
alexwalus/fw893
|
R
| false | false | 2,025 |
r
|
{
source( file="scripts/reference.R" );
weatherData = read.csv( file="data/LansingNOAA2016-3.csv",
stringsAsFactors = FALSE );
dates = as.Date(weatherData$dateYr); # save the date column to a vector
months = format(dates, format="%b"); # extract the month -- save to vector
weatherData$month = months; # save months to data frame as new column
# this plot is overlaying the stacked blue values on the stacked red values
thePlot = ggplot(data=weatherData) +
geom_col(mapping=aes(x=month, y=heatDays),
fill = "red", # background color
width=0.4) +
geom_col(mapping=aes(x=month, y=coolDays),
color = "blue", # outline color
alpha = 0, # transparent background
width=0.4) +
scale_x_discrete(limits = month.abb) + # month.abb = c("Jan", "Feb"...)
theme_bw() +
labs(title = "Heating and Cooling Days",
subtitle = "without position_nudge",
x = "Month",
y = "Cumulative Heat/Cool Days");
plot(thePlot);
# this plot is overlaying values within each color -- it is not stacking them...
# what you see is the highest value for each month
thePlot = ggplot(data=weatherData) +
geom_col(mapping=aes(x=month, y=heatDays),
position=position_nudge(x=-0.2), # don't need after_stat()!
fill = "red", # background color
width=0.4) +
geom_col(mapping=aes(x=month, y=coolDays),
position=position_nudge(x=0.2),
color = "blue", # outline color
alpha = 0, # transparent background
width=0.4) +
scale_x_discrete(limits = month.abb) + # month.abb = c("Jan", "Feb"...)
theme_bw() +
labs(title = "Heating and Cooling Days",
subtitle = "with position_nudge",
x = "Month",
y = "Cumulative Heat/Cool Days");
plot(thePlot);
}
|
#!/usr/bin/env Rscript
library("biomaRt")
library(seqinr)
library(abind)
library(rjson)
dir.create("data/pos_seqs/")
dir.create("data/neg_seqs/")
# Positive genes
pos_table <- read.csv(file = "data/Intellectual disability.csv", sep = '\t', header = TRUE)
pos_train_names <- pos_table[,1]
# Negative genes
neg_table <- read.csv(file = "data/control.csv", sep = '\t', header = TRUE)
neg_train_names <- neg_table[,1]
a <- listMarts()
ensembl=useMart("ensembl")
datasets <- listDatasets(ensembl)
ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
for(gene in pos_train_names){
bm <- getBM(attributes=c('ensembl_gene_id', 'ensembl_transcript_id', 'coding'),
filters = c('external_gene_name'),
values = gene,
mart = ensembl)
biomaRt::exportFASTA(bm, paste(c('data/pos_seqs/', gene, '.fasta'), collapse = ""))
}
for(gene in neg_train_names){
bm <- getBM(attributes=c('ensembl_gene_id', 'ensembl_transcript_id', 'coding'),
filters = c('external_gene_name'),
values = gene,
mart = ensembl)
biomaRt::exportFASTA(bm, paste(c('data/neg_seqs/', gene, '.fasta'), collapse = ""))
}
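# Illustrative only: the FASTA files written above can be inspected with seqinr
# (attached at the top but otherwise unused); the gene/file name here is hypothetical.
if (FALSE) {
  seqs <- seqinr::read.fasta(file = "data/pos_seqs/SomeGene.fasta", seqtype = "DNA")
  length(seqs)  # number of coding sequences exported for that gene
}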
|
/get_data/get_data.R
|
no_license
|
casti11/ProGenDec_BLSTM
|
R
| false | false | 1,144 |
r
|
#!/usr/bin/env Rscript
library("biomaRt")
library(seqinr)
library(abind)
library(rjson)
dir.create("data/pos_seqs/")
dir.create("data/neg_seqs/")
# Positive genes
pos_table <- read.csv(file = "data/Intellectual disability.csv", sep = '\t', header = TRUE)
pos_train_names <- pos_table[,1]
# Negative genes
neg_table <- read.csv(file = "data/control.csv", sep = '\t', header = TRUE)
neg_train_names <- neg_table[,1]
a <- listMarts()
ensembl=useMart("ensembl")
datasets <- listDatasets(ensembl)
ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
for(gene in pos_train_names){
bm <- getBM(attributes=c('ensembl_gene_id', 'ensembl_transcript_id', 'coding'),
filters = c('external_gene_name'),
values = gene,
mart = ensembl)
biomaRt::exportFASTA(bm, paste(c('data/pos_seqs/', gene, '.fasta'), collapse = ""))
}
for(gene in neg_train_names){
bm <- getBM(attributes=c('ensembl_gene_id', 'ensembl_transcript_id', 'coding'),
filters = c('external_gene_name'),
values = gene,
mart = ensembl)
biomaRt::exportFASTA(bm, paste(c('data/neg_seqs/', gene, '.fasta'), collapse = ""))
}
|
\name{bactMAP}
\alias{bactMAP-package}
\alias{bactMAP}
\alias{BACTMAP}
\docType{package}
\title{BactMAP: Analyze Bacterial Cell Segmentation And Fluorescence Data
}
\description{
\emph{version 0.1.0.1}
Package to upload and uniformly analyze (bacterial) cell segmentation and fluorescence data, making it possible to combine output from different analysis tools into one or more datasets. Visualize the data using ggplot2, statistically compare datasets and automatically produce a summary of the data.
}
\references{
van Raaphorst R, Kjos M, Veening JW (2019)
van Raaphorst R*, Kjos M*, Veening JW (2017) Chromosome segregation drives division site selection in \emph{Streptococcus pneumoniae. Proc Natl Acad Sci U S A.} 114(29):E5959-E5968.
\url{https://github.com/vrrenske/BactMAP}
}
\author{
Renske van Raaphorst<renske.vanraaphorst@unil.ch>
University of Lausanne
}
\note{
Compatible software outputs:
*MicrobeJ<\url{www.microbej.com}>
*ObjectJ<\url{https://sils.fnwi.uva.nl/bcb/objectj/}>
*ISBatch<\url{http://singlemolecule.github.io/iSBatch/}>
*Oufti<\url{www.oufti.org}>
*MicrobeTracker<\url{www.microbetracker.org}>
*Morphometrics<\url{https://simtk.org/projects/morphometrics}>
*SuperSegger<\url{https://github.com/wiggins-lab/SuperSegger/wiki}>
}
|
/man/Shinyspots_documentation.Rd
|
no_license
|
vrrenske/BactMAP
|
R
| false | false | 1,272 |
rd
|
\name{bactMAP}
\alias{bactMAP-package}
\alias{bactMAP}
\alias{BACTMAP}
\docType{package}
\title{BactMAP: Analyze Bacterial Cell Segmentation And Fluorescence Data
}
\description{
\emph{version 0.1.0.1}
Package to upload and uniformly analyze (bacterial) cell segmentation and fluorescence data, making it possible to combine output from different analysis tools into one or more datasets. Visualize the data using ggplot2, statistically compare datasets and automatically produce a summary of the data.
}
\references{
van Raaphorst R, Kjos M, Veening JW (2019)
van Raaphorst R*, Kjos M*, Veening JW (2017) Chromosome segregation drives division site selection in \emph{Streptococcus pneumoniae. Proc Natl Acad Sci U S A.} 114(29):E5959-E5968.
\url{https://github.com/vrrenske/BactMAP}
}
\author{
Renske van Raaphorst<renske.vanraaphorst@unil.ch>
University of Lausanne
}
\note{
Compatible software outputs:
*MicrobeJ<\url{www.microbej.com}>
*ObjectJ<\url{https://sils.fnwi.uva.nl/bcb/objectj/}>
*ISBatch<\url{http://singlemolecule.github.io/iSBatch/}>
*Oufti<\url{www.oufti.org}>
*MicrobeTracker<\url{www.microbetracker.org}>
*Morphometrics<\url{https://simtk.org/projects/morphometrics}>
*SuperSegger<\url{https://github.com/wiggins-lab/SuperSegger/wiki}>
}
|
tempConvert <- function(startingScale, temp)
{
  # convert temp from the starting scale to the other scale
  if (startingScale == "F") {
    convertedTemp <- (temp - 32) * 5/9   # Fahrenheit -> Celsius
  } else if (startingScale == "C") {
    convertedTemp <- temp * 9/5 + 32     # Celsius -> Fahrenheit
  } else {
    stop("startingScale must be \"F\" or \"C\"")
  }
  return(convertedTemp)
}
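# Quick sanity checks for the corrected conversions above (illustrative only):
tempConvert("F", 212)   # 100  (boiling point, F -> C)
tempConvert("C", 100)   # 212  (boiling point, C -> F)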
|
/Untitled.R
|
no_license
|
stevenpbrady/stevenpbrady.github.io
|
R
| false | false | 205 |
r
|
tempConvert <- function(startingScale, temp)
{
  # convert temp from the starting scale to the other scale
  if (startingScale == "F") {
    convertedTemp <- (temp - 32) * 5/9   # Fahrenheit -> Celsius
  } else if (startingScale == "C") {
    convertedTemp <- temp * 9/5 + 32     # Celsius -> Fahrenheit
  } else {
    stop("startingScale must be \"F\" or \"C\"")
  }
  return(convertedTemp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processing.R
\name{remove_na_cols}
\alias{remove_na_cols}
\title{Remove columns of X that are at least some proportion NA}
\usage{
remove_na_cols(X, na_thresh = 1)
}
\arguments{
\item{X}{A data.table or data.frame whose all NA columns we want to remove}
\item{na_thresh}{The proportion of elements in the column that have to be NA
before we remove it. Defaults to 1.}
}
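% Example added for illustration; the behaviour shown is inferred from the argument
% descriptions above, not from the package source.
\examples{
## Sketch only: assumes the function returns X without the columns whose
## NA proportion reaches na_thresh.
X <- data.frame(a = c(1, NA, 3), b = c(NA, NA, NA), c = 1:3)
remove_na_cols(X)                  # drops b (entirely NA)
remove_na_cols(X, na_thresh = 0.3) # presumably also drops a (1/3 NA)
}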
|
/man/remove_na_cols.Rd
|
no_license
|
krisrs1128/cleanUtils
|
R
| false | true | 450 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processing.R
\name{remove_na_cols}
\alias{remove_na_cols}
\title{Remove columns of X that are at least some proportion NA}
\usage{
remove_na_cols(X, na_thresh = 1)
}
\arguments{
\item{X}{A data.table or data.frame whose all NA columns we want to remove}
\item{na_thresh}{The proportion of elements in the column that have to be NA
before we remove it. Defaults to 1.}
}
|
library(shiny)
library(shinydashboard)
source("pages/home.R")
source("pages/size_vs_time.R")
source("pages/harvesting.R")
source("pages/age_vs_size.R")
source("pages/conclusion.R")
source("pages/about.R")
# creates layout and puts together pages
ui <- dashboardPage(
dashboardHeader(title = "Shrinking Salmon"),
dashboardSidebar(
sidebarMenu(
menuItem("Home", tabName = "home", icon = icon("home")),
menuItem("Size vs. Time", tabName = "size_vs_time", icon = icon("clock")),
menuItem("Size vs. Age", tabName = "age_vs_size", icon = icon("stream")),
menuItem("Harvesting", tabName = "harvesting", icon = icon("fish")),
menuItem("Conclusion", tabName = "conclusion",
icon = icon("hourglass-end")),
menuItem("About", tabName = "about", icon = icon("address-card"))
)
),
dashboardBody(
tabItems(
tabItem(tabName = "home",
home_page),
tabItem(tabName = "size_vs_time",
size_vs_time_page),
tabItem(tabName = "age_vs_size",
age_vs_size_page),
tabItem(tabName = "harvesting",
harvesting_page),
tabItem(tabName = "conclusion",
conclusion_page),
tabItem(tabName = "about",
about_page)
)
)
)
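# Illustrative only: this ui object is normally paired with a server function and
# launched via shinyApp(); the project's real server logic lives elsewhere.
if (FALSE) {
  server <- function(input, output, session) {}
  shiny::shinyApp(ui = ui, server = server)
}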
|
/app_ui.R
|
no_license
|
rklein324/AE-Spicy-Salmon
|
R
| false | false | 1,280 |
r
|
library(shiny)
library(shinydashboard)
source("pages/home.R")
source("pages/size_vs_time.R")
source("pages/harvesting.R")
source("pages/age_vs_size.R")
source("pages/conclusion.R")
source("pages/about.R")
# creates layout and puts together pages
ui <- dashboardPage(
dashboardHeader(title = "Shrinking Salmon"),
dashboardSidebar(
sidebarMenu(
menuItem("Home", tabName = "home", icon = icon("home")),
menuItem("Size vs. Time", tabName = "size_vs_time", icon = icon("clock")),
menuItem("Size vs. Age", tabName = "age_vs_size", icon = icon("stream")),
menuItem("Harvesting", tabName = "harvesting", icon = icon("fish")),
menuItem("Conclusion", tabName = "conclusion",
icon = icon("hourglass-end")),
menuItem("About", tabName = "about", icon = icon("address-card"))
)
),
dashboardBody(
tabItems(
tabItem(tabName = "home",
home_page),
tabItem(tabName = "size_vs_time",
size_vs_time_page),
tabItem(tabName = "age_vs_size",
age_vs_size_page),
tabItem(tabName = "harvesting",
harvesting_page),
tabItem(tabName = "conclusion",
conclusion_page),
tabItem(tabName = "about",
about_page)
)
)
)
|
library(data.table)
train <- data.frame(fread("/pine/scr/d/d/ddray/archie_training_64.txt")) ## read in the training data
LABEL_COL=73
lr <- train[,1:LABEL_COL] # cut down the extra columns -- 209 is the label (0=not archaic, 1=archaic)
model <- glm(V73 ~ .,family=binomial(link='logit'),data=lr) # train the model
cleanModel2 = function(cm) {
cm$y = c()
cm$model = c()
cm$residuals = c()
cm$fitted.values = c()
cm$effects = c()
cm$qr$qr = c()
cm$linear.predictors = c()
cm$weights = c()
cm$prior.weights = c()
cm$data = c()
cm
}
model <- cleanModel2(model)
print(model)
save(model, file = "trained_model_ArchIE_64.Rdata") # save the trained model so we don't have to train it again. can load it with load("trained_model.Rdata")
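# Illustrative only: reloading the stripped model later for prediction. The feature
# file name is hypothetical; columns are assumed to follow the V1..V72 convention above.
if (FALSE) {
  load("trained_model_ArchIE_64.Rdata")                      # restores `model`
  new_feats <- data.frame(fread("new_windows_features.txt")) # hypothetical file
  probs <- predict(model, newdata = new_feats[, 1:(LABEL_COL - 1)], type = "response")
}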
|
/src/models/train_ArchIE_64.R
|
no_license
|
SchriderLab/intro_UNET
|
R
| false | false | 759 |
r
|
library(data.table)
train <- data.frame(fread("/pine/scr/d/d/ddray/archie_training_64.txt")) ## read in the training data
LABEL_COL=73
lr <- train[,1:LABEL_COL] # cut down the extra columns -- 209 is the label (0=not archaic, 1=archaic)
model <- glm(V73 ~ .,family=binomial(link='logit'),data=lr) # train the model
cleanModel2 = function(cm) {
cm$y = c()
cm$model = c()
cm$residuals = c()
cm$fitted.values = c()
cm$effects = c()
cm$qr$qr = c()
cm$linear.predictors = c()
cm$weights = c()
cm$prior.weights = c()
cm$data = c()
cm
}
model <- cleanModel2(model)
print(model)
save(model, file = "trained_model_ArchIE_64.Rdata") # save the trained model so we don't have to train it again. can load it with load("trained_model.Rdata")
|
library(ape)
library(raster)
library(spdep)
library(spatialreg)
library(reshape2) # melt() is used below to flatten the nested result lists
# Arrange data ------------------------------------------------------------
# Keep only time periods with more than 10 observations (smin)
subsample_data <- function(p, d, smin){
nd <- subset(d, year==p)
if(nrow(nd) > smin){return(nd)}
}
data.cels.subset <- pcs.data
data.cels.subset <- data.cels.subset[, c("nri", "nri_p", "nri_significant", "nti", "nti_p", "nti_significant", "year", "Tmin", "Tmax", "Pmin", "Pmax", "AET", "ETR", "WDI", "Deglac.", "x", "y")]
data.cels.subset <- lapply(paste(PERIODS, "BP", sep=""), FUN=subsample_data, data.cels.subset, 10)
data.cels.subset <- do.call(rbind, data.cels.subset)
# Neighbour definition for Spatial Autocorrelation tests ------------------
# define neighbors with distance d
# damax=1
neigh.d1 <- dnearneigh(cbind(data.cels.subset$x, data.cels.subset$y), d1=0, d2=120, longlat=TRUE)
w.d1 <- nb2listw(neigh.d1, style = "W", zero.policy =TRUE)
# damax=2
neigh.d2 <- dnearneigh(cbind(data.cels.subset$x, data.cels.subset$y), d1=0, d2=360, longlat=TRUE)
w.d2 <- nb2listw(neigh.d2, style = "W", zero.policy = TRUE)
# damax=3
neigh.d3 <- dnearneigh(cbind(data.cels.subset$x, data.cels.subset$y), d1=0, d2=480, longlat=TRUE)
w.d3 <- nb2listw(neigh.d3, style = "W", zero.policy = TRUE)
rm(neigh.d1, neigh.d2, neigh.d3)
# Raw Spatial Autocorrelation tests ---------------------------------------
get_raw_moran <- function(moran.test){
output <- data.frame(moran.i=double(), p.value=double(), expectation=double(),
variance=double(), std_dev=double(),
alternative=as.character(), stringsAsFactors=FALSE)
output[1, "moran.i"] <- moran.test$estimate[1]
output[1, "p.value"] <- moran.test$p.value
output[1, "std_dev"] <- moran.test$statistic
output[1, "expectation"] <- moran.test$estimate[2]
output[1, "variance"] <- moran.test$estimate[3]
output[1, "alternative"] <- moran.test$alternative
return(output)
}
# test for spatial autocorrelation in raw NRI and NTI
pcs.moran <- list() # list for pcs metrics
pcs.moran$nri <- list() # list for spatial autocorrelation distances
pcs.moran$nri$"120" <- moran.test(data.cels.subset$nri, listw=w.d1, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nri$"360" <- moran.test(data.cels.subset$nri, listw=w.d2, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nri$"480" <- moran.test(data.cels.subset$nri, listw=w.d3, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nti <- list() # list for spatial autocorrelation distances
pcs.moran$nti$"120" <- moran.test(data.cels.subset$nti, listw=w.d1, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nti$"360" <- moran.test(data.cels.subset$nti, listw=w.d2, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nti$"480" <- moran.test(data.cels.subset$nti, listw=w.d3, zero.policy= TRUE, na.action=na.exclude)
# extract parameters
pcs.moran$nri <- lapply(pcs.moran$nri, FUN=get_raw_moran)
pcs.moran$nti <- lapply(pcs.moran$nti, FUN=get_raw_moran)
# melt parameters
pcs.moran <- melt(pcs.moran, id.vars=colnames(pcs.moran$nri$"120"))
colnames(pcs.moran) <- c("moran.i", "p.value", "expectation", "variance", "std_dev", "alternative", "distance", "pcs_metrics")
# write to the hard disk drive
write.csv(pcs.moran, file="Results/Tables/RAW_moran.csv")
# Fit OLS models ----------------------------------------------------------
fit_ols <- function(var, pcs_metric, data){
formula.1 <- formula(paste0(pcs_metric, "~", var))
formula.2 <- formula(paste0(pcs_metric, "~", var, " + year"))
formula.3 <- formula(paste0(pcs_metric, "~", var, " * year"))
ols.1 <- eval(bquote(lm(.(formula.1), data)))
ols.2 <- eval(bquote(lm(.(formula.2), data)))
ols.3 <- eval(bquote(lm(.(formula.3), data)))
ols <- list(ols.1, ols.2, ols.3)
names(ols) <- c("m1", "m2", "m3")
return(ols)
}
get_ols_param <- function(ols.list){
require(broom)
m1.output <- cbind(coef(ols.list$m1)[1], coef(ols.list$m1)[2], glance(ols.list$m1))
m2.output <- cbind(coef(ols.list$m2)[2], glance(ols.list$m2))
m3.output <- glance(ols.list$m3)
col.names <- colnames(m3.output)
colnames(m1.output) <- paste0("m1.", c("intercept", "slope", col.names))
colnames(m2.output) <- paste0("m2.", c("slope", col.names))
colnames(m3.output) <- paste0("m3.", col.names)
output <- cbind(m1.output, m2.output, m3.output)
return(output)
}
pcs.ols <- list()
pcs.ols$nri <- lapply(c(COORD_COLNAMES, VARS_NEW_NAMES), FUN=fit_ols, "nri", data.cels.subset)
names(pcs.ols$nri) <- c(COORD_COLNAMES, VARS_NEW_NAMES)
pcs.ols$nti <- lapply(c(COORD_COLNAMES, VARS_NEW_NAMES), FUN=fit_ols, "nti", data.cels.subset)
names(pcs.ols$nti) <- c(COORD_COLNAMES, VARS_NEW_NAMES)
pcs.ols.param <- list()
pcs.ols.param$nri <- lapply(pcs.ols$nri, FUN=get_ols_param)
pcs.ols.param$nti <- lapply(pcs.ols$nti, FUN=get_ols_param)
pcs.ols.param <- melt(pcs.ols.param, id.vars=colnames(pcs.ols.param$nri[[1]]))
colnames(pcs.ols.param) <- c(colnames(pcs.ols.param[c(1:36)]), "variable", "pcs_metric")
write.csv(pcs.ols.param, file="Results/Tables/OLS_models.csv")
rm(pcs.ols.param)
# OLS Spatial Autocorrelation tests ---------------------------------------
get_ols_moran <- function(models, nb.weight) {
require (spdep)
mod.1 <- models$m1
mod.2 <- models$m2
mod.3 <- models$m3
output <- data.frame(moran_i_1=double(),
p.value_1=double(),
moran_i_2=double(),
p.value_2=double(),
moran_i_3=double(),
p.value_3=double())
moran.1 <- lm.morantest(mod.1, nb.weight, zero.policy= TRUE)
output[1,1] <- moran.1$estimate[1]
output[1,2] <- moran.1$p.value
moran.2 <- lm.morantest(mod.2, nb.weight, zero.policy= TRUE)
output[1,3] <- moran.2$estimate[1]
output[1,4] <- moran.2$p.value
moran.3 <- lm.morantest(mod.3, nb.weight, zero.policy= TRUE)
output[1,5] <- moran.3$estimate[1]
output[1,6] <- moran.3$p.value
return(output)
}
# test for spatial autoregression in NRI and NTI residuals from OLS
pcs.ols.moran <- list()
pcs.ols.moran$nri <- list()
pcs.ols.moran$nri$"120" <- lapply(pcs.ols$nri, FUN=get_ols_moran, w.d1)
pcs.ols.moran$nri$"360" <- lapply(pcs.ols$nri, FUN=get_ols_moran, w.d2)
pcs.ols.moran$nri$"480" <- lapply(pcs.ols$nri, FUN=get_ols_moran, w.d3)
pcs.ols.moran$nti <- list()
pcs.ols.moran$nti$"120" <- lapply(pcs.ols$nti, FUN=get_ols_moran, w.d1)
pcs.ols.moran$nti$"360" <- lapply(pcs.ols$nti, FUN=get_ols_moran, w.d2)
pcs.ols.moran$nti$"480" <- lapply(pcs.ols$nti, FUN=get_ols_moran, w.d3)
# melt results
pcs.ols.moran <- melt(pcs.ols.moran, id.vars=c("moran_i_1", "p.value_1", "moran_i_2", "p.value_2", "moran_i_3", "p.value_3"))
colnames(pcs.ols.moran) <- c("moran_i_1", "p.value_1", "moran_i_2", "p.value_2", "moran_i_3", "p.value_3", "variable", "distance", "pcs_metric")
write.csv(pcs.ols.moran, "Results/Tables/OLS_moran.csv")
# Fit SAR models ----------------------------------------------------------
fit_sar <- function(var, pcs_metric, data, w, sar.type=c("lag", "err")){
formula.1 <- formula(paste0(pcs_metric, "~", var))
formula.2 <- formula(paste0(pcs_metric, "~", var, " + year"))
formula.3 <- formula(paste0(pcs_metric, "~", var, " * year"))
if(sar.type == "lag"){
sar.1 <- eval(bquote(lagsarlm(.(formula.1), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.2 <- eval(bquote(lagsarlm(.(formula.2), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.3 <- eval(bquote(lagsarlm(.(formula.3), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
}
if(sar.type == "err"){
sar.1 <- eval(bquote(errorsarlm(.(formula.1), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.2 <- eval(bquote(errorsarlm(.(formula.2), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.3 <- eval(bquote(errorsarlm(.(formula.3), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
}
sar <- list(sar.1, sar.2, sar.3)
names(sar) <- c("m1", "m2", "m3")
return(sar)
}
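# --- Hedged usage sketch (illustration only) -----------------------------------
# sar.type selects a spatial-lag model (spatialreg::lagsarlm) or a spatial-error
# model (spatialreg::errorsarlm); a single fit mirrors the lapply() calls below:
# fit_sar("Tmin", "nri", data.cels.subset, w.d1, "lag")
# -------------------------------------------------------------------------------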
# Fit SAR-lag models
pcs.sar.lag <- list()
pcs.sar.lag$nri <- list()
pcs.sar.lag$nri$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d1, "lag")
pcs.sar.lag$nri$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d2, "lag")
pcs.sar.lag$nri$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d3, "lag")
pcs.sar.lag$nti <- list()
pcs.sar.lag$nti$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d1, "lag")
pcs.sar.lag$nti$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d2, "lag")
pcs.sar.lag$nti$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d3, "lag")
names(pcs.sar.lag$nri$"120") <- names(pcs.sar.lag$nri$"360") <- names(pcs.sar.lag$nri$"480") <- VARS_NEW_NAMES
names(pcs.sar.lag$nti$"120") <- names(pcs.sar.lag$nti$"360") <- names(pcs.sar.lag$nti$"480") <- VARS_NEW_NAMES
# save(pcs.sar.lag, file="Results/RObjects/SAR_lag_models.RData")
# load("Results/RObjects/SAR_lag_models.RData")
# Fit SAR-err models
pcs.sar.err <- list()
pcs.sar.err$nri <- list()
pcs.sar.err$nri$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d1, "err")
pcs.sar.err$nri$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d2, "err")
pcs.sar.err$nri$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d3, "err")
pcs.sar.err$nti <- list()
pcs.sar.err$nti$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d1, "err")
pcs.sar.err$nti$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d2, "err")
pcs.sar.err$nti$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d3, "err")
names(pcs.sar.err$nri$"120") <- names(pcs.sar.err$nri$"360") <- names(pcs.sar.err$nri$"480") <- VARS_NEW_NAMES
names(pcs.sar.err$nti$"120") <- names(pcs.sar.err$nti$"360") <- names(pcs.sar.err$nti$"480") <- VARS_NEW_NAMES
# save(pcs.sar.err, file="Results/RObjects/SAR_err_models.RData")
# load("Results/RObjects/SAR_err_models.RData")
# Get parameters from the SAR models
get_sar_param <- function(sar.list){
require(spdep)
.get_sar_param <- function(model) {
require(spdep)
output <- data.frame(intercept=double(), slope=double(),
r.square=double(), p.value=double(),
lr.value=double(), lr.p.value=double(),
aic=double(), lm.aic=double(),
lm.value=double())
output[1, "intercept"] <- summary(model, Nagelkerke = T)$coefficients[1]
output[1, "slope"] <- summary(model, Nagelkerke = T)$coefficients[2]
output[1, "r.square"] <- summary(model, Nagelkerke = T)$NK
output[1, "p.value"] <- summary(model, Nagelkerke = T)$Coef[2,4]
output[1, "lr.value"] <- summary(model, Nagelkerke = T)$LR1$statistic[[1]]
output[1, "lr.p.value"] <- summary(model, Nagelkerke = T)$LR1$p.value[[1]]
output[1, "aic"] <- AIC(model)
output[1, "lm.aic"] <- summary(model, Nagelkerke = T)$AIC_lm.model
if(is.null(summary(model, Nagelkerke=T)$LMtest)){
output[1, "lm.value"] <- NA
}else{
output[1, "lm.value"] <- summary(model, Nagelkerke = T)$LMtest
}
return(output)
}
output.1 <- .get_sar_param(sar.list$m1)
output.2 <- .get_sar_param(sar.list$m2)
output.3 <- .get_sar_param(sar.list$m3)
output.colnames <- colnames(output.1)
output <- cbind(output.1, output.2, output.3)
colnames(output) <- c(paste0("m1.", output.colnames), paste0("m2.", output.colnames), paste0("m3.", output.colnames))
return(output)
}
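# Note: get_sar_param() extracts the Nagelkerke pseudo-R2 (NK), the likelihood
# ratio test of the SAR fit against the corresponding OLS fit (LR1), both AIC
# values, and -- when summary() reports it -- the Lagrange multiplier test for
# residual autocorrelation (LMtest).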
pcs.sar.lag.param <- list()
pcs.sar.lag.param$nri <- list()
pcs.sar.lag.param$nri$"120" <- lapply(pcs.sar.lag$nri$"120", FUN=get_sar_param)
pcs.sar.lag.param$nri$"360" <- lapply(pcs.sar.lag$nri$"360", FUN=get_sar_param)
pcs.sar.lag.param$nri$"480" <- lapply(pcs.sar.lag$nri$"480", FUN=get_sar_param)
pcs.sar.lag.param$nti <- list()
pcs.sar.lag.param$nti$"120" <- lapply(pcs.sar.lag$nti$"120", FUN=get_sar_param)
pcs.sar.lag.param$nti$"360" <- lapply(pcs.sar.lag$nti$"360", FUN=get_sar_param)
pcs.sar.lag.param$nti$"480" <- lapply(pcs.sar.lag$nti$"480", FUN=get_sar_param)
pcs.sar.lag.param <- melt(pcs.sar.lag.param, id.vars=colnames(pcs.sar.lag.param$nri$"120"[[1]]))
colnames(pcs.sar.lag.param) <- c(colnames(pcs.sar.lag.param)[c(1:27)], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.lag.param, file="Results/Tables/SAR_lag_parameters.csv")
rm(pcs.sar.lag.param)
pcs.sar.err.param <- list()
pcs.sar.err.param$nri <- list()
pcs.sar.err.param$nri$"120" <- lapply(pcs.sar.err$nri$"120", FUN=get_sar_param)
pcs.sar.err.param$nri$"360" <- lapply(pcs.sar.err$nri$"360", FUN=get_sar_param)
pcs.sar.err.param$nri$"480" <- lapply(pcs.sar.err$nri$"480", FUN=get_sar_param)
pcs.sar.err.param$nti <- list()
pcs.sar.err.param$nti$"120" <- lapply(pcs.sar.err$nti$"120", FUN=get_sar_param)
pcs.sar.err.param$nti$"360" <- lapply(pcs.sar.err$nti$"360", FUN=get_sar_param)
pcs.sar.err.param$nti$"480" <- lapply(pcs.sar.err$nti$"480", FUN=get_sar_param)
pcs.sar.err.param <- melt(pcs.sar.err.param, id.vars=colnames(pcs.sar.err.param$nri$"120"[[1]]))
colnames(pcs.sar.err.param) <- c(colnames(pcs.sar.err.param)[c(1:27)], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.err.param, file="Results/Tables/SAR_err_parameters.csv")
rm(pcs.sar.err.param)
# SAR Spatial Autocorrelation tests ---------------------------------------
get_sar_moran <- function(sar.list, nb.weight){
.get_sar_moran <- function(models, nb.weight) {
require (spdep)
mod.1 <- models[[1]]
mod.2 <- models[[2]]
mod.3 <- models[[3]]
output <- data.frame(moran_i_1=double(),
p.value_1=double(),
moran_i_2=double(),
p.value_2=double(),
moran_i_3=double(),
p.value_3=double())
moran.1 <- moran.test(residuals(mod.1),
listw=nb.weight,
zero.policy= TRUE,
na.action=na.exclude)
output[1,1] <- moran.1$estimate[1]
output[1,2] <- moran.1$p.value
moran.2 <- moran.test(residuals(mod.2),
listw = nb.weight,
zero.policy = TRUE,
na.action = na.exclude)
output[1,3] <- moran.2$estimate[1]
output[1,4] <- moran.2$p.value
moran.3 <- moran.test(residuals(mod.3),
listw = nb.weight,
zero.policy = TRUE,
na.action = na.exclude)
output[1,5] <- moran.3$estimate[1]
output[1,6] <- moran.3$p.value
return(output)
}
results <- lapply(sar.list, FUN=.get_sar_moran, nb.weight)
names(results) <- names(sar.list)
return(results)
}
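# --- Hedged usage sketch (illustration only) -----------------------------------
# Moran's I on the residuals of one set of SAR fits, e.g. the SAR-lag fits
# built with w.d1:
# get_sar_moran(pcs.sar.lag$nri$"120", w.d1)$Tmin
# -------------------------------------------------------------------------------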
pcs.sar.lag.moran <- list()
pcs.sar.lag.moran$nri <- list()
pcs.sar.lag.moran$nri$"120" <- lapply(pcs.sar.lag$nri, FUN=get_sar_moran, w.d1)
pcs.sar.lag.moran$nri$"360" <- lapply(pcs.sar.lag$nri, FUN=get_sar_moran, w.d2)
pcs.sar.lag.moran$nri$"480" <- lapply(pcs.sar.lag$nri, FUN=get_sar_moran, w.d3)
pcs.sar.lag.moran$nti <- list()
pcs.sar.lag.moran$nti$"120" <- lapply(pcs.sar.lag$nti, FUN=get_sar_moran, w.d1)
pcs.sar.lag.moran$nti$"360" <- lapply(pcs.sar.lag$nti, FUN=get_sar_moran, w.d2)
pcs.sar.lag.moran$nti$"480" <- lapply(pcs.sar.lag$nti, FUN=get_sar_moran, w.d3)
pcs.sar.err.moran <- list()
pcs.sar.err.moran$nri <- list()
pcs.sar.err.moran$nri$"120" <- lapply(pcs.sar.err$nri, FUN=get_sar_moran, w.d1)
pcs.sar.err.moran$nri$"360" <- lapply(pcs.sar.err$nri, FUN=get_sar_moran, w.d2)
pcs.sar.err.moran$nri$"480" <- lapply(pcs.sar.err$nri, FUN=get_sar_moran, w.d3)
pcs.sar.err.moran$nti <- list()
pcs.sar.err.moran$nti$"120" <- lapply(pcs.sar.err$nti, FUN=get_sar_moran, w.d1)
pcs.sar.err.moran$nti$"360" <- lapply(pcs.sar.err$nti, FUN=get_sar_moran, w.d2)
pcs.sar.err.moran$nti$"480" <- lapply(pcs.sar.err$nti, FUN=get_sar_moran, w.d3)
pcs.sar.lag.moran <- melt(pcs.sar.lag.moran, id.vars=colnames(pcs.sar.lag.moran$nri$"120"$"120"$Tmin))
pcs.sar.err.moran <- melt(pcs.sar.err.moran, id.vars=colnames(pcs.sar.err.moran$nri$"120"$"120"$Tmin))
colnames(pcs.sar.lag.moran) <- colnames(pcs.sar.err.moran) <- c("moran_i_1", "p.value_1", "moran_i_2", "p.value_2", "moran_i_3", "p.value_3", "variable", "test_distance", "model_distance", "pcs_metric")
write.csv(pcs.sar.lag.moran, file = "Results/Tables/SAR_lag_moran.csv")
write.csv(pcs.sar.err.moran, file = "Results/Tables/SAR_err_moran.csv")
# OLS - ANOVA -------------------------------------------------------------
get_ols_anova <- function(model_list){
aov <- anova(model_list$m1, model_list$m2, model_list$m3)
output <- data.frame(m1.Res.Df=double(),
m2.Res.Df=double(),
m3.Res.Df=double(),
m1.RSS=double(),
m2.RSS=double(),
m3.RSS=double(),
m1.Df=double(),
m2.Df=double(),
m3.Df=double(),
m1.Sum_of_sq=double(),
m2.Sum_of_sq=double(),
m3.Sum_of_sq=double(),
m1.F=double(),
m2.F=double(),
m3.F=double(),
m1.p.value=double(),
m2.p.value=double(),
m3.p.value=double())
output[1, 1:3] <- aov$Res.Df
output[1, 4:6] <- aov$RSS
output[1, 7:9] <- aov$Df
output[1, 10:12] <- aov$`Sum of Sq`
output[1, 13:15] <- aov$F
output[1, 16:18] <- aov$`Pr(>F)`
return(output)
}
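# --- Hedged usage sketch (illustration only) -----------------------------------
# anova() compares the three nested OLS fits stored by fit_ols(), e.g.:
# get_ols_anova(pcs.ols$nri$Tmin)
# -------------------------------------------------------------------------------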
pcs.ols.aov <- list()
pcs.ols.aov$nri <- lapply(pcs.ols$nri, FUN=get_ols_anova)
pcs.ols.aov$nti <- lapply(pcs.ols$nti, FUN=get_ols_anova)
names(pcs.ols.aov) <- PCS_METRICS
pcs.ols.aov <- melt(pcs.ols.aov, id.vars=colnames(pcs.ols.aov$nri$Tmin))
colnames(pcs.ols.aov) <- c(colnames(pcs.ols.aov)[1:18], "variable", "pcs_metric")
write.csv(pcs.ols.aov, file="Results/Tables/OLS_anova.csv")
# SAR - ANOVA ---------------------------------------------------------
get_sar_anova <- function(model_list, variable, metric){
aov <- anova(model_list$m1, model_list$m2, model_list$m3)
output <- data.frame(m1.df=integer(), m2.df=integer(), m3.df=integer(),
m1.AIC=double(), m2.AIC=double(), m3.AIC=double(),
m1.logLik=double(), m2.logLik=double(), m3.logLik=double(),
m1.Test=character(), m2.Test=character(), m3.Test=character(),
m1.L.Ratio=double(), m2.L.Ratio=double(), m3.L.Ratio=double(),
m1.p.value=double(), m2.p.value=double(), m3.p.value=double(),
stringsAsFactors=FALSE)
output[1, paste0("m", 1:3, ".df")] <- aov$df
output[1, paste0("m", 1:3, ".AIC")] <- aov$AIC
output[1, paste0("m", 1:3, ".logLik")] <- aov$logLik
output[1, paste0("m", 1:3, ".Test")] <- aov$Test
output[1, paste0("m", 1:3, ".L.Ratio")] <- aov$L.Ratio
output[1, paste0("m", 1:3, ".p.value")] <- aov$`p-value`
return(output)
}
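# Note: the 'variable' and 'metric' arguments of get_sar_anova() are accepted by
# the mapply() calls below but are not used inside the function; variable,
# distance and pcs_metric labels are re-attached afterwards via melt().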
# SAR lag
pcs.sar.lag.aov <- list()
pcs.sar.lag.aov$nri <- list()
pcs.sar.lag.aov$nri$"120" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nri$"120", names(pcs.sar.lag$nri$"120"), MoreArgs = list(metric="nri"), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nri$"360" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nri$"360", names(pcs.sar.lag$nri$"360"), MoreArgs = list(metric="nri"), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nri$"480" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nri$"480", names(pcs.sar.lag$nri$"480"), MoreArgs = list(metric="nri"), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nti <- list()
pcs.sar.lag.aov$nti$"120" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nti$"120", names(pcs.sar.lag$nti)[[1]], MoreArgs = list(names(pcs.sar.lag)[[2]]), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nti$"360" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nti$"360", names(pcs.sar.lag$nti)[[2]], MoreArgs = list(names(pcs.sar.lag)[[2]]), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nti$"480" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nti$"480", names(pcs.sar.lag$nti)[[3]], MoreArgs = list(names(pcs.sar.lag)[[2]]), SIMPLIFY=FALSE)
pcs.sar.lag.aov <- melt(pcs.sar.lag.aov, id.vars=colnames(pcs.sar.lag.aov$nri$"120"[[1]]))
colnames(pcs.sar.lag.aov) <- c(colnames(pcs.sar.lag.aov)[1:18], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.lag.aov, file="Results/Tables/SAR_lag_anova.csv")
rm(pcs.sar.lag.aov)
# SAR err
pcs.sar.err.aov <- list()
pcs.sar.err.aov$nri <- list()
pcs.sar.err.aov$nri$"120" <- mapply(FUN = get_sar_anova, pcs.sar.err$nri$"120", names(pcs.sar.err$nri)[[1]], MoreArgs = list(names(pcs.sar.err)[[1]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nri$"360" <- mapply(FUN = get_sar_anova, pcs.sar.err$nri$"360", names(pcs.sar.err$nri)[[2]], MoreArgs = list(names(pcs.sar.err)[[1]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nri$"480" <- mapply(FUN = get_sar_anova, pcs.sar.err$nri$"480", names(pcs.sar.err$nri)[[3]], MoreArgs = list(names(pcs.sar.err)[[1]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nti <- list()
pcs.sar.err.aov$nti$"120" <- mapply(FUN = get_sar_anova, pcs.sar.err$nti$"120", names(pcs.sar.err$nti)[[1]], MoreArgs = list(names(pcs.sar.err)[[2]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nti$"360" <- mapply(FUN = get_sar_anova, pcs.sar.err$nti$"360", names(pcs.sar.err$nti)[[2]], MoreArgs = list(names(pcs.sar.err)[[2]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nti$"480" <- mapply(FUN = get_sar_anova, pcs.sar.err$nti$"480", names(pcs.sar.err$nti)[[3]], MoreArgs = list(names(pcs.sar.err)[[2]]), SIMPLIFY=FALSE)
pcs.sar.err.aov <- melt(pcs.sar.err.aov, id.vars=colnames(pcs.sar.err.aov$nri$"120"[[1]]))
colnames(pcs.sar.err.aov) <- c(colnames(pcs.sar.err.aov)[1:18], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.err.aov, file="Results/Tables/SAR_err_anova.csv")
rm(pcs.sar.err.aov)
|
/04-OLS_SAR_models.R
|
no_license
|
fitzLab-AL/paleoPCS_plosOne
|
R
| false | false | 21,997 |
r
|
library(ape)
library(raster)
library(spdep)
library(spatialreg)
# Arrange data ------------------------------------------------------------
# Subsetting to periods with more than 10 data
subsample_data <- function(p, d, smin){
nd <- subset(d, year==p)
if(nrow(nd) > smin){return(nd)}
}
data.cels.subset <- pcs.data
data.cels.subset <- data.cels.subset[, c("nri", "nri_p", "nri_significant", "nti", "nti_p", "nti_significant", "year", "Tmin", "Tmax", "Pmin", "Pmax", "AET", "ETR", "WDI", "Deglac.", "x", "y")]
data.cels.subset <- lapply(paste(PERIODS, "BP", sep=""), FUN=subsample_data, data.cels.subset, 10)
data.cels.subset <- do.call(rbind, data.cels.subset)
# Neighbour definition for Spatial Autocorrelation tests ------------------
# define neighbors with distance d
# damax=1
neigh.d1 <- dnearneigh(cbind(data.cels.subset$x, data.cels.subset$y), d1=0, d2=120, longlat=TRUE)
w.d1 <- nb2listw(neigh.d1, style = "W", zero.policy =TRUE)
# damax=2
neigh.d2 <- dnearneigh(cbind(data.cels.subset$x, data.cels.subset$y), d1=0, d2=360, longlat=TRUE)
w.d2 <- nb2listw(neigh.d2, style = "W", zero.policy = TRUE)
# damax=3
neigh.d3 <- dnearneigh(cbind(data.cels.subset$x, data.cels.subset$y), d1=0, d2=480, longlat=TRUE)
w.d3 <- nb2listw(neigh.d3, style = "W", zero.policy = TRUE)
rm(neigh.d1, neigh.d2, neigh.d3)
# Raw Spatial Autocorrelation tests ---------------------------------------
get_raw_moran <- function(moran.test){
output <- data.frame(moran.i=double(), p.value=double(), expectation=double(),
variance=double(), std_dev=double(),
alternative=as.character(), stringsAsFactors=FALSE)
output[1, "moran.i"] <- moran.test$estimate[1]
output[1, "p.value"] <- moran.test$p.value
output[1, "std_dev"] <- moran.test$statistic
output[1, "expectation"] <- moran.test$estimate[2]
output[1, "variance"] <- moran.test$estimate[3]
output[1, "alternative"] <- moran.test$alternative
return(output)
}
# test for spatial autoregression in raw NRI and NTi
pcs.moran <- list() # list for pcs metrics
pcs.moran$nri <- list() # list for spatial autocorrelation distances
pcs.moran$nri$"120" <- moran.test(data.cels.subset$nri, listw=w.d1, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nri$"360" <- moran.test(data.cels.subset$nri, listw=w.d2, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nri$"480" <- moran.test(data.cels.subset$nri, listw=w.d3, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nti <- list() # list for spatial autocorrelation distances
pcs.moran$nti$"120" <- moran.test(data.cels.subset$nti, listw=w.d1, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nti$"360" <- moran.test(data.cels.subset$nti, listw=w.d2, zero.policy= TRUE, na.action=na.exclude)
pcs.moran$nti$"480" <- moran.test(data.cels.subset$nti, listw=w.d3, zero.policy= TRUE, na.action=na.exclude)
# extract parameters
pcs.moran$nri <- lapply(pcs.moran$nri, FUN=get_raw_moran)
pcs.moran$nti <- lapply(pcs.moran$nti, FUN=get_raw_moran)
# melt parameters
pcs.moran <- melt(pcs.moran, id.vars=colnames(pcs.moran$nri$"120"))
colnames(pcs.moran) <- c("moran.i", "p.value", "expectation", "variance", "std_dev", "alternative", "distance", "pcs_metrics")
# write to the hard disk drive
write.csv(pcs.moran, file="Results/Tables/RAW_moran.csv")
# Fit OLS models ----------------------------------------------------------
fit_ols <- function(var, pcs_metric, data){
formula.1 <- formula(paste0(pcs_metric, "~", var))
formula.2 <- formula(paste0(pcs_metric, "~", var, " + year"))
formula.3 <- formula(paste0(pcs_metric, "~", var, " * year"))
ols.1 <- eval(bquote(lm(.(formula.1), data)))
ols.2 <- eval(bquote(lm(.(formula.2), data)))
ols.3 <- eval(bquote(lm(.(formula.3), data)))
ols <- list(ols.1, ols.2, ols.3)
names(ols) <- c("m1", "m2", "m3")
return(ols)
}
get_ols_param <- function(ols.list){
require(broom)
m1.output <- cbind(coef(ols.list$m1)[1], coef(ols.list$m1)[2], glance(ols.list$m1))
m2.output <- cbind(coef(ols.list$m2)[2], glance(ols.list$m2))
m3.output <- glance(ols.list$m3)
col.names <- colnames(m3.output)
colnames(m1.output) <- paste0("m1.", c("intercept", "slope", col.names))
colnames(m2.output) <- paste0("m2.", c("slope", col.names))
colnames(m3.output) <- paste0("m3.", col.names)
output <- cbind(m1.output, m2.output, m3.output)
return(output)
}
pcs.ols <- list()
pcs.ols$nri <- lapply(c(COORD_COLNAMES, VARS_NEW_NAMES), FUN=fit_ols, "nri", data.cels.subset)
names(pcs.ols$nri) <- c(COORD_COLNAMES, VARS_NEW_NAMES)
pcs.ols$nti <- lapply(c(COORD_COLNAMES, VARS_NEW_NAMES), FUN=fit_ols, "nti", data.cels.subset)
names(pcs.ols$nti) <- c(COORD_COLNAMES, VARS_NEW_NAMES)
pcs.ols.param <- list()
pcs.ols.param$nri <- lapply(pcs.ols$nri, FUN=get_ols_param)
pcs.ols.param$nti <- lapply(pcs.ols$nti, FUN=get_ols_param)
pcs.ols.param <- melt(pcs.ols.param, id.vars=colnames(pcs.ols.param$nri[[1]]))
colnames(pcs.ols.param) <- c(colnames(pcs.ols.param[c(1:36)]), "variable", "pcs_metric")
write.csv(pcs.ols.param, file="Results/Tables/OLS_models.csv")
rm(pcs.ols.param)
# OLS Spatial Autocorrelation tests ---------------------------------------
get_ols_moran <- function(models, nb.weight) {
require (spdep)
mod.1 <- models$m1
mod.2 <- models$m2
mod.3 <- models$m3
output <- data.frame(moran_i_1=double(),
p.value_1=double(),
moran_i_2=double(),
p.value_2=double(),
moran_i_3=double(),
p.value_3=double())
moran.1 <- lm.morantest(mod.1, nb.weight, zero.policy= TRUE)
output[1,1] <- moran.1$estimate[1]
output[1,2] <- moran.1$p.value
moran.2 <- lm.morantest(mod.2, nb.weight, zero.policy= TRUE)
output[1,3] <- moran.2$estimate[1]
output[1,4] <- moran.2$p.value
moran.3 <- lm.morantest(mod.3, nb.weight, zero.policy= TRUE)
output[1,5] <- moran.3$estimate[1]
output[1,6] <- moran.3$p.value
return(output)
}
# test for spatial autoregression in NRI and NTI residuals from OLS
pcs.ols.moran <- list()
pcs.ols.moran$nri <- list()
pcs.ols.moran$nri$"120" <- lapply(pcs.ols$nri, FUN=get_ols_moran, w.d1)
pcs.ols.moran$nri$"360" <- lapply(pcs.ols$nri, FUN=get_ols_moran, w.d2)
pcs.ols.moran$nri$"480" <- lapply(pcs.ols$nri, FUN=get_ols_moran, w.d3)
pcs.ols.moran$nti <- list()
pcs.ols.moran$nti$"120" <- lapply(pcs.ols$nti, FUN=get_ols_moran, w.d1)
pcs.ols.moran$nti$"360" <- lapply(pcs.ols$nti, FUN=get_ols_moran, w.d2)
pcs.ols.moran$nti$"480" <- lapply(pcs.ols$nti, FUN=get_ols_moran, w.d3)
# melt results
pcs.ols.moran <- melt(pcs.ols.moran, id.vars=c("moran_i_1", "p.value_1", "moran_i_2", "p.value_2", "moran_i_3", "p.value_3"))
colnames(pcs.ols.moran) <- c("moran_i_1", "p.value_1", "moran_i_2", "p.value_2", "moran_i_3", "p.value_3", "variable", "distance", "pcs_metric")
write.csv(pcs.ols.moran, "Results/Tables/OLS_moran.csv")
# Fit SAR models ----------------------------------------------------------
fit_sar <- function(var, pcs_metric, data, w, sar.type=c("lag", "err")){
formula.1 <- formula(paste0(pcs_metric, "~", var))
formula.2 <- formula(paste0(pcs_metric, "~", var, " + year"))
formula.3 <- formula(paste0(pcs_metric, "~", var, " * year"))
if(sar.type == "lag"){
sar.1 <- eval(bquote(lagsarlm(.(formula.1), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.2 <- eval(bquote(lagsarlm(.(formula.2), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.3 <- eval(bquote(lagsarlm(.(formula.3), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
}
if(sar.type == "err"){
sar.1 <- eval(bquote(errorsarlm(.(formula.1), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.2 <- eval(bquote(errorsarlm(.(formula.2), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
sar.3 <- eval(bquote(errorsarlm(.(formula.3), data, listw = w, zero.policy= TRUE, na.action=na.exclude)))
}
sar <- list(sar.1, sar.2, sar.3)
names(sar) <- c("m1", "m2", "m3")
return(sar)
}
# Fit SAR-lag models
pcs.sar.lag <- list()
pcs.sar.lag$nri <- list()
pcs.sar.lag$nri$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d1, "lag")
pcs.sar.lag$nri$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d2, "lag")
pcs.sar.lag$nri$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d3, "lag")
pcs.sar.lag$nti <- list()
pcs.sar.lag$nti$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d1, "lag")
pcs.sar.lag$nti$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d2, "lag")
pcs.sar.lag$nti$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d3, "lag")
names(pcs.sar.lag$nri$"120") <- names(pcs.sar.lag$nri$"360") <- names(pcs.sar.lag$nri$"480") <- VARS_NEW_NAMES
names(pcs.sar.lag$nti$"120") <- names(pcs.sar.lag$nti$"360") <- names(pcs.sar.lag$nti$"480") <- VARS_NEW_NAMES
# save(pcs.sar.lag, file="Results/RObjects/SAR_lag_models.RData")
# load("Results/RObjects/SAR_lag_models.RData")
# Fit SAR-err models
pcs.sar.err <- list()
pcs.sar.err$nri <- list()
pcs.sar.err$nri$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d1, "err")
pcs.sar.err$nri$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d2, "err")
pcs.sar.err$nri$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nri", data.cels.subset, w.d3, "err")
pcs.sar.err$nti <- list()
pcs.sar.err$nti$"120" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d1, "err")
pcs.sar.err$nti$"360" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d2, "err")
pcs.sar.err$nti$"480" <- lapply(VARS_NEW_NAMES, FUN=fit_sar, "nti", data.cels.subset, w.d3, "err")
names(pcs.sar.err$nri$"120") <- names(pcs.sar.err$nri$"360") <- names(pcs.sar.err$nri$"480") <- VARS_NEW_NAMES
names(pcs.sar.err$nti$"120") <- names(pcs.sar.err$nti$"360") <- names(pcs.sar.err$nti$"480") <- VARS_NEW_NAMES
# save(pcs.sar.err, file="Results/RObjects/SAR_err_models.RData")
# load("Results/RObjects/SAR_err_models.RData")
# Get parameters from the SAR models
get_sar_param <- function(sar.list){
require(spdep)
.get_sar_param <- function(model) {
require(spdep)
output <- data.frame(intercept=double(), slope=double(),
r.square=double(), p.value=double(),
lr.value=double(), lr.p.value=double(),
aic=double(), lm.aic=double(),
lm.value=double())
output[1, "intercept"] <- summary(model, Nagelkerke = T)$coefficients[1]
output[1, "slope"] <- summary(model, Nagelkerke = T)$coefficients[2]
output[1, "r.square"] <- summary(model, Nagelkerke = T)$NK
output[1, "p.value"] <- summary(model, Nagelkerke = T)$Coef[2,4]
output[1, "lr.value"] <- summary(model, Nagelkerke = T)$LR1$statistic[[1]]
output[1, "lr.p.value"] <- summary(model, Nagelkerke = T)$LR1$p.value[[1]]
output[1, "aic"] <- AIC(model)
output[1, "lm.aic"] <- summary(model, Nagelkerke = T)$AIC_lm.model
if(is.null(summary(model, Nagelkerke=T)$LMtest)){
output[1, "lm.value"] <- NA
}else{
output[1, "lm.value"] <- summary(model, Nagelkerke = T)$LMtest
}
return(output)
}
output.1 <- .get_sar_param(sar.list$m1)
output.2 <- .get_sar_param(sar.list$m2)
output.3 <- .get_sar_param(sar.list$m3)
output.colnames <- colnames(output.1)
output <- cbind(output.1, output.2, output.3)
colnames(output) <- c(paste0("m1.", output.colnames), paste0("m2.", output.colnames), paste0("m3.", output.colnames))
return(output)
}
pcs.sar.lag.param <- list()
pcs.sar.lag.param$nri <- list()
pcs.sar.lag.param$nri$"120" <- lapply(pcs.sar.lag$nri$"120", FUN=get_sar_param)
pcs.sar.lag.param$nri$"360" <- lapply(pcs.sar.lag$nri$"360", FUN=get_sar_param)
pcs.sar.lag.param$nri$"480" <- lapply(pcs.sar.lag$nri$"480", FUN=get_sar_param)
pcs.sar.lag.param$nti <- list()
pcs.sar.lag.param$nti$"120" <- lapply(pcs.sar.lag$nti$"120", FUN=get_sar_param)
pcs.sar.lag.param$nti$"360" <- lapply(pcs.sar.lag$nti$"360", FUN=get_sar_param)
pcs.sar.lag.param$nti$"480" <- lapply(pcs.sar.lag$nti$"480", FUN=get_sar_param)
pcs.sar.lag.param <- melt(pcs.sar.lag.param, id.vars=colnames(pcs.sar.lag.param$nri$"120"[[1]]))
colnames(pcs.sar.lag.param) <- c(colnames(pcs.sar.lag.param)[c(1:27)], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.lag.param, file="Results/Tables/SAR_lag_parameters.csv")
rm(pcs.sar.lag.param)
pcs.sar.err.param <- list()
pcs.sar.err.param$nri <- list()
pcs.sar.err.param$nri$"120" <- lapply(pcs.sar.err$nri$"120", FUN=get_sar_param)
pcs.sar.err.param$nri$"360" <- lapply(pcs.sar.err$nri$"360", FUN=get_sar_param)
pcs.sar.err.param$nri$"480" <- lapply(pcs.sar.err$nri$"480", FUN=get_sar_param)
pcs.sar.err.param$nti <- list()
pcs.sar.err.param$nti$"120" <- lapply(pcs.sar.err$nti$"120", FUN=get_sar_param)
pcs.sar.err.param$nti$"360" <- lapply(pcs.sar.err$nti$"360", FUN=get_sar_param)
pcs.sar.err.param$nti$"480" <- lapply(pcs.sar.err$nti$"480", FUN=get_sar_param)
pcs.sar.err.param <- melt(pcs.sar.err.param, id.vars=colnames(pcs.sar.err.param$nri$"120"[[1]]))
colnames(pcs.sar.err.param) <- c(colnames(pcs.sar.err.param)[c(1:27)], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.err.param, file="Results/Tables/SAR_err_parameters.csv")
rm(pcs.sar.err.param)
# SAR Spatial Autocorrelation tests ---------------------------------------
get_sar_moran <- function(sar.list, nb.weight){
.get_sar_moran <- function(models, nb.weight) {
require (spdep)
mod.1 <- models[[1]]
mod.2 <- models[[2]]
mod.3 <- models[[3]]
output <- data.frame(moran_i_1=double(),
p.value_1=double(),
moran_i_2=double(),
p.value_2=double(),
moran_i_3=double(),
p.value_3=double())
moran.1 <- moran.test(residuals(mod.1),
listw=nb.weight,
zero.policy= TRUE,
na.action=na.exclude)
output[1,1] <- moran.1$estimate[1]
output[1,2] <- moran.1$p.value
moran.2 <- moran.test(residuals(mod.2),
listw = nb.weight,
zero.policy = TRUE,
na.action = na.exclude)
output[1,3] <- moran.2$estimate[1]
output[1,4] <- moran.2$p.value
moran.3 <- moran.test(residuals(mod.3),
listw = nb.weight,
zero.policy = TRUE,
na.action = na.exclude)
output[1,5] <- moran.3$estimate[1]
output[1,6] <- moran.3$p.value
return(output)
}
results <- lapply(sar.list, FUN=.get_sar_moran, nb.weight)
names(results) <- names(sar.list)
return(results)
}
pcs.sar.lag.moran <- list()
pcs.sar.lag.moran$nri <- list()
pcs.sar.lag.moran$nri$"120" <- lapply(pcs.sar.lag$nri, FUN=get_sar_moran, w.d1)
pcs.sar.lag.moran$nri$"360" <- lapply(pcs.sar.lag$nri, FUN=get_sar_moran, w.d2)
pcs.sar.lag.moran$nri$"480" <- lapply(pcs.sar.lag$nri, FUN=get_sar_moran, w.d3)
pcs.sar.lag.moran$nti <- list()
pcs.sar.lag.moran$nti$"120" <- lapply(pcs.sar.lag$nti, FUN=get_sar_moran, w.d1)
pcs.sar.lag.moran$nti$"360" <- lapply(pcs.sar.lag$nti, FUN=get_sar_moran, w.d2)
pcs.sar.lag.moran$nti$"480" <- lapply(pcs.sar.lag$nti, FUN=get_sar_moran, w.d3)
pcs.sar.err.moran <- list()
pcs.sar.err.moran$nri <- list()
pcs.sar.err.moran$nri$"120" <- lapply(pcs.sar.err$nri, FUN=get_sar_moran, w.d1)
pcs.sar.err.moran$nri$"360" <- lapply(pcs.sar.err$nri, FUN=get_sar_moran, w.d2)
pcs.sar.err.moran$nri$"480" <- lapply(pcs.sar.err$nri, FUN=get_sar_moran, w.d3)
pcs.sar.err.moran$nti <- list()
pcs.sar.err.moran$nti$"120" <- lapply(pcs.sar.err$nti, FUN=get_sar_moran, w.d1)
pcs.sar.err.moran$nti$"360" <- lapply(pcs.sar.err$nti, FUN=get_sar_moran, w.d2)
pcs.sar.err.moran$nti$"480" <- lapply(pcs.sar.err$nti, FUN=get_sar_moran, w.d3)
pcs.sar.lag.moran <- melt(pcs.sar.lag.moran, id.vars=colnames(pcs.sar.lag.moran$nri$"120"$"120"$Tmin))
pcs.sar.err.moran <- melt(pcs.sar.err.moran, id.vars=colnames(pcs.sar.err.moran$nri$"120"$"120"$Tmin))
colnames(pcs.sar.lag.moran) <- colnames(pcs.sar.err.moran) <- c("moran_i_1", "p.value_1", "moran_i_2", "p.value_2", "moran_i_3", "p.value_3", "variable", "test_distance", "model_distance", "pcs_metric")
write.csv(pcs.sar.lag.moran, file = "Results/Tables/SAR_lag_moran.csv")
write.csv(pcs.sar.err.moran, file = "Results/Tables/SAR_err_moran.csv")
# OLS - ANOVA -------------------------------------------------------------
get_ols_anova <- function(model_list){
aov <- anova(model_list$m1, model_list$m2, model_list$m3)
output <- data.frame(m1.Res.Df=double(),
m2.Res.Df=double(),
m3.Res.Df=double(),
m1.RSS=double(),
m2.RSS=double(),
m3.RSS=double(),
m1.Df=double(),
m2.Df=double(),
m3.Df=double(),
m1.Sum_of_sq=double(),
m2.Sum_of_sq=double(),
m3.Sum_of_sq=double(),
m1.F=double(),
m2.F=double(),
m3.F=double(),
m1.p.value=double(),
m2.p.value=double(),
m3.p.value=double())
output[1, 1:3] <- aov$Res.Df
output[1, 4:6] <- aov$RSS
output[1, 7:9] <- aov$Df
output[1, 10:12] <- aov$`Sum of Sq`
output[1, 13:15] <- aov$F
output[1, 16:18] <- aov$`Pr(>F)`
return(output)
}
pcs.ols.aov <- list()
pcs.ols.aov$nri <- lapply(pcs.ols$nri, FUN=get_ols_anova)
pcs.ols.aov$nti <- lapply(pcs.ols$nti, FUN=get_ols_anova)
names(pcs.ols.aov) <- PCS_METRICS
pcs.ols.aov <- melt(pcs.ols.aov, id.vars=colnames(pcs.ols.aov$nri$Tmin))
colnames(pcs.ols.aov) <- c(colnames(pcs.ols.aov)[1:18], "variable", "pcs_metric")
write.csv(pcs.ols.aov, file="Results/Tables/OLS_anova.csv")
# SAR - ANOVA ---------------------------------------------------------
get_sar_anova <- function(model_list, variable, metric){
aov <- anova(model_list$m1, model_list$m2, model_list$m3)
output <- data.frame(m1.df=integer(), m2.df=integer(), m3.df=integer(),
m1.AIC=double(), m2.AIC=double(), m3.AIC=double(),
m1.logLik=double(), m2.logLik=double(), m3.logLik=double(),
m1.Test=character(), m2.Test=character(), m3.Test=character(),
m1.L.Ratio=double(), m2.L.Ratio=double(), m3.L.Ratio=double(),
m1.p.value=double(), m2.p.value=double(), m3.p.value=double(),
stringsAsFactors=FALSE)
output[1, paste0("m", 1:3, ".df")] <- aov$df
output[1, paste0("m", 1:3, ".AIC")] <- aov$AIC
output[1, paste0("m", 1:3, ".logLik")] <- aov$logLik
output[1, paste0("m", 1:3, ".Test")] <- aov$Test
output[1, paste0("m", 1:3, ".L.Ratio")] <- aov$L.Ratio
output[1, paste0("m", 1:3, ".p.value")] <- aov$`p-value`
return(output)
}
# SAR lag
pcs.sar.lag.aov <- list()
pcs.sar.lag.aov$nri <- list()
pcs.sar.lag.aov$nri$"120" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nri$"120", names(pcs.sar.lag$nri$"120"), MoreArgs = list(metric="nri"), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nri$"360" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nri$"360", names(pcs.sar.lag$nri$"360"), MoreArgs = list(metric="nri"), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nri$"480" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nri$"480", names(pcs.sar.lag$nri$"480"), MoreArgs = list(metric="nri"), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nti <- list()
pcs.sar.lag.aov$nti$"120" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nti$"120", names(pcs.sar.lag$nti)[[1]], MoreArgs = list(names(pcs.sar.lag)[[2]]), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nti$"360" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nti$"360", names(pcs.sar.lag$nti)[[2]], MoreArgs = list(names(pcs.sar.lag)[[2]]), SIMPLIFY=FALSE)
pcs.sar.lag.aov$nti$"480" <- mapply(FUN = get_sar_anova, pcs.sar.lag$nti$"480", names(pcs.sar.lag$nti)[[3]], MoreArgs = list(names(pcs.sar.lag)[[2]]), SIMPLIFY=FALSE)
pcs.sar.lag.aov <- melt(pcs.sar.lag.aov, id.vars=colnames(pcs.sar.lag.aov$nri$"120"[[1]]))
colnames(pcs.sar.lag.aov) <- c(colnames(pcs.sar.lag.aov)[1:18], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.lag.aov, file="Results/Tables/SAR_lag_anova.csv")
rm(pcs.sar.lag.aov)
# SAR err
pcs.sar.err.aov <- list()
pcs.sar.err.aov$nri <- list()
pcs.sar.err.aov$nri$"120" <- mapply(FUN = get_sar_anova, pcs.sar.err$nri$"120", names(pcs.sar.err$nri)[[1]], MoreArgs = list(names(pcs.sar.err)[[1]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nri$"360" <- mapply(FUN = get_sar_anova, pcs.sar.err$nri$"360", names(pcs.sar.err$nri)[[2]], MoreArgs = list(names(pcs.sar.err)[[1]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nri$"480" <- mapply(FUN = get_sar_anova, pcs.sar.err$nri$"480", names(pcs.sar.err$nri)[[3]], MoreArgs = list(names(pcs.sar.err)[[1]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nti <- list()
pcs.sar.err.aov$nti$"120" <- mapply(FUN = get_sar_anova, pcs.sar.err$nti$"120", names(pcs.sar.err$nti)[[1]], MoreArgs = list(names(pcs.sar.err)[[2]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nti$"360" <- mapply(FUN = get_sar_anova, pcs.sar.err$nti$"360", names(pcs.sar.err$nti)[[2]], MoreArgs = list(names(pcs.sar.err)[[2]]), SIMPLIFY=FALSE)
pcs.sar.err.aov$nti$"480" <- mapply(FUN = get_sar_anova, pcs.sar.err$nti$"480", names(pcs.sar.err$nti)[[3]], MoreArgs = list(names(pcs.sar.err)[[2]]), SIMPLIFY=FALSE)
pcs.sar.err.aov <- melt(pcs.sar.err.aov, id.vars=colnames(pcs.sar.err.aov$nri$"120"[[1]]))
colnames(pcs.sar.err.aov) <- c(colnames(pcs.sar.err.aov)[1:18], "variable", "distance", "pcs_metric")
write.csv(pcs.sar.err.aov, file="Results/Tables/SAR_err_anova.csv")
rm(pcs.sar.err.aov)
|
library(ape)
testtree <- read.tree("5083_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5083_0_unrooted.txt")
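# Optional sanity check (sketch, not in the original script): confirm the tree
# really is unrooted before it is written out.
# stopifnot(!is.rooted(unrooted_tr))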
|
/codeml_files/newick_trees_processed/5083_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
library(ape)
testtree <- read.tree("5083_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5083_0_unrooted.txt")
|
\name{horizon_legend}
\alias{horizon_legend}
\title{
Legend for the horizon chart
}
\description{
Legend for the horizon chart
}
\usage{
horizon_legend(lt, title = "", format = "\%.2f",
template = "[{x1}, {x2}]", ...)
}
\arguments{
\item{lt}{The object returned by \code{\link{spiral_horizon}}.}
\item{title}{Title of the legend.}
\item{format}{Number format of the legend labels.}
\item{template}{Template to construct the labels.}
\item{...}{Pass to \code{\link[ComplexHeatmap]{Legend}}.}
}
\value{
A \code{\link[ComplexHeatmap]{Legend}} object.
}
\examples{
# There is no example
NULL
}
|
/man/horizon_legend.Rd
|
permissive
|
HaihuaWang-hub/spiralize
|
R
| false | false | 607 |
rd
|
\name{horizon_legend}
\alias{horizon_legend}
\title{
Legend for the horizon chart
}
\description{
Legend for the horizon chart
}
\usage{
horizon_legend(lt, title = "", format = "\%.2f",
template = "[{x1}, {x2}]", ...)
}
\arguments{
\item{lt}{The object returned by \code{\link{spiral_horizon}}.}
\item{title}{Title of the legend.}
\item{format}{Number format of the legend labels.}
\item{template}{Template to construct the labels.}
\item{...}{Pass to \code{\link[ComplexHeatmap]{Legend}}.}
}
\value{
A \code{\link[ComplexHeatmap]{Legend}} object.
}
\examples{
# There is no example
NULL
}
|
file <- "household_power_consumption.txt"
hhp <- read.table(file, header = FALSE, sep = ";", skip = 1, na.strings = "?")
cnames = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
names(hhp) <- cnames
hhp <- hhp[hhp$Date %in% c("1/2/2007", "2/2/2007"),]
hhp$Date_Time <- paste(hhp$Date, hhp$Time)
hhp$Date_Time <- strptime(hhp$Date_Time, "%d/%m/%Y %H:%M:%S")
#Fig3
plot(x = hhp$Date_Time, y = hhp$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(x = hhp$Date_Time, y = hhp$Sub_metering_1)
lines(x = hhp$Date_Time, y = hhp$Sub_metering_2, col = "red")
lines(x = hhp$Date_Time, y = hhp$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
dev.copy(png, "plot3.png")
dev.off()
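# Hedged alternative (sketch only; "plot3_direct.png" is a hypothetical name):
# open the png device directly instead of copying the screen device.
# png("plot3_direct.png", width = 480, height = 480)
# plot(x = hhp$Date_Time, y = hhp$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
# lines(x = hhp$Date_Time, y = hhp$Sub_metering_1)
# lines(x = hhp$Date_Time, y = hhp$Sub_metering_2, col = "red")
# lines(x = hhp$Date_Time, y = hhp$Sub_metering_3, col = "blue")
# legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
# dev.off()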
|
/plot3.R
|
no_license
|
bingfang/DataExplore
|
R
| false | false | 907 |
r
|
file <- "household_power_consumption.txt"
hhp <- read.table(file, header = FALSE, sep = ";", skip = 1, na.strings = "?")
cnames = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
names(hhp) <- cnames
hhp <- hhp[hhp$Date %in% c("1/2/2007", "2/2/2007"),]
hhp$Date_Time <- paste(hhp$Date, hhp$Time)
hhp$Date_Time <- strptime(hhp$Date_Time, "%d/%m/%Y %H:%M:%S")
#Fig3
plot(x = hhp$Date_Time, y = hhp$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(x = hhp$Date_Time, y = hhp$Sub_metering_1)
lines(x = hhp$Date_Time, y = hhp$Sub_metering_2, col = "red")
lines(x = hhp$Date_Time, y = hhp$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
dev.copy(png, "plot3.png")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predictive.R
\name{gfilmmPredictive}
\alias{gfilmmPredictive}
\title{Generalized fiducial predictive distributions}
\usage{
gfilmmPredictive(gfi, newdata)
}
\arguments{
\item{gfi}{a \code{\link{gfilmm}} object}
\item{newdata}{dataframe in which to look for variables with which to
predict, or \code{NULL} if the model is an intercept-only model without
random effect}
}
\value{
A list with two fields: \code{FPD}, a dataframe containing the
simulations, and \code{WEIGHT}, their weight. This is a \code{gfilmm}
object.
}
\description{
Simulations of the generalized fiducial predictive
distributions.
}
\note{
Actually the levels of the random effects given in \code{newdata} can
be different from the original levels. For instance, in the example
provided below, we enter \code{block = c("4","6")}, but we could also
enter \code{block = c("A","B")}, even though \code{"A"} and \code{"B"}
are not some levels of the \code{block} factor. Both options only mean
that the two observations to predict are in two different blocks.
}
\examples{
gfi <- gfilmm(
~ cbind(yield-0.1, yield+0.1), ~ N, ~ block, npk, 2000, nthreads = 2
)
fpd <- gfilmmPredictive(gfi, data.frame(N = c("0","1"), block = c("4","6")))
gfiSummary(fpd)
}
|
/fuzzedpackages/gfilmm/man/gfilmmPredictive.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | true | 1,327 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predictive.R
\name{gfilmmPredictive}
\alias{gfilmmPredictive}
\title{Generalized fiducial predictive distributions}
\usage{
gfilmmPredictive(gfi, newdata)
}
\arguments{
\item{gfi}{a \code{\link{gfilmm}} object}
\item{newdata}{dataframe in which to look for variables with which to
predict, or \code{NULL} if the model is an intercept-only model without
random effect}
}
\value{
A list with two fields: \code{FPD}, a dataframe containing the
simulations, and \code{WEIGHT}, their weight. This is a \code{gfilmm}
object.
}
\description{
Simulations of the generalized fiducial predictive
distributions.
}
\note{
Actually the levels of the random effects given in \code{newdata} can
be different from the original levels. For instance, in the example
provided below, we enter \code{block = c("4","6")}, but we could also
enter \code{block = c("A","B")}, even though \code{"A"} and \code{"B"}
are not some levels of the \code{block} factor. Both options only mean
that the two observations to predict are in two different blocks.
}
\examples{
gfi <- gfilmm(
~ cbind(yield-0.1, yield+0.1), ~ N, ~ block, npk, 2000, nthreads = 2
)
fpd <- gfilmmPredictive(gfi, data.frame(N = c("0","1"), block = c("4","6")))
gfiSummary(fpd)
}
|
# Description - UI for the "Runchart Builder" app: plots a run chart for the
# selected measure and board, with buttons to download the data and the chart
# User interface - how the app looks and the elements users can interact with
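# Assumed to be attached elsewhere (e.g. in global.R or server.R): shiny,
# shinyWidgets (for pickerInput) and a `measures` data frame with `measure`
# and `board` columns -- inferred from the calls below, not stated in this file.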
shinyUI(fluidPage(
tags$script(src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/3.5.16/iframeResizer.contentWindow.min.js",
type="text/javascript"),
title = "Runchart Builder",
# Show a plot of the generated distribution
br(),
fluidRow(column(12,
plotOutput("runchart")
)),
br(),
hr(),
fluidRow(
column(2, pickerInput(
inputId = "datatype",
label = "Select measure",
choices = as.character(unique(measures$measure)),
width = "fit"
)),
column(2, offset = 1, pickerInput(
inputId = "hb",
label = "Select board",
choices = as.character(unique(measures$board))
)),
column(2, offset = 1, style = "margin-top: 25px;",
downloadButton("downloaddata", "Download data")),
column(2, style = "margin-top: 25px;",
downloadButton("downloadchart", "Download chart"))),
HTML('<div data-iframe-height></div>')))
#br()
# Show table
#fluidRow(column(7, dataTableOutput("rundata"))
#)
|
/ui.R
|
no_license
|
SumnerSan/webcharts
|
R
| false | false | 1,206 |
r
|
# Description - Describe what the app does (e.g. visualizes births data)
# User interface - how your app looks and elements users can interact with
shinyUI(fluidPage(
tags$script(src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/3.5.16/iframeResizer.contentWindow.min.js",
type="text/javascript"),
title = "Runchart Builder",
# Show a plot of the generated distribution
br(),
fluidRow(column(12,
plotOutput("runchart")
)),
br(),
hr(),
fluidRow(
column(2, pickerInput(
inputId = "datatype",
label = "Select measure",
choices = as.character(unique(measures$measure)),
width = "fit"
)),
column(2, offset = 1, pickerInput(
inputId = "hb",
label = "Select board",
choices = as.character(unique(measures$board))
)),
column(2, offset = 1, style = "margin-top: 25px;",
downloadButton("downloaddata", "Download data")),
column(2, style = "margin-top: 25px;",
downloadButton("downloadchart", "Download chart"))),
HTML('<div data-iframe-height></div>')))
#br()
# Show table
#fluidRow(column(7, dataTableOutput("rundata"))
#)
|
#R
# $HeadURL: http://fgcz-svn.unizh.ch/repos/fgcz/testing/proteomics/R/protViz/R/deisotoper.R $
# $Id: deisotoper.R 6178 2014-02-27 09:33:30Z cpanse $
# $Date: 2014-02-27 10:33:30 +0100 (Thu, 27 Feb 2014) $
deisotoper <- function(data,
Z=1:4,
isotopPatternDF=averagine,
massError=0.005,
plot=FALSE){
colormap=rainbow(length(Z), alpha=0.5)
val <- lapply(data, function(x){
if ( length(x$mZ) > 1 ){
# TODO check if sorted
mZ.idx <- order(x$mZ)
x$mZ <- x$mZ[mZ.idx]
x$intensity <- x$intensity[mZ.idx]
out <- .Call("deisotoper_main", x$mZ, x$intensity, Z, averagine, massError, PACKAGE="protViz")
out$group <- lapply(out$group, function(x){ x[x==-1] <- NA; return(x)})
if(plot){
op<-par(mfrow=c(1,1), mar=c(4,4,4,4))
plot(x$mZ, x$intensity, col='grey',type ='h', log='', main=x$title)
for (i in 1:length(Z)){
mapply(function(xx, ss){
if (ss > 0.80 & length(xx)>2){
points(x$mZ[xx+1], x$intensity[xx+1], type='h', col=colormap[i], lwd=max(Z)-i)
iso.mean<-mean(x$mZ[xx+1])
text(x$mZ[min(xx+1)], x$intensity[min(xx+1)], round(x$mZ[min(xx+1)],2), srt=0, cex=0.50, pos=3, col=colormap[i])
text(iso.mean, x$intensity[min(xx+1)], round(iso.mean,2), srt=0, cex=0.50, pos=4, col=colormap[i])
}
}, out$result[[i]], out$score[[i]])
}
.deisotoperUtilPlot(x, out, Z, colormap)
}
}
return(out)
})
return(val)
}
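# --- Hedged usage sketch (illustration only; needs the compiled protViz code) ---
# Note that the isotopPatternDF argument is currently ignored: the .Call() above
# always passes the packaged 'averagine' table.
# peaks <- list(list(title = "toy spectrum",
#                    mZ = c(500.00, 500.50, 501.00, 501.50),
#                    intensity = c(120, 100, 50, 20)))
# out <- deisotoper(peaks, Z = 1:2, massError = 0.01, plot = FALSE)
# ---------------------------------------------------------------------------------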
.deisotoperUtilPlot <- function(x, out, Z, colormap){
op<-par(mfrow=c(2, 2), mar=c(3,3,3,1))
lapply(out$group, function(g){
for (i in 1:length(Z)){
if (! is.na(g[i])){
idx<-(out$result[[i]][[g[i] + 1]]) + 1
my.title <- paste("mass=", x$mZ[min(idx)],sep='')
plot(xx<-x$mZ[min(idx):max(idx)], yy<-x$intensity[min(idx):max(idx)],
type='h',
axes=FALSE,
col='grey',
xlim=c(x$mZ[min(idx)]-0.5, x$mZ[max(idx)]+0.5),
ylim=c(0, max(x$intensity[min(idx):max(idx)])),
xlab="mZ",
main=paste("mass=", x$mZ[min(idx)],sep=''),
ylab="intensity",
)
text(xx,yy,xx,pos=4,cex=0.75)
axis(1, x$mZ[min(idx):max(idx)], round(x$mZ[min(idx):max(idx)], 2))
axis(2)
break;
}
}
score <- rep(NA, length(Z))
score1 <- rep(NA, length(Z))
cscore1 <- rep(NA, length(Z))
cscore <- rep(NA, length(Z))
for (i in 1:length(Z)){
if (! is.na(g[i])){
idx<-(out$result[[i]][[g[i]+1]])
cscore[i]<-round(out$score[[i]][[g[i]+1]],2)
cscore1[i]<-round(out$score1[[i]][[g[i]+1]],2)
averagine.mZ <- x$mZ[idx+1]
averagine.intensity <- averagine[, findNN_(min(x$mZ[idx]) * Z[i], as.double(colnames(averagine)))]
L2.intensity <- sqrt(sum((x$intensity[idx+1])^2))
score[i] <- round((x$intensity[idx+1] / L2.intensity) %*% averagine.intensity[1:length(averagine.mZ)], 2)
idx2<-(idx+1)[2:length(idx)]
L2.intensity2 <- sqrt(sum((x$intensity[idx2])^2))
score1[i] <- round((x$intensity[idx2] / L2.intensity) %*% averagine.intensity[1:(length(averagine.mZ)-1)], 2)
points(x$mZ[idx+1], x$intensity[idx+1],
type='h',
col='#AAAAAAAA', lwd=3)
points(a.x<-averagine.mZ, a.y<-max(x$intensity[idx+1])*averagine.intensity[1:length(averagine.mZ)], col=colormap[i], pch=25, lwd=3)
text(a.x, a.y, length(idx), pos=4, col=colormap[i])
}
}
legend("topright", paste("c",Z,'=(',cscore,", ",cscore1,")", sep=''), pch=25, col=colormap, title='C++score', cex=1.0)
box()
})
}
|
/R/deisotoper.R
|
no_license
|
jjGG/protViz
|
R
| false | false | 4,217 |
r
|
#R
# $HeadURL: http://fgcz-svn.unizh.ch/repos/fgcz/testing/proteomics/R/protViz/R/deisotoper.R $
# $Id: deisotoper.R 6178 2014-02-27 09:33:30Z cpanse $
# $Date: 2014-02-27 10:33:30 +0100 (Thu, 27 Feb 2014) $
deisotoper <- function(data,
Z=1:4,
isotopPatternDF=averagine,
massError=0.005,
plot=FALSE){
colormap=rainbow(length(Z), alpha=0.5)
val <- lapply(data, function(x){
if ( length(x$mZ) > 1 ){
# TODO check if sorted
mZ.idx <- order(x$mZ)
x$mZ <- x$mZ[mZ.idx]
x$intensity <- x$intensity[mZ.idx]
out <- .Call("deisotoper_main", x$mZ, x$intensity, Z, averagine, massError, PACKAGE="protViz")
out$group <- lapply(out$group, function(x){ x[x==-1] <- NA; return(x)})
if(plot){
op<-par(mfrow=c(1,1), mar=c(4,4,4,4))
plot(x$mZ, x$intensity, col='grey',type ='h', log='', main=x$title)
for (i in 1:length(Z)){
mapply(function(xx, ss){
if (ss > 0.80 & length(xx)>2){
points(x$mZ[xx+1], x$intensity[xx+1], type='h', col=colormap[i], lwd=max(Z)-i)
iso.mean<-mean(x$mZ[xx+1])
text(x$mZ[min(xx+1)], x$intensity[min(xx+1)], round(x$mZ[min(xx+1)],2), srt=0, cex=0.50, pos=3, col=colormap[i])
text(iso.mean, x$intensity[min(xx+1)], round(iso.mean,2), srt=0, cex=0.50, pos=4, col=colormap[i])
}
}, out$result[[i]], out$score[[i]])
}
.deisotoperUtilPlot(x, out, Z, colormap)
}
}
return(out)
})
return(val)
}
.deisotoperUtilPlot <- function(x, out, Z, colormap){
op<-par(mfrow=c(2, 2), mar=c(3,3,3,1))
lapply(out$group, function(g){
for (i in 1:length(Z)){
if (! is.na(g[i])){
idx<-(out$result[[i]][[g[i] + 1]]) + 1
my.title <- paste("mass=", x$mZ[min(idx)],sep='')
plot(xx<-x$mZ[min(idx):max(idx)], yy<-x$intensity[min(idx):max(idx)],
type='h',
axes=FALSE,
col='grey',
xlim=c(x$mZ[min(idx)]-0.5, x$mZ[max(idx)]+0.5),
ylim=c(0, max(x$intensity[min(idx):max(idx)])),
xlab="mZ",
main=paste("mass=", x$mZ[min(idx)],sep=''),
ylab="intensity",
)
text(xx,yy,xx,pos=4,cex=0.75)
axis(1, x$mZ[min(idx):max(idx)], round(x$mZ[min(idx):max(idx)], 2))
axis(2)
break;
}
}
score <- rep(NA, length(Z))
score1 <- rep(NA, length(Z))
cscore1 <- rep(NA, length(Z))
cscore <- rep(NA, length(Z))
for (i in 1:length(Z)){
if (! is.na(g[i])){
idx<-(out$result[[i]][[g[i]+1]])
cscore[i]<-round(out$score[[i]][[g[i]+1]],2)
cscore1[i]<-round(out$score1[[i]][[g[i]+1]],2)
averagine.mZ <- x$mZ[idx+1]
averagine.intensity <- averagine[, findNN_(min(x$mZ[idx]) * Z[i], as.double(colnames(averagine)))]
L2.intensity <- sqrt(sum((x$intensity[idx+1])^2))
score[i] <- round((x$intensity[idx+1] / L2.intensity) %*% averagine.intensity[1:length(averagine.mZ)], 2)
idx2<-(idx+1)[2:length(idx)]
L2.intensity2 <- sqrt(sum((x$intensity[idx2])^2))
score1[i] <- round((x$intensity[idx2] / L2.intensity) %*% averagine.intensity[1:(length(averagine.mZ)-1)], 2)
points(x$mZ[idx+1], x$intensity[idx+1],
type='h',
col='#AAAAAAAA', lwd=3)
points(a.x<-averagine.mZ, a.y<-max(x$intensity[idx+1])*averagine.intensity[1:length(averagine.mZ)], col=colormap[i], pch=25, lwd=3)
text(a.x, a.y, length(idx), pos=4, col=colormap[i])
}
}
legend("topright", paste("c",Z,'=(',cscore,", ",cscore1,")", sep=''), pch=25, col=colormap, title='C++score', cex=1.0)
box()
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mendel-interface.R
\name{mendel_control_list}
\alias{mendel_control_list}
\title{return a list of values for a Mendel definitions file for gene dropping}
\usage{
mendel_control_list(ID, Reps, Seed)
}
\arguments{
\item{ID}{the name/prefix to be given to all the files involved in this run.}
}
\description{
return a list of values for a Mendel definitions file for gene dropping
}
|
/man/mendel_control_list.Rd
|
no_license
|
krshedd/CKMRsim
|
R
| false | true | 458 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mendel-interface.R
\name{mendel_control_list}
\alias{mendel_control_list}
\title{return a list of values for a Mendel definitions file for gene dropping}
\usage{
mendel_control_list(ID, Reps, Seed)
}
\arguments{
\item{ID}{the name/prefix to be given to all the files involved in this run.}
}
\description{
return a list of values for a Mendel definitions file for gene dropping
}
|
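# Assumed package-level imports (declared elsewhere in the package, not here):
# tools (pkgVignettes, checkVignettes, Rd_db, Rd2ex), fs (path, dir_copy, ...),
# purrr (iwalk, map, map_dfr), stringr, readr, optparse and processx.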
#' @export
build_vignettes <- function(settings) {
vignettes <- pkgVignettes(settings$package, source=TRUE)
## if there are vignettes and there are no vignette sources,
## then compile the vignettes to sources. This compilation
## will result in .R files in the doc directory of package
## and will be picked up by the next step of the program
if (length(vignettes$docs) != 0 &&
length(vignettes$sources) == 0) {
checkVignettes(settings$package,
find.package(settings$package)[1],
tangle = TRUE,
weave = FALSE,
workdir = "src")
}
}
#' @export
copy_vignettes <- function(settings) {
build_vignettes(settings)
destination_paths <- path(settings$corpus_dirpath,
settings$package,
c("doc", "data"))
vignettes <- vignette(package = settings$package)$results
if(nrow(vignettes) == 0) {
dir_create(destination_paths)
}
else {
source_paths <- path(vignettes[1, "LibPath"],
vignettes[1, "Package"],
c("doc", "data"))
if(dir_exists(source_paths[1]))
dir_copy(source_paths[1], destination_paths[1])
if(dir_exists(source_paths[2]))
dir_copy(source_paths[2], destination_paths[2])
}
"doc"
}
#' @export
copy_tests <- function(settings) {
destination_path <- path(settings$corpus_dirpath, settings$package, "tests")
source_path <- path(find.package(settings$package), "tests")
if(!dir_exists(source_path)) {
dir_create(destination_path)
} else {
dir_copy(source_path, destination_path)
}
"tests"
}
#' @export
copy_examples <- function(settings) {
destination_path <- path(settings$corpus_dirpath,
settings$package,
"examples")
dir_create(destination_path)
db <- tryCatch({
Rd_db(settings$package)
}, error=function(e) {
print(e)
list()
})
iwalk(db, function(rd_data, rd_name) {
example_filepath <-
destination_path %>%
path(path_file(rd_name)) %>%
path_ext_set("R")
Rd2ex(rd_data, example_filepath, defines=NULL)
if(file_exists(example_filepath)) {
new_content <-
str_c(str_glue("library({settings$package})"),
"",
read_file(example_filepath),
sep = "\n")
write_file(new_content,
example_filepath)
}
})
"examples"
}
#' @export
wrap_scripts <- function(settings, wrap_script, script_dirname) {
path(settings$corpus_dirpath, settings$package, script_dirname) %>%
dir_ls(type = "file", glob = "*.R") %>%
path_file() %>%
map_dfr(
function(script_filename) {
wrap_script(settings, script_dirname, script_filename)
}
)
}
#' @export
run_script <- function(settings, script_filepath) {
cat("Executing ", script_filepath, "\n")
processx::run(command = settings$r_dyntrace,
args = str_c("--file=", script_filepath),
timeout = settings$tracing_timeout,
cleanup_tree = TRUE)
script_filepath
}
#' @export
run_scripts <- function(settings, script_filepaths) {
script_filepaths %>%
map(function(script_filepath) {
run_script(settings, script_filepath)
})
}
#' @export
create_trace_settings <- function(package,
tracing_timeout,
r_dyntrace,
corpus_dirpath,
raw_analysis_dirpath,
verbose,
truncate,
binary,
compression_level) {
structure(list(package = package,
tracing_timeout = tracing_timeout,
r_dyntrace = r_dyntrace,
corpus_dirpath = corpus_dirpath,
raw_analysis_dirpath = raw_analysis_dirpath,
verbose = verbose,
truncate = truncate,
binary = binary,
compression_level = compression_level),
class = "dynalyzer.settings.trace")
}
#' @export
parse_trace_settings <- function(args = commandArgs(trailingOnly = TRUE)) {
option_list <- list(
make_option(c("--tracing-timeout"),
action = "store",
type = "integer",
default = 60 * 60,
help="Timeout for tracing a script",
metavar="tracing-timeout"),
make_option(c("--r-dyntrace"),
action="store",
type="character",
help="",
metavar="r-dyntrace"),
make_option(c("--corpus-dirpath"),
action="store",
type="character",
help="",
metavar="corpus-dirpath"),
make_option(c("--raw-analysis-dirpath"),
action="store",
type="character",
help="Output directory for raw tracer analysis (*.tdf)",
metavar="raw-analysis-dirpath"),
make_option(c("--verbose"),
action="store_true",
default=FALSE,
help="Flag to enable verbose mode.",
metavar="verbose"),
make_option(c("--truncate"),
action="store_true",
default=FALSE,
help="Flag to enable overwriting of trace files",
metavar="truncate"),
make_option(c("--binary"),
action="store_true",
default = FALSE,
help="Output data format",
metavar="binary"),
make_option(c("--compression-level"),
action="store",
type="integer",
default=1,
help="Compression level for ZSTD streaming compression",
metavar="compression-level")
)
args <- parse_args(OptionParser(option_list = option_list),
positional_arguments = TRUE,
args = args)
create_trace_settings(package = args$args[1],
tracing_timeout = args$options$`tracing-timeout`,
r_dyntrace = path(getwd(), path_tidy(args$options$`r-dyntrace`)),
corpus_dirpath = path(getwd(), path_tidy(args$options$`corpus-dirpath`)),
raw_analysis_dirpath = path(getwd(), path_tidy(args$options$`raw-analysis-dirpath`)),
verbose = args$options$verbose,
truncate = args$options$truncate,
binary = args$options$binary,
compression_level = args$options$`compression-level`)
}
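# --- Hedged usage sketch (hypothetical paths and package name) -------------------
# settings <- parse_trace_settings(c("--r-dyntrace=bin/R-dyntrace",
#                                    "--corpus-dirpath=corpus",
#                                    "--raw-analysis-dirpath=raw-analysis",
#                                    "somepackage"))
# copy_vignettes(settings); copy_tests(settings); copy_examples(settings)
# ----------------------------------------------------------------------------------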
|
/R/trace.R
|
no_license
|
PRL-PRG/dynalyzer
|
R
| false | false | 7,324 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cache.R
\name{eia_clear_cache}
\alias{eia_clear_cache}
\alias{eia_clear_cats}
\alias{eia_clear_series}
\alias{eia_clear_geoset}
\title{Clear API results cache}
\usage{
eia_clear_cache()
eia_clear_cats()
eia_clear_series()
eia_clear_geoset()
}
\description{
Reset the results of API calls that are currently cached in memory.
}
\details{
\code{eia_clear_cache} clears the entire cache. The other functions clear the cache associated with specific endpoints.
}
\examples{
\dontrun{
key <- Sys.getenv("EIA_KEY") # your stored API key
system.time(eia_cats(key))
system.time(eia_cats(key))
eia_clear_cache()
system.time(eia_cats(key))
}
}
|
/man/eia_clear_cache.Rd
|
permissive
|
daranzolin/eia
|
R
| false | true | 715 |
rd
|
#' @import dplyr lubridate shiny shinyFiles
NULL
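# Declare the column names used via non-standard evaluation in dplyr pipelines
# so R CMD check does not flag them as undefined global variables.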
globalVariables(c("dn", "dt", "aw", "jerk"))
|
/R/zzz.R
|
permissive
|
FlukeAndFeather/bcg.annotation
|
R
| false | false | 95 |
r
|
png(filename="plot4.png", width=480, height=480, units="px")
d <- read.csv('~/Downloads/household_power_consumption.txt', sep=';', na.strings='?', colClasses=c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"), skip=66637, nrows=2880, header=F)
colnames(d) = colnames(read.csv('~/Downloads/household_power_consumption.txt', sep=';', nrows=1))
d[,1] <- as.Date(d[,1], "%d/%m/%Y")
d[,2] = as.POSIXct(strptime(paste(d[,1], d[,2], sep=" "), format="%Y-%m-%d %H:%M:%S"))
par(mfcol=c(2, 2))
# Top-left plot
plot(d[,2], d[,'Global_active_power'], type="l", xlab=NA, ylab="Global Active Power")
# Bottom-left plot
plot(d[,2], d[,'Sub_metering_1'], type="n", xlab=NA, ylab="Energy sub metering")
lines(d[,2], d[,'Sub_metering_1'])
lines(d[,2], d[,'Sub_metering_2'], col="red")
lines(d[,2], d[,'Sub_metering_3'], col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=c(1, 1, 1), bty="n")
# Top-right plot
plot(d[,2], d[,'Voltage'], type="l", xlab="datetime", ylab="Voltage")
# Bottom-right plot
plot(d[,2], d[,'Global_reactive_power'], type="l", xlab="datetime", ylab="Global_reactive_power")
# close the graphics device so the PNG file is written out completely
dev.off()
|
/plot4.R
|
no_license
|
malexw/ExData_Plotting1
|
R
| false | false | 1,211 |
r
|
context("To test the APACHE Glasgow Coma Scale calculator")
test_that("To see if the gen_apache_gcs calculates the correct APACHE score",{
# ddata <- NULL
# hr <- "Heart rate"
# Various
ddata <- data.table("time" = c(sample(seq(0, 24, 1), 2, replace = T), 35),
"site" = sample(c("YY", "ZY"), 3, replace = T),
"episode_id" = sample(seq(116, 150, 1), 3, replace = F),
"GCS - total" = c(10, NA, 15))
gen_apache_gcs(dt = ddata, window = c(0,24))
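  # Expected scores (reader's note): the APACHE GCS component is 15 - GCS, so
  # GCS 10 scores 5; the missing GCS appears to be treated as normal (0); the
  # third reading is taken at t = 35, outside the 0-24 h window, hence NA.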
expect_equal(c(5, 0, NA), ddata[, apache_gcs])
# ddata <- data.table("time" = c(5, 6, 22, 24, 30, 5, 8, 11, 12),
# "site" = sample(c("YY", "ZY"), 9, replace = T),
# "episode_id" = sample(seq(116, 130, 1), 9, replace = F),
# "Heart rate" = c(25, 50, 59, 70, 70, 125, 155, 195 , NA),
# "PaO2 - ABG" = c(),
# "PaCO2 - ABG" = c(),
# "PaO2/FiO2 ratio" = c())
#
# expect_error(gen_grad(dt = ddata))
})
|
/tests/testthat/test_gen_apache_gcs.R
|
no_license
|
CC-HIC/apacher
|
R
| false | false | 1,067 |
r
|
#' Plot Conditional Coefficients in Mixed-Effects Models with Imputed Data and Interaction Terms
#'
#' \code{interplot.mlmmi} is a method to calculate conditional coefficient estimates from the results of multilevel (mixed-effects) regression models with interaction terms and multiply imputed data.
#'
#' @param m A model object including an interaction term, or, alternately, a data frame recording conditional coefficients.
#' @param var1 The name (as a string) of the variable of interest in the interaction term; its conditional coefficient estimates will be plotted.
#' @param var2 The name (as a string) of the other variable in the interaction term.
#' @param plot A logical value indicating whether the output is a plot or a dataframe including the conditional coefficient estimates of var1, their upper and lower bounds, and the corresponding values of var2.
#' @param hist A logical value indicating if there is a histogram of `var2` added at the bottom of the conditional effect plot.
#' @param var2_dt A numerical value indicating the frequency distribution of `var2`. It is only used when `hist == TRUE`. When the object is a model, the default is the distribution of `var2` of the model.
#' @param point A logical value determining the format of plot. By default, the function produces a line plot when var2 takes on ten or more distinct values and a point (dot-and-whisker) plot otherwise; option TRUE forces a point plot.
#' @param sims Number of independent simulation draws used to calculate upper and lower bounds of coefficient estimates: lower values run faster; higher values produce smoother curves.
#' @param xmin A numerical value indicating the minimum value of x shown in the graph. Rarely used.
#' @param xmax A numerical value indicating the maximum value of x shown in the graph. Rarely used.
#' @param ercolor A character value indicating the outline color of the whisker or ribbon.
#' @param esize A numerical value indicating the size of the whisker or ribbon.
#' @param ralpha A numerical value indicating the transparency of the ribbon.
#' @param rfill A character value indicating the filling color of the ribbon.
#' @param ... Other ggplot aesthetics arguments for points in the dot-whisker plot or lines in the line-ribbon plots. Not currently used.
#'
#' @details \code{interplot.mlmmi} is an S3 method from the \code{interplot} package. It works on lists of mixed-effects objects with class \code{lmerMod} and \code{glmerMod} generated by \code{mitools} and \code{lme4}.
#'
#' Because the output function is based on \code{\link[ggplot2]{ggplot}}, any additional arguments and layers supported by \code{ggplot2} can be added with the \code{+}.
#'
#' @return The function returns a \code{ggplot} object.
#'
#' @importFrom abind abind
#' @importFrom arm sim
#' @importFrom stats quantile
#' @import ggplot2
#'
#'
#' @export
# Coding function for mlm, mi objects
interplot.mlmmi <- function(m, var1, var2, plot = TRUE, hist = FALSE, var2_dt = NA, point = FALSE, sims = 5000, xmin = NA, xmax = NA, ercolor = NA, esize = 0.5, ralpha = 0.5, rfill = "grey70", ...) {
set.seed(324)
m.list <- m
m <- m.list[[1]]
class(m.list) <- class(m)
m.sims.list <- lapply(m.list, function(i) arm::sim(i, sims))
m.sims <- m.sims.list[[1]]
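    # Pool the simulations: stack the fixed- and random-effect draws from every
    # imputed-data fit into a single sim object, so the quantiles computed below
    # reflect both within- and between-imputation variability.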
for (i in 2:length(m.sims.list)) {
m.sims@fixef <- rbind(m.sims@fixef, m.sims.list[[i]]@fixef)
m.sims@ranef[[1]] <- abind::abind(m.sims@ranef[[1]], m.sims.list[[i]]@ranef[[1]], along = 1)
}
### For factor base terms###
factor_v1 <- factor_v2 <- FALSE
if (is.factor(eval(parse(text = paste0("m@frame$", var1)))) & is.factor(eval(parse(text = paste0("m@frame$",
var2)))))
stop("The function does not support interactions between two factors.")
if (is.factor(eval(parse(text = paste0("m@frame$", var1))))) {
var1_bk <- var1
var1 <- paste0(var1, levels(eval(parse(text = paste0("m@frame$", var1)))))
factor_v1 <- TRUE
ifelse(var1 == var2, var12 <- paste0("I(", var1, "^2)"), var12 <- paste0(var2, ":", var1)[-1])
    # the first category is dropped to avoid multicollinearity
for (i in seq(var12)) {
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
var12[i] <- paste0(var1, ":", var2)[-1][i]
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
stop(paste("Model does not include the interaction of", var1, "and", var2, "."))
}
} else if (is.factor(eval(parse(text = paste0("m@frame$", var2))))) {
var2_bk <- var2
var2 <- paste0(var2, levels(eval(parse(text = paste0("m@frame$", var2)))))
factor_v2 <- TRUE
ifelse(var1 == var2, var12 <- paste0("I(", var1, "^2)"), var12 <- paste0(var2, ":", var1)[-1])
    # the first category is dropped to avoid multicollinearity
for (i in seq(var12)) {
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
var12[i] <- paste0(var1, ":", var2)[-1][i]
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
stop(paste("Model does not include the interaction of", var1, "and", var2, "."))
}
} else {
ifelse(var1 == var2, var12 <- paste0("I(", var1, "^2)"), var12 <- paste0(var2, ":", var1))
    # the first category is dropped to avoid multicollinearity
for (i in seq(var12)) {
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
var12[i] <- paste0(var1, ":", var2)[i]
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
stop(paste("Model does not include the interaction of", var1, "and", var2, "."))
}
}
###################
if (factor_v2) {
xmin <- 0
xmax <- 1
steps <- 2
} else {
if (is.na(xmin))
xmin <- min(m@frame[var2], na.rm = T)
if (is.na(xmax))
xmax <- max(m@frame[var2], na.rm = T)
steps <- eval(parse(text = paste0("length(unique(na.omit(m@frame$", var2, ")))")))
if (steps > 100)
steps <- 100 # avoid redundant calculation
}
coef <- data.frame(fake = seq(xmin, xmax, length.out = steps), coef1 = NA, ub = NA, lb = NA)
coef_df <- data.frame(fake = numeric(0), coef1 = numeric(0), ub = numeric(0), lb = numeric(0),
model = character(0))
if (factor_v1) {
for (j in 1:(length(eval(parse(text = paste0("m$xlevel$", var1_bk)))) - 1)) {
      # only n - 1 interactions; one category is dropped to avoid multicollinearity
for (i in 1:steps) {
coef$coef1[i] <- mean(m.sims@fixef[, match(var1[j + 1], unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))])
coef$ub[i] <- quantile(m.sims@fixef[, match(var1[j + 1], unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.975)
coef$lb[i] <- quantile(m.sims@fixef[, match(var1[j + 1], unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.025)
}
if (plot == TRUE) {
coef$value <- var1[j + 1]
coef_df <- rbind(coef_df, coef)
if (hist == TRUE) {
if (is.na(var2_dt)) {
var2_dt <- eval(parse(text = paste0("m@frame$", var2)))
} else {
var2_dt <- var2_dt
}
}
} else {
names(coef) <- c(var2, "coef", "ub", "lb")
return(coef)
}
}
coef_df$value <- as.factor(coef_df$value)
interplot.plot(m = coef_df, hist = hist, var2_dt = var2_dt, point = point, ercolor = ercolor, esize = esize, ralpha = ralpha, rfill = rfill, ...) + facet_grid(. ~ value)
} else if (factor_v2) {
for (j in 1:(length(eval(parse(text = paste0("m$xlevel$", var2_bk)))) - 1)) {
      # only n - 1 interactions; one category is dropped to avoid multicollinearity
for (i in 1:steps) {
coef$coef1[i] <- mean(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))])
coef$ub[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.975)
coef$lb[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.025)
}
if (plot == TRUE) {
coef$value <- var2[j + 1]
coef_df <- rbind(coef_df, coef)
if (hist == TRUE) {
if (is.na(var2_dt)) {
var2_dt <- eval(parse(text = paste0("m@frame$", var2)))
} else {
var2_dt <- var2_dt
}
}
} else {
names(coef) <- c(var2, "coef", "ub", "lb")
return(coef)
}
}
coef_df$value <- as.factor(coef_df$value)
interplot.plot(m = coef_df, point = point) + facet_grid(. ~ value)
} else {
## Correct marginal effect for quadratic terms
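    ## d/dx (b1*x + b11*x^2) = b1 + 2*b11*x, hence the factor of 2 when var1 == var2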
multiplier <- if (var1 == var2)
2 else 1
for (i in 1:steps) {
coef$coef1[i] <- mean(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] + multiplier *
coef$fake[i] * m.sims@fixef[, match(var12, unlist(dimnames(m@pp$X)[2]))])
coef$ub[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] + multiplier *
coef$fake[i] * m.sims@fixef[, match(var12, unlist(dimnames(m@pp$X)[2]))], 0.975)
coef$lb[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] + multiplier *
coef$fake[i] * m.sims@fixef[, match(var12, unlist(dimnames(m@pp$X)[2]))], 0.025)
}
if (plot == TRUE) {
if (hist == TRUE) {
if (is.na(var2_dt)) {
var2_dt <- eval(parse(text = paste0("m@frame$", var2)))
} else {
var2_dt <- var2_dt
}
}
interplot.plot(m = coef, hist = hist, var2_dt = var2_dt, point = point, ercolor = ercolor, esize = esize, ralpha = ralpha, rfill = rfill, ...)
} else {
names(coef) <- c(var2, "coef", "ub", "lb")
return(coef)
}
}
}
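## Hypothetical usage sketch (names are placeholders, not part of the original):
## `fits` would be a list of lmer() models, one per imputed dataset, all sharing
## a formula such as y ~ x1 * x2 + (1 | group):
## interplot.mlmmi(m = fits, var1 = "x1", var2 = "x2")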
#' @export
interplot.gmlmmi <- function(m, var1, var2, plot = TRUE, hist = FALSE, var2_dt = NA, point = FALSE, sims = 5000, xmin = NA, xmax = NA, ercolor = NA, esize = 0.5, ralpha = 0.5, rfill = "grey70", ...) {
set.seed(324)
m.list <- m
m <- m.list[[1]]
class(m.list) <- class(m)
m.sims.list <- lapply(m.list, function(i) arm::sim(i, sims))
m.sims <- m.sims.list[[1]]
for (i in 2:length(m.sims.list)) {
m.sims@fixef <- rbind(m.sims@fixef, m.sims.list[[i]]@fixef)
m.sims@ranef[[1]] <- abind::abind(m.sims@ranef[[1]], m.sims.list[[i]]@ranef[[1]], along = 1)
}
### For factor base terms###
factor_v1 <- factor_v2 <- FALSE
if (is.factor(eval(parse(text = paste0("m@frame$", var1)))) & is.factor(eval(parse(text = paste0("m@frame$",
var2)))))
stop("The function does not support interactions between two factors.")
if (is.factor(eval(parse(text = paste0("m@frame$", var1))))) {
var1_bk <- var1
var1 <- paste0(var1, levels(eval(parse(text = paste0("m@frame$", var1)))))
factor_v1 <- TRUE
ifelse(var1 == var2, var12 <- paste0("I(", var1, "^2)"), var12 <- paste0(var2, ":", var1)[-1])
    # the first category is dropped to avoid multicollinearity
for (i in seq(var12)) {
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
var12[i] <- paste0(var1, ":", var2)[-1][i]
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
stop(paste("Model does not include the interaction of", var1, "and", var2, "."))
}
} else if (is.factor(eval(parse(text = paste0("m@frame$", var2))))) {
var2_bk <- var2
var2 <- paste0(var2, levels(eval(parse(text = paste0("m@frame$", var2)))))
factor_v2 <- TRUE
ifelse(var1 == var2, var12 <- paste0("I(", var1, "^2)"), var12 <- paste0(var2, ":", var1)[-1])
    # the first category is dropped to avoid multicollinearity
for (i in seq(var12)) {
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
var12[i] <- paste0(var1, ":", var2)[-1][i]
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
stop(paste("Model does not include the interaction of", var1, "and", var2, "."))
}
} else {
ifelse(var1 == var2, var12 <- paste0("I(", var1, "^2)"), var12 <- paste0(var2, ":", var1))
    # the first category is dropped to avoid multicollinearity
for (i in seq(var12)) {
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
var12[i] <- paste0(var1, ":", var2)[i]
if (!var12[i] %in% unlist(dimnames(m@pp$X)[2]))
stop(paste("Model does not include the interaction of", var1, "and", var2, "."))
}
}
###################
if (factor_v2) {
xmin <- 0
xmax <- 1
steps <- 2
} else {
if (is.na(xmin))
xmin <- min(m@frame[var2], na.rm = T)
if (is.na(xmax))
xmax <- max(m@frame[var2], na.rm = T)
steps <- eval(parse(text = paste0("length(unique(na.omit(m@frame$", var2, ")))")))
if (steps > 100)
steps <- 100 # avoid redundant calculation
}
coef <- data.frame(fake = seq(xmin, xmax, length.out = steps), coef1 = NA, ub = NA, lb = NA)
coef_df <- data.frame(fake = numeric(0), coef1 = numeric(0), ub = numeric(0), lb = numeric(0),
model = character(0))
if (factor_v1) {
for (j in 1:(length(eval(parse(text = paste0("m$xlevel$", var1_bk)))) - 1)) {
      # only n - 1 interactions; one category is dropped to avoid multicollinearity
for (i in 1:steps) {
coef$coef1[i] <- mean(m.sims@fixef[, match(var1[j + 1], unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))])
coef$ub[i] <- quantile(m.sims@fixef[, match(var1[j + 1], unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.975)
coef$lb[i] <- quantile(m.sims@fixef[, match(var1[j + 1], unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.025)
}
if (plot == TRUE) {
coef$value <- var1[j + 1]
coef_df <- rbind(coef_df, coef)
if (hist == TRUE) {
if (is.na(var2_dt)) {
var2_dt <- eval(parse(text = paste0("m@frame$", var2)))
} else {
var2_dt <- var2_dt
}
}
} else {
names(coef) <- c(var2, "coef", "ub", "lb")
return(coef)
}
}
coef_df$value <- as.factor(coef_df$value)
interplot.plot(m = coef_df, hist = hist, var2_dt = var2_dt, point = point, ercolor = ercolor, esize = esize, ralpha = ralpha, rfill = rfill, ...) + facet_grid(. ~ value)
} else if (factor_v2) {
for (j in 1:(length(eval(parse(text = paste0("m$xlevel$", var2_bk)))) - 1)) {
      # only n - 1 interactions; one category is dropped to avoid multicollinearity
for (i in 1:steps) {
coef$coef1[i] <- mean(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))])
coef$ub[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.975)
coef$lb[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] +
coef$fake[i] * m.sims@fixef[, match(var12[j], unlist(dimnames(m@pp$X)[2]))], 0.025)
}
if (plot == TRUE) {
coef$value <- var2[j + 1]
coef_df <- rbind(coef_df, coef)
if (hist == TRUE) {
if (is.na(var2_dt)) {
var2_dt <- eval(parse(text = paste0("m@frame$", var2)))
} else {
var2_dt <- var2_dt
}
}
} else {
names(coef) <- c(var2, "coef", "ub", "lb")
return(coef)
}
}
coef_df$value <- as.factor(coef_df$value)
interplot.plot(m = coef_df, hist = hist, var2_dt = var2_dt, point = point, ercolor = ercolor, esize = esize, ralpha = ralpha, rfill = rfill, ...) + facet_grid(. ~ value)
} else {
## Correct marginal effect for quadratic terms
multiplier <- if (var1 == var2)
2 else 1
for (i in 1:steps) {
coef$coef1[i] <- mean(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] + multiplier *
coef$fake[i] * m.sims@fixef[, match(var12, unlist(dimnames(m@pp$X)[2]))])
coef$ub[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] + multiplier *
coef$fake[i] * m.sims@fixef[, match(var12, unlist(dimnames(m@pp$X)[2]))], 0.975)
coef$lb[i] <- quantile(m.sims@fixef[, match(var1, unlist(dimnames(m@pp$X)[2]))] + multiplier *
coef$fake[i] * m.sims@fixef[, match(var12, unlist(dimnames(m@pp$X)[2]))], 0.025)
}
if (plot == TRUE) {
if (hist == TRUE) {
if (is.na(var2_dt)) {
var2_dt <- eval(parse(text = paste0("m@frame$", var2)))
} else {
var2_dt <- var2_dt
}
}
interplot.plot(m = coef, hist = hist, var2_dt = var2_dt, point = point, ercolor = ercolor, esize = esize, ralpha = ralpha, rfill = rfill, ...)
} else {
names(coef) <- c(var2, "coef", "ub", "lb")
return(coef)
}
}
}
|
/interplot/R/Interplot_mlmmi.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 19,421 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_acs.R, R/data_decennial.R
\docType{data}
\name{dict_all_geocomponent_2000}
\alias{dict_all_geocomponent_2000}
\alias{dict_all_geocomponent_2000}
\title{List of all geographic components, 2000 version}
\format{A data.table with 99 rows and 2 variables:
\describe{
\item{code}{code for the geocomponent, such as "01" and "M3"}
\item{geo_component}{description of the geographic component}
}}
\source{
2000 Census Summary File 1
\href{https://www.census.gov/prod/cen2000/doc/sf1.pdf}{technical documentation}
page 7-15
}
\usage{
dict_all_geocomponent_2000
dict_all_geocomponent_2000
}
\description{
List of all geographic components, 2000 version
This dataset contains all available geographic components and codes.
}
\keyword{datasets}
|
/man/dict_all_geocomponent_2000.Rd
|
no_license
|
raivtash/totalcensus
|
R
| false | true | 823 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/JSTOR_2bigramscor.R
\name{JSTOR_2bigramscor}
\alias{JSTOR_2bigramscor}
\title{Plot the change over time of the correlation between one bigram (or set of bigrams) and another bigram (or set of bigrams) in a JSTOR DfR dataset}
\usage{
JSTOR_2bigramscor(unpack2, bigram1, bigram2, span = 0.4)
}
\arguments{
\item{unpack2}{object returned by the function JSTOR_unpack2.}
\item{bigram1}{One bigram or a vector of bigrams, each bigram surrounded by standard quote marks.}
\item{bigram2}{One bigram or a vector of bigrams, each bigram surrounded by standard quote marks.}
\item{span}{span of the loess line (controls the degree of smoothing). Default is 0.4}
}
\value{
Returns a ggplot object with publication year on the horizontal axis and Pearson's correlation on the vertical axis. Each point represents all the documents of a single year, point size is inversely proportional to p-value of the correlation.
}
\description{
Function to plot changes in the correlation of two sets of bigrams (two sets of 2-grams, or two sets of multiple bigrams) over time. For use with JSTOR's Data for Research datasets (http://dfr.jstor.org/).
}
\examples{
## JSTOR_2bigramscor(unpack2, bigram1 = "hot water", bigram2 = "cold water")
## JSTOR_2bigramscor(unpack2, c("hot water", "warm water", "tepid water"), c("cold water", "ice water"))
}
|
/man/JSTOR_2bigramscor.Rd
|
no_license
|
brooksambrose/JSTORr
|
R
| false | false | 1,414 |
rd
|
\name{PharmPow-package}
\alias{PharmPow-package}
\alias{PharmPow}
\docType{package}
\title{
Pharmacometric Power calculations for mixed study designs
}
\description{
This package contains functions performing power calculations for mixed (sparse/dense sampled) pharmacokinetic study designs. The input data for these functions is tailored for NONMEM .phi files.
}
\details{
\tabular{ll}{
Package: \tab PharmPow\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2013-09-16\cr
License: \tab MIT + file LICENSE\cr
}
PharmPow_parallel
PharmPow_crossover
fig_PharmPow_parallel
fig_PharmPow_crossover
}
\author{
Frank Kloprogge & Joel Tarning
Maintainer: Frank Kloprogge <frank@tropmedres.ac>
}
\keyword{ package }
|
/man/PharmPow-package.Rd
|
no_license
|
cran/PharmPow
|
R
| false | false | 745 |
rd
|
x <- read.table("mantle_melting.data",sep="\t")
x <- as.matrix(x)
REE <- colnames(x)
prima <- x["PRIMA",] # PRIMA composition
dm <- x["DM",] # DM composition
kd <- x[c(-1,-2),] # table of distribution coefficients
m <- read.table("mantle_melting_modal.data",sep="\t")
m <- as.matrix(m) # table of mineral props after melting
dd <- m%*%kd # bulk distrib. coeff. [Eq.(10.4)]
print(round(dd,3))
ff <- c(0.01,0.02,0.05,0.1,0.2) # degrees of melting
# function calculating batch melt composition [Eq. (11.1)]
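# i.e. the batch melting equation cL = c0/(D + F*(1 - D)), evaluated for each
# degree of melting F in ff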
batch <- function(c0,ff,dd){
out <- sapply(ff,function(i){
z <- c0/(dd+i*(1-dd))
return(z)
})
out<-t(out)
rownames(out)<-ff
return(out)
}
# Shallow melting, calculation
shallow1 <- batch(prima,ff,dd["5",])# PRIMA
print(shallow1,3)
shallow2<-batch(dm,ff,dd["5",]) # DM
print(shallow2,3)
# Shallow melting, plotting - PRIMA is blue, DM green
mantle1 <- rbind(prima,dm) # Two mantle sources
col0<-c("darkblue","darkgreen")
spider(mantle1,"Boynton",0.1,100,pch=16,cex=1.5,lwd=1.5,col=col0,main="Shallow melting (5 kbar)")
col1 <- selectPalette(nrow(shallow1),"blues")
col2 <- selectPalette(nrow(shallow2),"greens")
shallow<-rbind(shallow1,shallow2)
col<-c(col1,col2)
spider(shallow,"Boynton",pch="",col=col,add=TRUE)# adds to existing
legend("bottomright",legend=rep(ff,2),pch=15,col=col,bg="white",ncol=2,title="PRIMA/DM")
# Deep melting, calculation
deep1<-batch(prima,ff,dd["15",]) # PRIMA
print(deep1,3)
deep2<-batch(dm,ff,dd["15",]) # DM
print(deep2,3)
# Deep melting, plotting - PRIMA is blue, DM green
mantle2 <- rbind(prima,dm) # Two mantle sources
col <- c("darkblue","darkgreen")
spider(mantle2,"Boynton",0.1,100,pch=16,cex=1.5,lwd=1.5,col=col,main="Deep melting (15 kbar)")
col1 <- selectPalette(nrow(deep1),"blues")
col2 <- selectPalette(nrow(deep2),"greens")
deep <- rbind(deep1,deep2)
col <- c(col1,col2)
spider(deep,"Boynton",pch="",col=col,add=TRUE)
legend("bottomright",legend=rep(ff,2),pch=15,col=col,bg="white",ncol=2,title="PRIMA/DM")
|
/Janousek_et_al_2015_modelling_Springer/Part_3/Code/Exercises/exe_14.3_mantle_melting.r
|
no_license
|
nghia1991ad/GCDkit_book_R
|
R
| false | false | 2,200 |
r
|
\name{par1}
\alias{par1}
\title{Change default par parameters}
\usage{
par1()
}
\description{
Change default par parameters
}
\author{
Dustin Fife
}
|
/man/par1.Rd
|
no_license
|
mrdwab/fifer
|
R
| false | false | 156 |
rd
|
context("gating...")
gatingResults <- readRDS(system.file("tests/gatingResults.rds", package = "openCyto"))
localPath <- "~/rglab/workspace/openCyto"
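# NOTE: the GatingSet archives below are read from this developer-local
# workspace path, so the suite only runs on a machine where that path exists.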
test_that("tcell", {
gt_tcell <- gatingTemplate(gtFile, autostart = 1L)
gs <- load_gs(file.path(localPath,"misc/testSuite/gs-tcell"))
gating(gt_tcell, gs, mc.core = 2, parallel_type = "multicore")
thisRes <- getPopStats(gs, path = "full")
expectRes <- gatingResults[["gating_tcell"]]
expect_equal(thisRes, expectRes, tol = 0.04)
})
test_that("ICS", {
gtfile <- system.file("extdata/gating_template/ICS.csv", package = "openCyto")
gt <- gatingTemplate(gtfile)
gs <- load_gs(file.path(localPath,"misc/testSuite/gs-ICS"))
Rm("s", gs)
gating(gt, gs, mc.core = 2, parallel_type = "multicore")
thisRes <- getPopStats(gs, path = "full")
expectRes <- gatingResults[["gating_ICS"]]
expect_equal(thisRes, expectRes, tol = 0.05)
})
test_that("treg", {
gtfile <- system.file("extdata/gating_template/treg.csv", package = "openCyto")
gt <- gatingTemplate(gtfile)
gs <- load_gs(file.path(localPath,"misc/testSuite/gs-treg"))
Rm("boundary", gs)
gating(gt, gs, mc.core = 3, parallel_type = "multicore")
thisRes <- getPopStats(gs, path = "full")
expectRes <- gatingResults[["gating_treg"]]
expect_equal(thisRes, expectRes, tol = 0.25)
})
test_that("bcell", {
gtfile <- system.file("extdata/gating_template/bcell.csv", package = "openCyto")
gt <- gatingTemplate(gtfile, autostart = 1L)
gs <- load_gs(path = file.path(localPath,"misc/testSuite/gs-bcell"))
Rm("boundary", gs)
gating(gt, gs, mc.core = 3, parallel_type = "multicore")
thisRes <- getPopStats(gs, path = "full")
expectRes <- gatingResults[["gating_bcell"]]
expect_equal(thisRes, expectRes, tol = 0.08)
})
|
/inst/tests/gating-testSuite.R
|
no_license
|
petterbrodin/openCyto
|
R
| false | false | 2,049 |
r
|
#
# Useful functions to manipulate data
# - sub sample
# - interpolate
# - reorganize
# - etc.
#
# (c) Copyright 2009-2013 Jean-Olivier Irisson
# GNU General Public License v3
#
#------------------------------------------------------------
outliers <- function(x, method=c("hampel","g","bonferroni","custom"), factor=5.2)
#
# Return the indices of outliers in x, according to:
# . Davies and Gather, The identification of multiple outliers, JASA 88 (1993), 782-801. for methods hampel, g and custom
#  . outlierTest in package car for method bonferroni
# In method custom, the higher the factor the less sensible the detection of outliers
#
{
method = match.arg(method)
if (method=="bonferroni") {
suppressPackageStartupMessages(require("car"))
return(as.numeric(outlierTest(lm(x~1))$obs))
} else {
if (method=="hampel") {
factor = 5.2
} else if (method=="g") {
n = length(x)
if (n%%2==0) {
factor = 2.906+11.99*(n-6)^-0.5651
} else {
factor = 2.906+12.99*(n-5)^-0.5781
}
} else if (method=="custom") {
factor = factor
}
return(which(abs(x-median(x, na.rm=TRUE))>(factor*mad(x, na.rm=TRUE))))
}
}
despike <- function(x, window=max(length(x)/5, 10))
#
# Remove spikes in a signal by detecting outliers using the Median Absolute Deviation in a moving window along the signal
# x series (vector) of numerical data, somewhat regular
# window size of the window
#
{
# prepare storage to count the number of times a point is checked and detected as an outlier
count <- x
out <- x
# initialize to 0
count[] <- 0
out[] <- 0
for (i in 1:(length(x)-window+1)) {
# select data in the window
idx <- i:(i+window-1)
xi <- x[idx]
# count this data as checked once
count[idx] <- count[idx] + 1
# detect outliers in the window using MAD
    # NB: recompute everything here rather than using outliers() or mad() for speed purposes
absDev <- abs(xi - median(xi, na.rm=T))
mad <- median(absDev, na.rm=T)*1.4826
outi <- which(absDev > 5.2*mad)
# mark those positions as outliers
out[i + outi - 1] <- out[i + outi - 1] + 1
}
# remove the points consistently (i.e. in all windows) identified as outliers
x[out == count] <- NA
return(x)
}
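# Illustrative sketch (synthetic data, not part of the original file):
#   y <- sin(seq(0, 10, 0.01)); y[c(200, 600)] <- 5   # two artificial spikes
#   despike(y, window=50)                             # spikes come back as NA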
# Sub-sampling or interpolation
#------------------------------------------------------------
regrid <- function(x, shift=T)
#
# Resample the same values on a grid twice as coarse
#
# x data.frame with components x and y
{
if (shift) {
x = x[x$x!=max(x$x) & x$y!=max(x$y),]
}
x = x[seq(1,nrow(x),by=2),]
return(x)
}
interp.x <- function(x, y=NULL, n=80, xo=seq(min(x),max(x),length=n), method="spline", ...)
#
# Interpolate data y defined at points x to points xo
# x can also be a data.frame with x in the first column and y in the second
# method can be "spline" or "linear"
#
{
method = match.arg(method,c("spline","linear"))
if (is.data.frame(x)) {
y = x[,2]
x = x[,1]
}
if (method == "spline") {
fun = splinefun(x,y)
yo = fun(xo)
} else if (method == "linear") {
yo = approx(x,y,xout=xo)$y
}
return(data.frame(x=xo,y=yo))
}
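# Example usage (comments only; both methods rely on base R, splinefun/approx):
#   obs <- data.frame(x=c(0,1,2,4,8), y=c(0,1,4,16,64))
#   interp.x(obs, n=20)                     # spline interpolation on 20 regularly spaced points
#   interp.x(obs, n=20, method="linear")    # piecewise-linear version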
interp.xy <- function(x, y, z, n=80, xo=seq(min(x, na.rm=T),max(x, na.rm=T),length=n), yo=seq(min(y, na.rm=T),max(y, na.rm=T),length=n), extrapolate=F, method=c("akima", "bilinear"), output=c("list","data.frame"), ...)
#
# Interpolates data z defined at points (x,y) on a new grid
#
# x, y          coordinates of input points
# z             values at input points
# n             number of points in the new grid
# xo, yo coordinates of output points
# extrapolate if T, also define points outside the range of x,y when possible
# method
# "akima" spline interpolation (package akima)
# "bilinear" simple bilinear
# output
#   "data.frame"  data.frame with columns x, y and z (for ggplot)
#   "list"        list with components x, y, and z (for persp, contour)
#
{
suppressPackageStartupMessages(require("reshape"))
# parse arguments
method = match.arg(method)
output = match.arg(output)
if (method=="akima") {
# interpolate a regular grid from a set of irregular points
suppressPackageStartupMessages(require("akima"))
out = interp(x, y, z, xo, yo, linear=F, extrap=extrapolate, ...)
if (output == "data.frame") {
out = list2frame(out)
}
} else if (method == "bilinear") {
# interpolate a regular grid from a set of gridded points
# original coordinates
objDat <- data.frame(x=x, y=y, value=z)
x <- sort(unique(x))
y <- sort(unique(y))
z <- as.matrix(cast(objDat,x~y))
# interpolated locations
locs <- expand.grid(xo, yo)
xNew <- locs[, 1]
yNew <- locs[, 2]
# find indexes of cells in the original grid that contain the points to be interpolated
nx <- length(x)
ny <- length(y)
lx <- approx(x,1:nx,xNew)$y
ly <- approx(y,1:ny,yNew)$y
lx1 <- floor(lx)
ly1 <- floor(ly)
# distance between grid cells origins and points
ex <- lx - lx1
ey <- ly - ly1
# for points that are exactly on the top or right of the grid, shift one cell down (cf formula below where 1 is added to the index)
ex[lx1 == nx] <- 1
ey[ly1 == ny] <- 1
lx1[lx1 == nx] <- nx - 1
ly1[ly1 == ny] <- ny - 1
# bilinear interpolation
out <- z[cbind(lx1 , ly1 )] * (1 - ex) * (1 - ey) +
z[cbind(lx1+1, ly1 )] * ex * (1 - ey) +
z[cbind(lx1 , ly1+1)] * (1 - ex) * ey +
z[cbind(lx1+1, ly1+1)] * ex * ey
out <- data.frame(x=xNew, y=yNew, z=out)
if (output == "list") {
out = frame2list(out)
}
}
return(out)
}
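# Example usage (comments only; "akima" needs the akima package, both methods need reshape):
#   d <- expand.grid(x=1:5, y=1:5); d$z <- d$x + d$y
#   interp.xy(d$x, d$y, d$z, n=20, method="bilinear", output="data.frame")   # for ggplot
#   interp.xy(d$x, d$y, d$z, n=20, method="akima")                           # list, for persp/contour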
# Data re-organisation
#------------------------------------------------------------
frame2list <- function(X, names=c("x","y","value"))
#
# Turn a data.frame with two coordinate columns and a value into a list suitable for persp, contour and the like
#
# X data.frame with components x, y (or lon, lat) and a value
{
if (!is.data.frame(X)) {
stop("Need a data.frame")
}
if (all(names %in% names(X))) {
# if all names are in the data frame, extract the columns
X <- X[,names]
} else if (ncol(X) == 3) {
# if the data.frame names do not match but it has the right size, assume columns are in order and rename them
warning("Assuming columns ", paste(names(X), collapse=","), " are in fact ", paste(names, collapse=","))
names(X) <- names
} else {
stop("Cannot find coordinates and values in this data.frame. Check column names")
}
# convert into list
suppressPackageStartupMessages(require("reshape"))
out = list(x=sort(unique(X$x)),y=sort(unique(X$y)))
out$z = as.matrix(cast(X,x~y))
return(out)
}
list2frame <- function(X, names=c("x", "y", "z"))
#
# Turn a list with three components (suitable for persp and the like) into a data.frame with columns x, y, z, suitable for ggplot
#
# X list with components x, y, and z
{
if (!is.list(X)) {
stop("Need a list")
}
if ( is.null(names(X)) & length(X) == 3 ) {
# if the list has no names and the right size, assume components are in order and rename them
warning("Assuming list components are in the order: ", paste(names, collapse=","))
names(X) <- names
} else if ( !all(names %in% names(X)) & length(X) == 3 ) {
# if the list names do not match but it has the right size, assume components are in order and rename them
warning("Assuming components ", paste(names(X), collapse=","), " are in fact ", paste(names, collapse=","))
names(X) <- names
} else if (all(names %in% names(X))) {
# if all names are in the list, extract the corresponding components
X <- X[names]
} else {
stop("Cannot find coordinates and values in this list. Check names")
}
# convert into data.frame
suppressPackageStartupMessages(require("reshape"))
out = melt(X$z,varnames=c("x","y"))
out$x = X$x[out$x]
out$y = X$y[out$y]
out = rename(out,c(value="z"))
return(out)
}
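# Example round trip (comments only; needs reshape). volcano is a matrix shipped with R:
#   vl <- list(x=1:nrow(volcano), y=1:ncol(volcano), z=volcano)
#   vf <- list2frame(vl)                    # -> data.frame with columns x, y, z (for ggplot)
#   frame2list(vf, names=c("x","y","z"))    # -> back to a list for persp()/contour()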
|
/lib_manip.R
|
no_license
|
jessluo/sandiego
|
R
| false | false | 8,165 |
r
|
# Random snippets of code I'm looking for at various times -- consolidated in a single place.
# Useful resources:
# https://www.listendata.com/2016/08/dplyr-tutorial.html
#
#------------------------------- Dplyr chunks -------------------
#NEW -- Added 2020-07-16
# Dynamic variable names with across; Thanks @AC for motivating this.
tribble(
~site, ~TX_CURR, ~TX_MMD.u3, ~TX_MMD.o3,
"x", 10L, 3L, 7L,
"y", 20L, 4L, 12L,
"z", 15L, 10L, 5L
) %>%
mutate(across(starts_with("TX_MMD"), list(share = ~(.x / TX_CURR)),
.names = "IM_NOT_DEAD_to_{col}"))
# Change how the names are generated
df %>%
mutate(across(starts_with("TX_MMD"), list(share = ~(.x / TX_CURR)),
.names = "{fn}_{col}"))
# Checking for uniqueness
x2 = distinct(df, var1, var2, .keep_all = TRUE)
# Count the number of distinct values taken by a set of variables
df %>% group_by(var1, var2) %>% n_groups()
# Create a unique ID based on a grouping - two step process
# Akin to Stata egen = group(var1, var2)
shk_df %>%
mutate(id_tmp = interaction(clid, hhid)) %>%
mutate(id = group_indices(., id_tmp))
# Create a sequential id based on a order in dataset
df %>% arrange(id1, id2) %>% mutate(id = row_number())
# Equivalent of inrange in Stata inrange(var, 3, 5)
df %>% filter(between(var2, 3, 5))
# dropping vars from a data frame (like drop)
select(df, -var1, var2)
drop_vars <- c("var1", "var2")
select(df, -one_of(drop_vars))
#Reorder variables in a data frame
df2 <- select(df, var1, var2, everything())
# rename syntax
rename(data , new_name = old_name)
# filter with negations
mydata10 = filter(df, !var1 %in% c("A", "C"))
# using grepl - look for records where the State variable contains "Ar"
mydata10 = filter(df, grepl("Ar", State))
# Summarise over variables - creating 3 new calcuations for each variable
summarise_at(df, vars(var1, var2), funs(n(), mean, median))
# Summarise if removing NAs
starwars %>% summarise_if(is.numeric, mean, na.rm = TRUE)
# Convert values to na
k <- c("a", "b", "", "d")
na_if(k, "")
# Print a list of names to the screen in a form you can paste back into code
dput(names(shocks))
# Lags and Leads
df %>%
mutate(prv_year_absorb = lag(Variable_to_lag, n = 1, order_by = year),
absorb_delta = Variable_to_lag - prv_year_absorb)
#------------------------------- Purrr chunks -------------------
# https://www.hvitfeldt.me/2018/01/purrr-tips-and-tricks/
# Convert each element of a list into objects
list2env(my_list ,envir=.GlobalEnv)
##### Subsetting elements in a list
# Convert an element from a list to a data.frame, retaining names
geo_cw <- map_df(df_wash[6], `[`)
# can also use pluck
test <- df_wash %>% pluck(6)
# Batch load excel sheets
# First, set the read path of the where spreadsheet lives
read_path <- file.path(datapath, "KEN_WASH_2018_tables.xlsx")
# Write everything into a list
df_wash <- excel_sheets(read_path) %>%
set_names() %>%
map(read_excel, path = read_path)
# Loop over a
keep_list <- c("6MMD", "CXCA Data", "TB_PREV Data", "TX Data")
safe <-
excel_sheets(file_in) %>%
set_names() %>%
map(., .f = ~read_excel(file_in, sheet = .x)) %>%
.[keep_list]
# Write everything to a single data frame
# Note: the map_df or map_dfr appends rows with the same header across tabs
# but creates new columns if column names do not align across tabs (worksheets)
read_path <- file.path("/Users/tim/Desktop/Book1.xlsx")
excel_sheets("/Users/tim/Desktop/Book1.xlsx") %>%
set_names() %>%
map_df(~ read_excel(path = read_path, sheet = .x, skip = 1), .id = "sheets")
# The .id = "sheets" option embeds the name of the sheet in the resulting data frame.
# Or, write everything to separate objects
read_path %>%
excel_sheets() %>%
# Use basename with the file_path_sans_ext to strip extra info; Not NEEDED
# set_names(nm = (basename(.) %>% tools::file_path_sans_ext())) %>%
set_names() %>%
map(~read_excel(path = read_path, sheet = .x), .id = "sheet") %>%
# Use the list2env command to convert the list of dataframes to separate dfs using
# the name provided by the set_names(command). Necessary b/c not all the sheets are the same size
list2env(., envir = .GlobalEnv)
# Using fs and purrr to read in directory of files
res <- path(pathname) %>% dir_ls(regexp = "texttoregex.csv") %>% map_df(read_csv, .id = "filename")
# Read all files in a folder, place in a single dataframe
fd <- list.files("folder", pattern = "*.csv", full.names = TRUE) %>%
  purrr::map_df(readr::read_csv, .id = "id")
# Loop over columns and summarise stuff
df %>%
select(`Education Strategy Area(s)`:`Research Approach`) %>%
map(tab_that)
# ----------------- Writing list to multiple files ----------------------
# Put a pattern of objects into a single list
graph_list <- mget(ls(pattern = "gph"))
datalist <- list(impr_sanit = impr_sanit,
unimp_sanit = unimp_sanit,
impr_h20 = impr_h20,
unimpr_h20 = unimpr_h20,
waste = waste)
datalist %>%
names() %>%
map(., ~ write_csv(datalist[[.]], file.path(washpath, str_c(., ".csv"))))
# Quick mapping of new variables
old <- c("x", "y", "x", "z")
mapping <- c("x" = "a", "y" = "b", "z" = "c")
new <- mapping[old]
#------------------------------- Tidy Eval -------------------
# https://dplyr.tidyverse.org/articles/programming.html
# https://edwinth.github.io/blog/dplyr-recipes/ - recipes for basic use
# quo returns a quosure
# First quo, then !!
my_var <- quo(a)
summarise(df, mean = mean(!! my_var), sum = sum(!! my_var), n = n())
# For functions, use enquo and !! within
my_summarise2 <- function(df, expr) {
expr <- enquo(expr)
summarise(df,
mean = mean(!! expr),
sum = sum(!! expr),
n = n()
)
}
# Create new variable names based on expr
my_mutate <- function(df, expr) {
expr <- enquo(expr)
mean_name <- paste0("mean_", quo_name(expr))
sum_name <- paste0("sum_", quo_name(expr))
mutate(df,
!! mean_name := mean(!! expr),
!! sum_name := sum(!! expr)
)
}
# Need three things to capture multiple variables for group_by
# 1. use the ... in the function definition to capture any number of arguments
# 2. use quos(...) to capture the ... as a list of formulas
# 3. use !!! to splice the arguments into the group_by command
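# A minimal sketch of those three steps (hypothetical helper; assumes dplyr is loaded)
my_count_by <- function(df, ...) {          # 1. capture any number of grouping arguments with ...
  group_vars <- quos(...)                   # 2. capture the ... as a list of quosures
  df %>%
    group_by(!!!group_vars) %>%             # 3. splice them into group_by
    summarise(n = n())
}
# my_count_by(mtcars, cyl, gear)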
# New format example {{ }}
bplot_sort <- function(df, x = County, y = value, wrap = type, ctitle = "NA", rows = 2) {
cpt <- df %>% select(source) %>% unique()
df %>%
mutate(indicator_sort = fct_reorder( {{ wrap }}, {{ y }}, .desc = TRUE),
County_sort = reorder_within( {{ x }}, {{ y }}, {{ wrap }} )) %>%
ggplot(aes(y = {{ y }}, x = County_sort)) +
geom_col(fill = "#949494") +
coord_flip() +
scale_x_reordered() +
scale_y_continuous(label = percent_format(accuracy = 1)) +
facet_wrap(vars(indicator_sort), scales = "free_y", nrow = rows) +
theme_minimal() +
labs(x = "", y = "",
title = ctitle,
caption = str_c("Source: ", cpt)) +
theme(strip.text = element_text(hjust = 0),
axis.text.y = element_text(size = 8))
}
#------------------------------- Case When -------------------
df %>% mutate(age = case_when(
age_range == "population" ~ "All",
age_range == "population_0_17" ~ "0_17",
age_range == "population_0_5" ~ "0_5",
age_range == "population_6_13" ~ "6_13",
TRUE ~ "14_17" # -- sets any other value as 14_17
))
# With GREP
case_when(
grepl("Windows", os) ~ "Windows-ish",
grepl("Red Hat|CentOS|Fedora", os) ~ "Fedora-ish",
grepl("Ubuntu|Debian", os) ~ "Debian-ish",
grepl("CoreOS|Amazon", os) ~ "Amazon-ish",
is.na(os) ~ "Unknown",
  TRUE ~ "Other"
)
# standard example
case_when(
x %% 35 == 0 ~ "fizz buzz",
x %% 5 == 0 ~ "fizz",
x %% 7 == 0 ~ "buzz",
TRUE ~ as.character(x)
)
#------------------------------- ggplot colors and plots -------------------
# Use scale_fill_gradientn when you want to set a balanced divergent palette
# See colors palette
scales::show_col(colorRampPalette(RColorBrewer::brewer.pal(11,"Spectral"))(30))
scales::show_col(c("red", "grey", "black", "white", "orange", "grey90"))
# Restrict outliers to be capped at a range. In this case, -5 and 5. --> oob = scales::squish
scale_fill_gradientn(colours = rev(RColorBrewer::brewer.pal(11, "Spectral")), limits = c(-5,5), oob = scales::squish) +
# Define your max value in an object (here called shock_dev_max)
shock_dev_max = unlist(shock_stats_county %>% summarise(max_dev = max(abs(shock_dev))))
df_spatial %>% ggplot() +
geom_sf(aes(fill = shock_dev), colour = "white", size = 0.5) +
scale_fill_gradientn(colours = RColorBrewer::brewer.pal(11, 'PiYG'),
limits = c(-1 * shock_dev_max, shock_dev_max),
labels = scales::percent) + ...
# For color brewer scale_fill_brewer and scale_colour_brewer are for categorical data
# Use scale_fill_distiller or scale_color_distiller for continuous data.
# If the aesthetics are fill = x then use former, if colour = x, then latter
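# Quick sketch of the two cases (generic df and variable names, not from a specific project)
ggplot(df, aes(x, y, fill = categorical_var)) + geom_col() + scale_fill_brewer(palette = "Set2")
ggplot(df, aes(x, y, fill = continuous_var)) + geom_tile() + scale_fill_distiller(palette = "Spectral")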
# Scales -- show only major numbers on percentage scale
# add chunk below to ggplot call
scale_y_continuous(labels = scales::percent_format(accuracy = 1))
# To generate new colors
colorRampPalette(RColorBrewer::brewer.pal(11,"Spectral"))(30) %>% knitr::kable(format = "rst")
# in Atom command+D will do cursor highlighting down
# To preview palettes
palette(colorRampPalette(brewer.pal(11,"Spectral"))(30))
plot(1:30, 1:30, col = 1:30, pch = 19, cex = 5)
# Plotting reference bars
# When plotting geom_rect (reference bars), ggplot will plot a copy of each bar for each row in a dataframe
# This is incredibly annoying when you want to decrease the opacity or edit the reference bars in inkscape / AI
# To get around this problem, pipe in the first row the data frame you are plotting to the geom_rect call
# link: https://stackoverflow.com/questions/17521438/geom-rect-and-alpha-does-this-work-with-hard-coded-values;
# Example below from the Kenya Middle and Upper Arm Circumference data at the county level
muac_malnut %>%
filter(county %in% c("Turkana", "Marsabit", "Isiolo", "Samburu")) %>%
group_by(county) %>%
mutate(mean = mean(value, na.rm = TRUE)) %>%
ungroup() %>%
mutate(county_sort = fct_reorder(county, mean, .desc = TRUE)) %>%
ggplot(aes(x = date, y = value)) +
geom_ribbon(aes(ymin = 0.15, ymax = 0.4),
              fill = "#d6604d", alpha = 0.20) +
geom_ribbon(aes(ymin = 0, ymax = 0.15),
              fill = "#4393c3", alpha = 0.20) +
geom_rect(data = muac_malnut[1, ], ymin = 0, ymax = .4,
xmin = as.Date("2009-01-01"), xmax = as.Date("2010-01-01"),
fill = "#fdfbec", alpha = 0.33) +
geom_rect(data = muac_malnut[1, ], ymin = 0, ymax = .4,
xmin = as.Date("2011-01-01"), xmax = as.Date("2012-01-01"),
fill = "#fdfbec", alpha = 0.33) +
geom_rect(data = muac_malnut[1, ], ymin = 0, ymax = .4,
xmin = as.Date("2017-01-01"), xmax = as.Date("2018-01-01"),
fill = "#", alpha = 0.33) +
geom_smooth(colour = "#", span = span, alpha = 0.5, size = 0.25) +
geom_line(colour = grey70K) +
#Sort each facet within a facet wrapped graph. Need tidytext.
fertility_plot <- gha_df$Fertility_Region %>%
mutate(Region_sort = fct_reorder(Region, `adolescent birth rate`),
reg_color = ifelse(Region == "National", '#80cdc1', grey30K)) %>%
gather("indicator", "value", `adolescent birth rate`:`demand for family planning`) %>%
mutate(indicator_sort = fct_reorder(indicator, value, .desc = TRUE),
region_sort2 = reorder_within(Region, value, indicator)) %>%
ggplot(aes(y = value, x = region_sort2, fill = reg_color)) +
coord_flip() + geom_col() +
scale_x_reordered() +
facet_wrap(~indicator_sort, scales = "free") +
scale_fill_identity() +
theme_line +
theme(panel.spacing = unit(1, "lines")) +
y_axix_pct +
labs(title = "Family planning and birth rates by region",
subtitle = "Note free scales to accomodate indicator ranges",
x = "", y = "",
caption = "Source: 2017 Multiple Indicator Cluster Survey (MICS)")
# move facets left
theme(strip.text = element_text(hjust = 0, size = 10))
# Filter a single dataframe multiple times within a function
parity_plot <- function(df, sub_filt = "Mathmatics", yearfilt = "2018-2019") {
df %>%
filter(Subject == {{sub_filt}} & year == {{yearfilt}}) %>%
filter(Subgroup != "White") %>%
#filter(option_flag == 1) %>%
mutate(school_sort = reorder_within(school_name, -op_gap, Subgroup)) %>%
{# By wrapping ggplot call in brackets we can control where the pipe flow enters (df)
# This allows us to use filters within the ggplot call
ggplot() +
geom_abline(intercept = 0, slope = 1, color = non_ats, linetype = "dotted") +
#geom_polygon(data = df_poly, aes(-x, -y), fill="#fde0ef", alpha=0.25) +
geom_point(data = dplyr::filter(., school_name != "Arlington Traditional"),
aes(y = value, x = benchmark, fill = ats_flag_color),
size = 4, shape = 21, alpha = 0.75, colour = "white") +
geom_point(data = dplyr::filter(., school_name == "Arlington Traditional"),
aes(y = value, x = benchmark, fill = ats_flag_color),
size = 4, shape = 21, alpha = 0.80, colour = "white") +
facet_wrap(~Subgroup,
labeller = labeller(groupwrap = label_wrap_gen(10))) +
coord_fixed(ratio = 1, xlim = c(40, 100), ylim = c(40, 100)) +
scale_fill_identity() +
theme_minimal() +
labs(x = "benchmark test value", y = "Subgroup test value",
title = str_c(sub_filt, " opportunity gap across subgroups (ATS in blue) for ", yearfilt),
subtitle = "Each point is a school -- points below the 45 degree line indicate an opportunity gap") +
theme(strip.text = element_text(hjust = 0))
}
}
# Creating plots in a nested data frame and writing them to a file
# Loop over plots by category, saving resulting plots in a grouped / nested dataframe
# extract the nested plots by calling the appropriate position of the nested plot
plots <-
gov %>%
group_by(Category) %>%
nest() %>%
mutate(plots = map2(data, Category,
~gov_plot(.) + labs(x = "", y = "",
title = str_c("Category ", Category, ": Governance scores for community fish refuges"),
caption = "Source: 2016 Rice Field Fishery Enhancement Project Database: Governance Scores Module")))
plots$plots[2]
map2(file.path(imagepath, paste0("Category ", plots$Category,
": Governance scores for community fish refuges.pdf")),
plots$plots,
height = 8.5,
width = 11,
dpi = 300,
ggsave)
################# Options for plots in a function ###########################
#Create a function that gives back a bar or map plot, depending on input
msme_plot <- function(df, x, option = 1) {
if (!option %in% c(1, 2)) {
stop("Select 1 (map) or 2 (graph) as option value.")
}
xvar = enquo(x)
if (option == 1) {
ggplot(df) +
geom_sf(aes(fill = !!xvar), colour = "white") +
scale_fill_viridis_c(option = "C", direction = -1, alpha = 0.90) +
theme_minimal() +
labs(caption = "GeoCenter Calculations from MSME 2016 Report",
title = (gsub("`", "", {rlang::quo_text(xvar)})))
}
else if (option == 2) {
df %>%
      mutate(sortvar = fct_reorder(County, !!xvar, .desc = TRUE)) %>%
ggplot(aes(x = sortvar, y = !!xvar)) +
geom_col() +
coord_flip() + theme_minimal() +
labs(caption = "GeoCenter Calculations from MSME 2016 Report",
title = (gsub("`", "", {rlang::quo_text(xvar)})))
}
}
msme_plot(msme_geo, `Table 3_1 Sampled Licensed Establishments`, option = 1)
#-------------------------------- Plot specific -----------------------------
# Setting plotting themes up front
theme_update(
axis.ticks = element_blank(),
axis.text = element_blank())
# When plotting a heatmap, you can pass the label option through the scale_X_XX part. This allow
# for formatting of percentages on the scale
... + scale_fill_viridis_c(direction = -1, alpha = 0.90, option = "A", label = percent_format(accuracy = 2)) + ...
# Change the width of the legend
... + theme(legend.position = "top",
legend.key.width = unit(2, "cm")) +
# Add captions
... + labs(caption = "text to add") +...
# Add titles to plots based on text/vars passed through tidy eval
xvar = enquo(x) ...
... labs(caption = "GeoCenter Calculations from MSME 2016 Report",
title = (gsub("`", "", {rlang::quo_text(xvar)})))
#------------------------------- listing things -------------------
# List all the functions in a package
ls(package:stringr)
# Listing everything in the workspace
mget(ls())
ls.str()
# Get a list of attached packages and paths
search()
searchpaths()
#------------------------------- Searching strings -------------------
# Use str_detect to quickly search through strings for key words
str_detect(var, "string to detect")
# For filtering (can also use for binary mutates)
df %>% filter(str_detect(var, "string"))
# Remove parentheses (from Tidy Tuesday w/ DROB - https://github.com/dgrtwo/data-screencasts/blob/master/nyc-restaurants.Rmd)
cuisine_conf_ints %>%
mutate(cuisine = str_remove(cuisine, " \\(.*"),
cuisine = fct_reorder(cuisine, estimate))
str_remove("text (with some markers here)", " \\(.*")
#List the type of characters in a column
utf8::utf8_print(unique(hfr$mech_code), utf8 = FALSE)
# fix it
mutate(mech_code = gsub("(^[[:space:]]*)|([[:space:]]*$)", "", mech_code))
#------------------------------- Dates -------------------
# https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2018-10-23/movie_profit.csv
# https://github.com/dgrtwo/data-screencasts/blob/master/movie-profit.Rmd
mutate(release_date = as.Date(parse_date_time(release_date, "%m!/%d/%Y")))
#------------------------------- Counts -------------------
df %>% count(var1, var2, sort = TRUE)
#------------------------------- BROOMING & Models -------------------
# TT snippet -- same as above.
library(broom)
cuisine_conf_ints <- by_dba %>%
add_count(cuisine) %>%
filter(n > 100) %>%
nest(-cuisine) %>%
mutate(model = map(data, ~ t.test(.$avg_score))) %>%
unnest(map(model, tidy))
# Fitting a natural spline - from Bridges Tidy Tuesday
model <- bridges %>%
mutate(good = bridge_condition == "Good") %>%
  glm(good ~ ns(x, 4) + indicator_var, data = ., family = "binomial")
# Widening data and PCA -- see the widyr package - https://github.com/dgrtwo/widyr
#------------------------------- Regular Expression -------------------
# https://www.jumpingrivers.com/blog/regular-expressions-every-r-programmer-should-know/
library(stringr)
# \ (backslash) is a metacharacter, have to escape it to search for it --> "\\"
str_subset(dir(file.path(datapath)), "\\.csv")
# ^ and the $
# Use the ^ to indicate the start of line and $ to indicate the end of a line
rm(list = ls(pattern = "_in$")) # - removing dataframes/objects that end in "_in"
# remove everything after a string
df %>% mutate(school_name = str_to_title(school_name) %>% str_remove_all(., "Elem.*"))
#------------------------------- Purrr'ing -------------------
# Split on a group, peform action across all subgroups
mtcars %>%
split(.$cyl) %>%
map(., ~ggplot(., aes(mpg, hp)) + geom_point())
# Read a batch of files in and give them names
access_files <- list.files(file.path(datapath, "RFFI_Data"), pattern = ".xlsx")
access_path <- "Data/RFFI_Data"
fish <- map(as.list(access_files), ~read_excel(file.path(access_path, .)))
names(fish) <- as.list(access_files) %>% set_names()
# ----------------------------- multi-line cursor --------------
# `control` + `option` plus up or down
# ----------------------------- System or packages loaded --------
sessionInfo()
(.packages())
# ----------------------------- Working with dates --------
# Create a date from year, month and day
flights %>% mutate(date = make_date(year, month, day))
flights %>% mutate(wday = wday(date, label = TRUE))
# Create a decade variable using the 10 * *(X %/% Y) code
economics %>% mutate(year = year(date), decade = 10 * (year %/% 10)) %>% count(year, decade) %>% print(n = Inf)
# ----------------------------- Models --------
mod <- MASS::rlm(n ~ wday * ns(date, 5), data = daily) # for natural splines
# Create a holdout and holdin data frames
holdout <- df %>% filter(id %% 5 == 0) # grab every 5th observation
holdin <- df %>% filter(id %% 5 != 0) # everything but the 5s
# Packages that I should use more
skimr - https://github.com/ropensci/skimr
rsample - for splitting data into test/training
recipes - https://tidymodels.github.io/recipes/
parsnip - for tidy modeling (https://tidymodels.github.io/parsnip/index.html)
cartogram - https://github.com/sjewo/cartogram
patchwork - https://patchwork.data-imaginist.com/index.html
ggchicklet - https://git.rud.is/hrbrmstr/ggchicklet
DependenciesGraphs - https://github.com/datastorm-open/DependenciesGraphs (Visualize package / function dependencies)
Colormind - https://github.com/dmi3kno/colormind
|
/lookfor.R
|
no_license
|
tessam30/Things_I_look_for
|
R
| false | false | 21,682 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeneSetDb-methods.R
\name{addGeneSetMetadata}
\alias{addGeneSetMetadata}
\title{Add metadata at the geneset level.}
\usage{
addGeneSetMetadata(x, meta, ...)
}
\arguments{
\item{x}{a \code{GeneSetDb} object}
\item{meta}{a \code{data.frame}-like object with \code{"collection"}, \code{"name"}, and
an arbitrary number of columns to add as metadata for the genesets.}
\item{...}{not used yet}
}
\value{
the updated \code{GeneSetDb} object \code{x}.
}
\description{
This function adds/updates columns entries in the \code{geneSets(gdb)} table.
If there already are defined meta values for the columns of \code{meta} in \code{x},
these will be updated with the values in \code{meta}.
}
\details{
TODO: should this be a setReplaceMethod, Issue #13 (?)
https://github.com/lianos/multiGSEA/issues/13
}
\examples{
gdb <- exampleGeneSetDb()
meta.info <- transform(
geneSets(gdb)[, c("collection", "name")],
someinfo = sample(c("one", "two"), nrow(gdb), replace = TRUE))
gdb <- addGeneSetMetadata(gdb, meta.info)
}
|
/man/addGeneSetMetadata.Rd
|
permissive
|
gladkia/sparrow
|
R
| false | true | 1,088 |
rd
|
library(xRing)
### Name: imRead
### Title: Load Image From a File
### Aliases: imRead
### ** Examples
if(interactive()){
file_path <- system.file("img", "AFO1046.1200dpi.png", package="xRing")
im <- imRead(file_path)
imDisplay(im)
}
|
/data/genthat_extracted_code/xRing/examples/imRead.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 244 |
r
|
### deprecated function that was replaced by microarraydata()
omicdata <- function(file, check = TRUE,
norm.method = c("cyclicloess", "quantile", "scale", "none"))
{
  warning("omicdata() is a deprecated function that was replaced by microarraydata().
          You should replace it with microarraydata(), RNAseqdata() or metabolomicdata(),
          depending on the type of data you handle. \n")
microarraydata(file = file, check = check, norm.method = norm.method)
}
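# Migration sketch (hypothetical file name; the arguments map one-to-one onto microarraydata()):
#   old: o <- omicdata("transcripto_sample.txt", check = TRUE, norm.method = "cyclicloess")
#   new: o <- microarraydata("transcripto_sample.txt", check = TRUE, norm.method = "cyclicloess")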
|
/R/omicdata.R
|
no_license
|
DrRoad/DRomics
|
R
| false | false | 496 |
r
|
### deprecated function that was replaced by microarraydata()
omicdata <- function(file, check = TRUE,
norm.method = c("cyclicloess", "quantile", "scale", "none"))
{
warning("omicdata() is a deprecated function that was replaced by microarraydata().
You should replace it by microarraydata(), RNAseqdata() or metabolomicdata()
depending of the type of data you handle. \n")
microarraydata(file = file, check = check, norm.method = norm.method)
}
|
testlist <- list(points = structure(c(3.10045080631114e-140, 5.64837120621297e+125, 4.06956751982026e+95, 6.59400621480488e+135, 1.18393360738069e-272, 5.35892842952669e-173, 2.86977581121643e-15, 2.2386738721768e+270, 2.98853116521547e+143, 3.08420528117937e+276, 3.33889424831818e-239, 2.00058425814441e-113, 2.46562043224079e-85, 1.89261876993418e-113, 1.78433108967668e+169, 9.46801839280429e-51, 2.48283345537456e-176, 8.21195771569161e+288, 4.5560582271321e+117, 1.0797329973067e+245, 3.91726596706926e-283, 2.36534175024629e+188, 4.62886637564784e+149, 1.95531992383552e-132, 2.24216371882707e+243, 3.15962229513625e-133, -Inf, 1.03502208669886e-277, 1.44329558427272e+189, 2.15560984776751e+185, 1.75933361941065e-114, 8.24221549962438e-287, 6.79398327699747e+55, 7.20263011526498e+40, 3.80926860974838e-156, 1.33550472691882e+204, 2.62538996893194e+129, 6.8576940616979e-16, 1.98743400939048e+154, 1.51886024823543e-282, 9.00123031400698e+84, 3.0243884984874e+234, 1.08707866440307e+120, 2.96591522379483e-146, 1.95816798750811e-131, 1.1753306209927e-122, 1.0936207305258e-194, 6.71962574015995e-160, Inf, -Inf, 1.92199862573809e-190, 4.96534090618195e+107, 1.35722207577192e-292, 3.18497333306282e+232, 3.64103714844602e-233, 2.88131094116101e+218, 76065827.688744, Inf, 2.8816084901775e+131, 1.27133442567742e+256, Inf, 2.58069493284837e+92, 1.83131586623993e+43, 2.30802117553111e-243, 3.00755495315194e+162, 9.88725471179051e+56, 6.83873559857537e-277, 4.07538849532164e+27, 1.15617076673217e+141, 5.3595347089513e+194, 1.83289283459492e-105, 3.22121394014806e-307, 5.10252627988266e-139, 1.55281134536723e-61, 2.06418845533417e+82, 8.83202732272626e-282, 4.79072923958292e+75, 6.2030748819062e-218, 27.5113146236504, 7.46244315476878e+67, 2460.21952908724), .Dim = c(9L, 9L)), ref = structure(c(2.29157327002727e+103, 1.71090868935708e-32, 5.31232893578367e+184, 6147.55175587533, 6.93103357665744e+279, 5.92871189432898e+180), .Dim = c(6L, 1L )))
result <- do.call(GPGame:::nonDomSet,testlist)
str(result)
|
/issuestests/GPGame/inst/testfiles/nonDomSet/nonDomSet_output/log_91247b3353f90899b269af391a37340c620dd302/nonDomSet-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false | false | 2,047 |
r
|
testlist <- list(points = structure(c(3.10045080631114e-140, 5.64837120621297e+125, 4.06956751982026e+95, 6.59400621480488e+135, 1.18393360738069e-272, 5.35892842952669e-173, 2.86977581121643e-15, 2.2386738721768e+270, 2.98853116521547e+143, 3.08420528117937e+276, 3.33889424831818e-239, 2.00058425814441e-113, 2.46562043224079e-85, 1.89261876993418e-113, 1.78433108967668e+169, 9.46801839280429e-51, 2.48283345537456e-176, 8.21195771569161e+288, 4.5560582271321e+117, 1.0797329973067e+245, 3.91726596706926e-283, 2.36534175024629e+188, 4.62886637564784e+149, 1.95531992383552e-132, 2.24216371882707e+243, 3.15962229513625e-133, -Inf, 1.03502208669886e-277, 1.44329558427272e+189, 2.15560984776751e+185, 1.75933361941065e-114, 8.24221549962438e-287, 6.79398327699747e+55, 7.20263011526498e+40, 3.80926860974838e-156, 1.33550472691882e+204, 2.62538996893194e+129, 6.8576940616979e-16, 1.98743400939048e+154, 1.51886024823543e-282, 9.00123031400698e+84, 3.0243884984874e+234, 1.08707866440307e+120, 2.96591522379483e-146, 1.95816798750811e-131, 1.1753306209927e-122, 1.0936207305258e-194, 6.71962574015995e-160, Inf, -Inf, 1.92199862573809e-190, 4.96534090618195e+107, 1.35722207577192e-292, 3.18497333306282e+232, 3.64103714844602e-233, 2.88131094116101e+218, 76065827.688744, Inf, 2.8816084901775e+131, 1.27133442567742e+256, Inf, 2.58069493284837e+92, 1.83131586623993e+43, 2.30802117553111e-243, 3.00755495315194e+162, 9.88725471179051e+56, 6.83873559857537e-277, 4.07538849532164e+27, 1.15617076673217e+141, 5.3595347089513e+194, 1.83289283459492e-105, 3.22121394014806e-307, 5.10252627988266e-139, 1.55281134536723e-61, 2.06418845533417e+82, 8.83202732272626e-282, 4.79072923958292e+75, 6.2030748819062e-218, 27.5113146236504, 7.46244315476878e+67, 2460.21952908724), .Dim = c(9L, 9L)), ref = structure(c(2.29157327002727e+103, 1.71090868935708e-32, 5.31232893578367e+184, 6147.55175587533, 6.93103357665744e+279, 5.92871189432898e+180), .Dim = c(6L, 1L )))
result <- do.call(GPGame:::nonDomSet,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load.pums.R
\name{load.pums}
\alias{load.pums}
\title{Load ACS PUMS csv files in a ready-to-use form.}
\usage{
load.pums(path)
}
\arguments{
\item{path}{path to a csv file with ACS PUMS housing or population data.}
}
\value{
data frame with ACS PUMS housing or population data from a csv file
}
\description{
Load the ACS PUMS data from a csv file with the data types set correctly.
Most fields in the ACS data are simple integers, but a few are not.
See the details section for specifics.
}
\details{
Most fields in the ACS PUMS data are either integer-coded categoricals or
counts, but there are exceptions, and the exceptions vary by year and span
(1 year or 5 year data). This function loads most fields as integers and handles
the exceptions correctly. The exceptions are:
1. \code{RT}, the record type, is 'H' in housing records and 'P' in person
records. For any given file, it is constant so it is dropped.
2. \code{SERIALNO}, is loaded as a string because it overflows on
5-year data as an integer. It is only used for joining person records
to housing records so this is fine.
3. Inflation factors. In older data, this is just \code{ADJUST}.
In recent years, housing data has \code{ADJINC} and \code{ADJHSG},
and person records have just \code{ADJINC}. In any case, they are
7-digit integers equal to the inflation factor times \code{1e6}.
This function returns them as floats after multiplication by \code{1e-6}.
4. \code{NAICSP} is an occupation code. It is quasi-numeric with a few
values like "33641M1". It is returned as a character column.
5. \code{SOCP} or \code{SOCP10} and \code{SOCP12}. These are quasi-numeric
occupation codes like \code{NAICSP}. They have a few values like "4750XX".
In 1-year files and some 5-year files, \code{SOCP} is present. In other
files both \code{SOCP10} and \code{SOCP12} are present. In either case,
they are returned as character columns.
6. \code{OCCP} or \code{OCCP10} and \code{OCCP12} are occupation codes.
\code{OCCP10} and \code{OCCP12} are nearly numeric; they include two NA
values, one for not in labor force and the other indicates that the code
for that row is under the other system. They are loaded as character columns
to preserve that information. \code{OCCP} can load as numeric, but it is
loaded as character for consistency with \code{OCCP10} and \code{OCCP12}.
}
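\examples{
\dontrun{
## Minimal sketch, assuming a locally downloaded ACS PUMS housing csv;
## the file name below is hypothetical -- substitute your own file.
h <- load.pums("psam_husa.csv")
class(h$SERIALNO)  # "character", see point 2 in Details
class(h$ADJINC)    # numeric, already rescaled by 1e-6 (point 3)
class(h$NAICSP)    # "character" (point 4)
}
}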
|
/man/load.pums.Rd
|
no_license
|
davidthaler/PUMSutils
|
R
| false | true | 2,417 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load.pums.R
\name{load.pums}
\alias{load.pums}
\title{Load ACS PUMS csv files in a ready-to-use form.}
\usage{
load.pums(path)
}
\arguments{
\item{path}{path to a csv file with ACS PUMS housing or population data.}
}
\value{
data frame with ACS PUMS housing or population data from a csv file
}
\description{
Load the ACS PUMS data from a csv file with the data types set correctly.
Most fields in the ACS data are simple integers, but a few are not.
See the details section for specifics.
}
\details{
Most fields in the ACS PUMS data are either integer-coded categoricals or
counts, but there are exceptions, and the exceptions vary by year and span
(1 year or 5 year data). This function loads most fields as integers and handles
the exceptions correctly. The exceptions are:
1. \code{RT}, the record type, is 'H' in housing records and 'P' in person
records. For any given file, it is constant so it is dropped.
2. \code{SERIALNO}, is loaded as a string because it overflows on
5-year data as an integer. It is only used for joining person records
to housing records so this is fine.
3. Inflation factors. In older data, this is just \code{ADJUST}.
In recent years, housing data has \code{ADJINC} and \code{ADJHSG},
and person records have just \code{ADJINC}. In any case, they are
7-digit integers equal to the inflation factor times \code{1e6}.
This function returns them as floats after multiplication by \code{1e-6}.
4. \code{NAICSP} is an occupation code. It is quasi-numeric with a few
values like "33641M1". It is returned as a character column.
5. \code{SOCP} or \code{SOCP10} and \code{SOCP12}. These are quasi-numeric
occupation codes like \code{NAICSP}. They have a few values like "4750XX".
In 1-year files and some 5-year files, \code{SOCP} is present. In other
files both \code{SOCP10} and \code{SOCP12} are present. In either case,
they are returned as character columns.
6. \code{OCCP} or \code{OCCP10} and \code{OCCP12} are occupation codes.
\code{OCCP10} and \code{OCCP12} are nearly numeric; they include two NA
values, one for not in labor force and the other indicates that the code
for that row is under the other system. They are loaded as character columns
to preserve that information. \code{OCCP} can load as numeric, but it is
loaded as character for consistency with \code{OCCP10} and \code{OCCP12}.
}
|
# server.R
library(maps)
library(mapproj)
source('helpers.R')
counties <- readRDS("data/counties.rds")
shinyServer(function(input, output) {
output$text1 <- renderText({
"You have selected this"
})
output$text2 <- renderText({
paste("you have selected", input$var)
})
output$text3 <- renderText({
paste("the range is", input$range[1], 'to', input$range[2])
})
output$map <- renderPlot({
data <- switch(input$var,
"Percent White" = counties$white,
"Percent Black" = counties$black,
"Percent Hispanic" = counties$hispanic,
"Percent Asian" = counties$asian)
color <- switch(input$var,
"Percent White" = "darkgreen",
"Percent Black" = "black",
"Percent Hispanic" = "darkorange",
"Percent Asian" = "darkviolet")
legend <- switch(input$var,
"Percent White" = "% White",
"Percent Black" = "% Black",
"Percent Hispanic" = "% Hispanic",
"Percent Asian" = "% Asian")
percent_map(var=data, color=color, legend.title=legend,
max=input$range[2], min=input$range[1] )
})
}
)
|
/server.R
|
no_license
|
abhiabhishekthakur001/myapp
|
R
| false | false | 1,283 |
r
|
# server.R
library(maps)
library(mapproj)
source('helpers.R')
counties <- readRDS("data/counties.rds")
shinyServer(function(input, output) {
output$text1 <- renderText({
"You have selected this"
})
output$text2 <- renderText({
paste("you have selected", input$var)
})
output$text3 <- renderText({
paste("the range is", input$range[1], 'to', input$range[2])
})
output$map <- renderPlot({
data <- switch(input$var,
"Percent White" = counties$white,
"Percent Black" = counties$black,
"Percent Hispanic" = counties$hispanic,
"Percent Asian" = counties$asian)
color <- switch(input$var,
"Percent White" = "darkgreen",
"Percent Black" = "black",
"Percent Hispanic" = "darkorange",
"Percent Asian" = "darkviolet")
legend <- switch(input$var,
"Percent White" = "% White",
"Percent Black" = "% Black",
"Percent Hispanic" = "% Hispanic",
"Percent Asian" = "% Asian")
percent_map(var=data, color=color, legend.title=legend,
max=input$range[2], min=input$range[1] )
})
}
)
|
# TIME TO COMPLETE: 4 minutes
# point to the secondary store
myNameNode <- "wasb://forecasts@aapocblob.blob.core.windows.net"
myPort <- 0
# Location of the data
bigDataDirRoot <- "/data"
# set compute context to spark
mySparkCluster <- RxSpark(consoleOutput=TRUE, nameNode=myNameNode, port=myPort)
rxSetComputeContext("local")
# define HDFS file system
hdfsFS <- RxHdfsFileSystem(hostName=myNameNode, port=myPort)
# specify the input file in HDFS to analyze
inputfile_pmi <- file.path(bigDataDirRoot,"QuarterlyPMI_01apr16.csv")
pmi_text <- RxTextData(file = inputfile_pmi, missingValueString = "NA", fileSystem = hdfsFS)
pmi <- rxImport(pmi_text)
inputfile_bDat <- file.path(bigDataDirRoot,"bDat_00000.csv")
bDat_text <- RxTextData(file = inputfile_bDat, missingValueString = "NA", fileSystem = hdfsFS)
bDat <- rxImport(bDat_text)
inputfile_bDatwoNAPOA <- file.path(bigDataDirRoot,"bDatwoNAPA_00000.csv")
bDatwoNAPA_text <- RxTextData(file = inputfile_bDatwoNAPOA, missingValueString = "NA", fileSystem = hdfsFS)
bDat.woNAPA <- rxImport(bDatwoNAPA_text)
require(forecastHybrid)
pmi<-pmi[!is.na(pmi$Year),]
names(pmi)<-c("Quarter","Year","US.PMI","CHINA.PMI","JAPAN.PMI","FRA.PMI","GER.PMI","ITA.PMI","UK.PMI","GLOBAL.PMI")
pmi$QuarterMod<-pmi$Quarter %% 4
pmi$QuarterMod[pmi$QuarterMod == 0]<-4
pmi$QuarterText<-as.matrix(sapply(pmi$QuarterMod,FUN=function(x){switch(x,"Q1","Q2","Q3","Q4")}))
pmi$REQUEST_YRQTR<-paste(pmi$Year,"-",pmi$QuarterText,sep="")
getForecast<-function(x,geo,napaLabel){
# Sort by quarter, just in case
x<-x[order(x$REQUEST_YRQTR),]
if(geo == "GLOBAL"){
myArimaCovar<-x$GLOBAL.PMI
#colnames(myArimaCovar)<-names(x)[c(3)]
}
if(geo == "AMERICAS"){
myArimaCovar<-x$US.PMI
#colnames(myArimaCovar)<-names(x)[c(3)]
}
if(geo == "EMEIA"){
myArimaCovar<-cbind(x$FRA.PMI,x$GER.PMI,x$ITA.PMI,x$UK.PMI)
colnames(myArimaCovar)<-names(x)[c(3:6)]
}
if(geo == "APAC"){
myArimaCovar<-cbind(x$CHINA.PMI,x$JAPAN.PMI)
colnames(myArimaCovar)<-names(x)[c(3:4)]
}
myTS<-ts(data=x$BOOKED_AMOUNT,start=c(2000,1),deltat=1/4)
modH<-try(hybridModel(myTS,a.arg=list(xreg=myArimaCovar)),silent=TRUE)
usedCovar<-TRUE
if(!is.hybridModel(modH)){
modH<-hybridModel(myTS)
usedCovar<-FALSE
}
if(usedCovar){
if(is.element(geo,c("EMEIA","APAC"))){
mod<-forecast(modH,h=2,level=c(80,90,95,99),xreg=t(matrix(myArimaCovar[nrow(myArimaCovar),],ncol(myArimaCovar),2)))
}else{
mod<-forecast(modH,h=2,level=c(80,90,95,99),xreg=as.matrix(rep(myArimaCovar[length(myArimaCovar)],2)))
}
}else{
mod<-forecast(modH,h=2,level=c(80,90,95,99))
}
# Assemble output
outDat<-x[c("REQUEST_YRQTR")]
outDat$FORECAST<-0
outDat$BOOKED_AMOUNT<-x$BOOKED_AMOUNT
outDat$LOWER80<-NA
outDat$LOWER90<-NA
outDat$LOWER95<-NA
outDat$LOWER99<-NA
outDat$UPPER80<-NA
outDat$UPPER90<-NA
outDat$UPPER95<-NA
outDat$UPPER99<-NA
# this is ugly - but it scales
temp<-capture.output(print(mod))
temp<-temp[c(2:length(temp))]
futureQuarters<-matrix("",length(temp),1)
for(i in 1:length(temp)){
tempVec<-strsplit(temp[i]," ")
futureQuarters[i]<-paste(tempVec[[1]][1],"-",tempVec[[1]][2],sep="")
}
forecastDat<-data.frame(REQUEST_YRQTR=futureQuarters)
forecastDat$BOOKED_AMOUNT<-mod$mean
forecastDat$FORECAST<-1
forecastDat$LOWER80<-mod$lower[,1]
forecastDat$LOWER90<-mod$lower[,2]
forecastDat$LOWER95<-mod$lower[,3]
forecastDat$LOWER99<-mod$lower[,4]
forecastDat$UPPER80<-mod$upper[,1]
forecastDat$UPPER90<-mod$upper[,2]
forecastDat$UPPER95<-mod$upper[,3]
forecastDat$UPPER99<-mod$upper[,4]
outDat<-rbind(outDat,forecastDat)
names(outDat)[c(3:11)]<-paste(names(outDat)[c(3:11)],".",geo,napaLabel,sep="")
return(outDat)
}
runForecasts<-function(){
# Forecast reference date (based on most internal clock recent quarter end - specify manually if desired)
curDate<-as.character(Sys.Date())
curDate.year<-substring(curDate,1,4)
curDate.month<-substring(curDate,6,7)
curDate.quarter<-ifelse(curDate.month <= 3,1,
ifelse(curDate.month <= 6,2,
ifelse(curDate.month <= 9,3,4)))
refMonthDay<-switch(curDate.quarter,"03-31","06-30","09-30","12-31")
refDate<-paste(curDate.year,"-",refMonthDay,sep="")
# Pull data once, apply to all forecasts
# sqlstat<-paste("select concat(concat(substr(request_quarter,4,4),'-'),substr(request_quarter,1,2)) request_yrqtr,geo_continent,stripe,booked_amount from apps.nidw_bookings_std_v where request_year >= 2000 and request_date <= TO_DATE('2016-03-15', 'YYYY-MM-DD') and geo_continent != 'No Geo Continent' order by request_yrqtr",sep="")
# conn<-odbcConnect("dwmrt_analysis",uid="NIBIZ_INSIDER",pwd="3tn4")
# bDat<-sqlQuery(conn,sqlstat,stringsAsFactors=FALSE)
# odbcClose(conn)
# bDat <- read.csv("bDat_00000.csv",stringsAsFactors = FALSE,sep=",")
# sqlstat<-paste("select concat(concat(substr(request_quarter,4,4),'-'),substr(request_quarter,1,2)) request_yrqtr,geo_continent,stripe,booked_amount from apps.nidw_bookings_std_v where request_year >= 2000 and request_date <= TO_DATE('2016-03-15', 'YYYY-MM-DD') and geo_continent != 'No Geo Continent' and (LARGE_COMPLEX_OPP not like 'Napa%' and BUNDLE_CUSTOMER_NAME != 'NAPABUNDLE' and GOLDEN_ORGANIZATION_NAME not like 'Apple%' and BILL_TO_MASTER_ORG_NAME != 'Apple' and BILL_TO_ORGANIZATION_NAME not like 'Apple%' and SHIP_TO_ORGANIZATION_NAME not like 'Apple%') order by request_yrqtr",sep="")
# conn<-odbcConnect("dwmrt_analysis",uid="NIBIZ_INSIDER",pwd="3tn4")
# bDat.woNAPA<-sqlQuery(conn,sqlstat,stringsAsFactors=FALSE)
# odbcClose(conn)
# bDat.woNAPA <- read.csv("bDatwoNAPA_00000.csv",stringsAsFactors = FALSE,sep=",")
# List of forecasting calls with ETL before calls here - should link to each other
# Global forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT,by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","GLOBAL.PMI")])
forecastResults<-getForecast(dat,"GLOBAL","")
# Global forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT,by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","GLOBAL.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"GLOBAL",".noNAPA"))
# Americas forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT[bDat$GEO_CONTINENT == "Americas"],by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR[bDat$GEO_CONTINENT == "Americas"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","US.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"AMERICAS",""))
# Americas forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT[bDat.woNAPA$GEO_CONTINENT == "Americas"],by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR[bDat.woNAPA$GEO_CONTINENT == "Americas"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","US.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"AMERICAS",".noNAPA"))
# EMEIA forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT[bDat$GEO_CONTINENT == "EMEIA"],by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR[bDat$GEO_CONTINENT == "EMEIA"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","FRA.PMI","GER.PMI","ITA.PMI","UK.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"EMEIA",""))
# EMEIA forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT[bDat.woNAPA$GEO_CONTINENT == "EMEIA"],by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR[bDat.woNAPA$GEO_CONTINENT == "EMEIA"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","FRA.PMI","GER.PMI","ITA.PMI","UK.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"EMEIA",".noNAPA"))
# APAC forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT[bDat$GEO_CONTINENT == "APAC"],by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR[bDat$GEO_CONTINENT == "APAC"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","CHINA.PMI","JAPAN.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"APAC",""))
# APAC forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT[bDat.woNAPA$GEO_CONTINENT == "APAC"],by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR[bDat.woNAPA$GEO_CONTINENT == "APAC"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","CHINA.PMI","JAPAN.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"APAC",".noNAPA"))
return(forecastResults)
}
results <- rxExec(runForecasts)
# results <- runForecasts()
View(results)
# write.csv(results, file="results.csv")
# rxHadoopCopyFromLocal("results.csv", dest = "adl://aapocdls.azuredatalakestore.net/dalton_test")
|
/Spark_WASB_WORKING.R
|
no_license
|
dqharris6/NI
|
R
| false | false | 8,920 |
r
|
# TIME TO COMPLETE: 4 minutes
# point to the secondary store
myNameNode <- "wasb://forecasts@aapocblob.blob.core.windows.net"
myPort <- 0
# Location of the data
bigDataDirRoot <- "/data"
# set compute context to spark
mySparkCluster <- RxSpark(consoleOutput=TRUE, nameNode=myNameNode, port=myPort)
rxSetComputeContext("local")
# define HDFS file system
hdfsFS <- RxHdfsFileSystem(hostName=myNameNode, port=myPort)
# specify the input file in HDFS to analyze
inputfile_pmi <- file.path(bigDataDirRoot,"QuarterlyPMI_01apr16.csv")
pmi_text <- RxTextData(file = inputfile_pmi, missingValueString = "NA", fileSystem = hdfsFS)
pmi <- rxImport(pmi_text)
inputfile_bDat <- file.path(bigDataDirRoot,"bDat_00000.csv")
bDat_text <- RxTextData(file = inputfile_bDat, missingValueString = "NA", fileSystem = hdfsFS)
bDat <- rxImport(bDat_text)
inputfile_bDatwoNAPOA <- file.path(bigDataDirRoot,"bDatwoNAPA_00000.csv")
bDatwoNAPA_text <- RxTextData(file = inputfile_bDatwoNAPOA, missingValueString = "NA", fileSystem = hdfsFS)
bDat.woNAPA <- rxImport(bDatwoNAPA_text)
require(forecastHybrid)
pmi<-pmi[!is.na(pmi$Year),]
names(pmi)<-c("Quarter","Year","US.PMI","CHINA.PMI","JAPAN.PMI","FRA.PMI","GER.PMI","ITA.PMI","UK.PMI","GLOBAL.PMI")
pmi$QuarterMod<-pmi$Quarter %% 4
pmi$QuarterMod[pmi$QuarterMod == 0]<-4
pmi$QuarterText<-as.matrix(sapply(pmi$QuarterMod,FUN=function(x){switch(x,"Q1","Q2","Q3","Q4")}))
pmi$REQUEST_YRQTR<-paste(pmi$Year,"-",pmi$QuarterText,sep="")
getForecast<-function(x,geo,napaLabel){
# Sort by quarter, just in case
x<-x[order(x$REQUEST_YRQTR),]
if(geo == "GLOBAL"){
myArimaCovar<-x$GLOBAL.PMI
#colnames(myArimaCovar)<-names(x)[c(3)]
}
if(geo == "AMERICAS"){
myArimaCovar<-x$US.PMI
#colnames(myArimaCovar)<-names(x)[c(3)]
}
if(geo == "EMEIA"){
myArimaCovar<-cbind(x$FRA.PMI,x$GER.PMI,x$ITA.PMI,x$UK.PMI)
colnames(myArimaCovar)<-names(x)[c(3:6)]
}
if(geo == "APAC"){
myArimaCovar<-cbind(x$CHINA.PMI,x$JAPAN.PMI)
colnames(myArimaCovar)<-names(x)[c(3:4)]
}
myTS<-ts(data=x$BOOKED_AMOUNT,start=c(2000,1),deltat=1/4)
modH<-try(hybridModel(myTS,a.arg=list(xreg=myArimaCovar)),silent=TRUE)
usedCovar<-TRUE
if(!is.hybridModel(modH)){
modH<-hybridModel(myTS)
usedCovar<-FALSE
}
if(usedCovar){
if(is.element(geo,c("EMEIA","APAC"))){
mod<-forecast(modH,h=2,level=c(80,90,95,99),xreg=t(matrix(myArimaCovar[nrow(myArimaCovar),],ncol(myArimaCovar),2)))
}else{
mod<-forecast(modH,h=2,level=c(80,90,95,99),xreg=as.matrix(rep(myArimaCovar[length(myArimaCovar)],2)))
}
}else{
mod<-forecast(modH,h=2,level=c(80,90,95,99))
}
# Assemble output
outDat<-x[c("REQUEST_YRQTR")]
outDat$FORECAST<-0
outDat$BOOKED_AMOUNT<-x$BOOKED_AMOUNT
outDat$LOWER80<-NA
outDat$LOWER90<-NA
outDat$LOWER95<-NA
outDat$LOWER99<-NA
outDat$UPPER80<-NA
outDat$UPPER90<-NA
outDat$UPPER95<-NA
outDat$UPPER99<-NA
# this is ugly - but it scales
temp<-capture.output(print(mod))
temp<-temp[c(2:length(temp))]
futureQuarters<-matrix("",length(temp),1)
for(i in 1:length(temp)){
tempVec<-strsplit(temp[i]," ")
futureQuarters[i]<-paste(tempVec[[1]][1],"-",tempVec[[1]][2],sep="")
}
forecastDat<-data.frame(REQUEST_YRQTR=futureQuarters)
forecastDat$BOOKED_AMOUNT<-mod$mean
forecastDat$FORECAST<-1
forecastDat$LOWER80<-mod$lower[,1]
forecastDat$LOWER90<-mod$lower[,2]
forecastDat$LOWER95<-mod$lower[,3]
forecastDat$LOWER99<-mod$lower[,4]
forecastDat$UPPER80<-mod$upper[,1]
forecastDat$UPPER90<-mod$upper[,2]
forecastDat$UPPER95<-mod$upper[,3]
forecastDat$UPPER99<-mod$upper[,4]
outDat<-rbind(outDat,forecastDat)
names(outDat)[c(3:11)]<-paste(names(outDat)[c(3:11)],".",geo,napaLabel,sep="")
return(outDat)
}
runForecasts<-function(){
# Forecast reference date (based on most internal clock recent quarter end - specify manually if desired)
curDate<-as.character(Sys.Date())
curDate.year<-substring(curDate,1,4)
curDate.month<-substring(curDate,6,7)
curDate.quarter<-ifelse(curDate.month <= 3,1,
ifelse(curDate.month <= 6,2,
ifelse(curDate.month <= 9,3,4)))
refMonthDay<-switch(curDate.quarter,"03-31","06-30","09-30","12-31")
refDate<-paste(curDate.year,"-",refMonthDay,sep="")
# Pull data once, apply to all forecasts
# sqlstat<-paste("select concat(concat(substr(request_quarter,4,4),'-'),substr(request_quarter,1,2)) request_yrqtr,geo_continent,stripe,booked_amount from apps.nidw_bookings_std_v where request_year >= 2000 and request_date <= TO_DATE('2016-03-15', 'YYYY-MM-DD') and geo_continent != 'No Geo Continent' order by request_yrqtr",sep="")
# conn<-odbcConnect("dwmrt_analysis",uid="NIBIZ_INSIDER",pwd="3tn4")
# bDat<-sqlQuery(conn,sqlstat,stringsAsFactors=FALSE)
# odbcClose(conn)
# bDat <- read.csv("bDat_00000.csv",stringsAsFactors = FALSE,sep=",")
# sqlstat<-paste("select concat(concat(substr(request_quarter,4,4),'-'),substr(request_quarter,1,2)) request_yrqtr,geo_continent,stripe,booked_amount from apps.nidw_bookings_std_v where request_year >= 2000 and request_date <= TO_DATE('2016-03-15', 'YYYY-MM-DD') and geo_continent != 'No Geo Continent' and (LARGE_COMPLEX_OPP not like 'Napa%' and BUNDLE_CUSTOMER_NAME != 'NAPABUNDLE' and GOLDEN_ORGANIZATION_NAME not like 'Apple%' and BILL_TO_MASTER_ORG_NAME != 'Apple' and BILL_TO_ORGANIZATION_NAME not like 'Apple%' and SHIP_TO_ORGANIZATION_NAME not like 'Apple%') order by request_yrqtr",sep="")
# conn<-odbcConnect("dwmrt_analysis",uid="NIBIZ_INSIDER",pwd="3tn4")
# bDat.woNAPA<-sqlQuery(conn,sqlstat,stringsAsFactors=FALSE)
# odbcClose(conn)
# bDat.woNAPA <- read.csv("bDatwoNAPA_00000.csv",stringsAsFactors = FALSE,sep=",")
# List of forecasting calls with ETL before calls here - should link to each other
# Global forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT,by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","GLOBAL.PMI")])
forecastResults<-getForecast(dat,"GLOBAL","")
# Global forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT,by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","GLOBAL.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"GLOBAL",".noNAPA"))
# Americas forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT[bDat$GEO_CONTINENT == "Americas"],by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR[bDat$GEO_CONTINENT == "Americas"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","US.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"AMERICAS",""))
# Americas forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT[bDat.woNAPA$GEO_CONTINENT == "Americas"],by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR[bDat.woNAPA$GEO_CONTINENT == "Americas"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","US.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"AMERICAS",".noNAPA"))
# EMEIA forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT[bDat$GEO_CONTINENT == "EMEIA"],by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR[bDat$GEO_CONTINENT == "EMEIA"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","FRA.PMI","GER.PMI","ITA.PMI","UK.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"EMEIA",""))
# EMEIA forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT[bDat.woNAPA$GEO_CONTINENT == "EMEIA"],by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR[bDat.woNAPA$GEO_CONTINENT == "EMEIA"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","FRA.PMI","GER.PMI","ITA.PMI","UK.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"EMEIA",".noNAPA"))
# APAC forecast (with NAPA)
dat<-aggregate(bDat$BOOKED_AMOUNT[bDat$GEO_CONTINENT == "APAC"],by=list(REQUEST_YRQTR=bDat$REQUEST_YRQTR[bDat$GEO_CONTINENT == "APAC"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","CHINA.PMI","JAPAN.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"APAC",""))
# APAC forecast (without NAPA)
dat<-aggregate(bDat.woNAPA$BOOKED_AMOUNT[bDat.woNAPA$GEO_CONTINENT == "APAC"],by=list(REQUEST_YRQTR=bDat.woNAPA$REQUEST_YRQTR[bDat.woNAPA$GEO_CONTINENT == "APAC"]),FUN=sum)
names(dat)[2]<-"BOOKED_AMOUNT"
dat<-merge(dat,pmi[c("REQUEST_YRQTR","CHINA.PMI","JAPAN.PMI")])
forecastResults<-merge(forecastResults,getForecast(dat,"APAC",".noNAPA"))
return(forecastResults)
}
results <- rxExec(runForecasts)
# results <- runForecasts()
View(results)
# write.csv(results, file="results.csv")
# rxHadoopCopyFromLocal("results.csv", dest = "adl://aapocdls.azuredatalakestore.net/dalton_test")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/etkpf_util_R.R
\name{lyapunov}
\alias{lyapunov}
\title{Lyapunov equation solver (used by get_Weps_riccati):}
\usage{
lyapunov(C, D, E, n)
}
\arguments{
\item{C}{left multiplier (A' + X1)}
\item{D}{right multiplier (A + X1)}
\item{E}{rhs (Pag + X1 X1')}
\item{n}{ensemble size}
}
\value{
X
}
\description{
CX + XD = E
in Riccati Newton's step used to solve for X2 given X1:
(A' + X1) X2 + X2 (A + X1) = (Pag + X1 X1')
}
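\examples{
\dontrun{
## Minimal stand-alone sketch: solve C X + X D = E via the vec/Kronecker identity
## (I kron C + t(D) kron I) vec(X) = vec(E). This can serve as an independent
## reference for the solution returned by lyapunov(); the test matrices are made up.
set.seed(1)
n <- 3
C <- crossprod(matrix(rnorm(n * n), n))
D <- crossprod(matrix(rnorm(n * n), n))
E <- matrix(rnorm(n * n), n)
X <- matrix(solve(kronecker(diag(n), C) + kronecker(t(D), diag(n)), as.vector(E)), n)
max(abs(C %*% X + X %*% D - E))  # should be ~ 0
}
}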
|
/man/lyapunov.Rd
|
no_license
|
robertsy/ETKPF
|
R
| false | true | 503 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/etkpf_util_R.R
\name{lyapunov}
\alias{lyapunov}
\title{Lyapunov equation solver (used by get_Weps_riccati):}
\usage{
lyapunov(C, D, E, n)
}
\arguments{
\item{C}{left multiplier (A' + X1)}
\item{D}{right multiplier (A + X1)}
\item{E}{rhs (Pag + X1 X1')}
\item{n}{ensemble size}
}
\value{
X
}
\description{
CX + XD = E
in Riccati Newton's step used to solve for X2 given X1:
(A' + X1) X2 + X2 (A + X1) = (Pag + X1 X1')
}
|
# Analyzing the results through graphs
# ***** This is version 2.0 of this script, updated on 23/05/2017 *****
# ***** This script can be run on versions 3.3.1, 3.3.2, 3.3.3 and 3.4.0 of the R language *****
# ***** We recommend using version 3.4.0 of the R language *****
# Setting up the working directory
# Put the working directory you are using on your computer between quotes
# setwd("~/Dropbox/DSA/BigDataAnalytics-R-Azure/Cap11")
# getwd()
Azure <- FALSE
if(Azure){
source("ClassTools.R")
compFrame <- maml.mapInputPort(1)
} else {
compFrame <- outFrame
}
## Using dplyr to filter rows with incorrect classification
require(dplyr)
creditTest <- cbind(creditTest, scored = scoreFrame[ ,1] )
creditTest <- creditTest %>% filter(CreditStatus != scored)
## Plot of the residuals for the levels of each factor
require(ggplot2)
colNames <- c("CheckingAcctStat", "Duration_f", "Purpose",
"CreditHistory", "SavingsBonds", "Employment",
"CreditAmount_f", "Employment")
lapply(colNames, function(x){
if(is.factor(creditTest[,x])) {
ggplot(creditTest, aes_string(x)) +
geom_bar() +
facet_grid(. ~ CreditStatus) +
ggtitle(paste("Numero de creditos ruim/bom por",x))}})
## Plot of the residuals conditioned on the variables CreditStatus vs CheckingAcctStat
lapply(colNames, function(x){
if(is.factor(creditTest[,x]) & x != "CheckingAcctStat") {
ggplot(creditTest, aes(CheckingAcctStat)) +
geom_bar() +
facet_grid(paste(x, " ~ CreditStatus"))+
ggtitle(paste("Numero de creditos bom/ruim por CheckingAcctStat e ",x))
}})
|
/RFundamentos/Part 11/08-AvaliaModeloGrafico.R
|
no_license
|
DaniloLFaria/DataScienceAcademy
|
R
| false | false | 1,652 |
r
|
# Analyzing the results through graphs
# ***** This is version 2.0 of this script, updated on 23/05/2017 *****
# ***** This script can be run on versions 3.3.1, 3.3.2, 3.3.3 and 3.4.0 of the R language *****
# ***** We recommend using version 3.4.0 of the R language *****
# Setting up the working directory
# Put the working directory you are using on your computer between quotes
# setwd("~/Dropbox/DSA/BigDataAnalytics-R-Azure/Cap11")
# getwd()
Azure <- FALSE
if(Azure){
source("ClassTools.R")
compFrame <- maml.mapInputPort(1)
} else {
compFrame <- outFrame
}
## Using dplyr to filter rows with incorrect classification
require(dplyr)
creditTest <- cbind(creditTest, scored = scoreFrame[ ,1] )
creditTest <- creditTest %>% filter(CreditStatus != scored)
## Plot of the residuals for the levels of each factor
require(ggplot2)
colNames <- c("CheckingAcctStat", "Duration_f", "Purpose",
"CreditHistory", "SavingsBonds", "Employment",
"CreditAmount_f", "Employment")
lapply(colNames, function(x){
if(is.factor(creditTest[,x])) {
ggplot(creditTest, aes_string(x)) +
geom_bar() +
facet_grid(. ~ CreditStatus) +
ggtitle(paste("Numero de creditos ruim/bom por",x))}})
## Plot of the residuals conditioned on the variables CreditStatus vs CheckingAcctStat
lapply(colNames, function(x){
if(is.factor(creditTest[,x]) & x != "CheckingAcctStat") {
ggplot(creditTest, aes(CheckingAcctStat)) +
geom_bar() +
facet_grid(paste(x, " ~ CreditStatus"))+
ggtitle(paste("Numero de creditos bom/ruim por CheckingAcctStat e ",x))
}})
|
## Working on Machine Learning Code
# 2 January 2018 - Cat
## Based off Tabak 2018 paper - using code from https://github.com/mikeyEcology/MLWIC
# Clear workspace
rm(list=ls()) # remove everything currently held in the R memory
options(stringsAsFactors=FALSE)
graphics.off()
# Load libraries
#library(devtools)
#devtools::install_github("mikeyEcology/MLWIC")
library(reticulate)
library(tensorflow)
library(MLWIC)
library(dplyr) # full_join() is used further below when verifying results
setup(python_loc = "/Users/CatherineChamberlain/anaconda3/bin/python") ## takes a minute or two!
setwd("~/Documents/git/cameratrap")
## Step 2: load tensorflow -
#devtools::install_github("rstudio/tensorflow")
#library(tensorflow)
#install_tensorflow()
sess = tf$Session() ## Sometimes gives error "I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA"
# Doesn't seem to pose a problem for our uses
hello <- tf$constant('Hello, TensorFlow!')
sess$run(hello)
# Step 3: edit names in dataframe
#d<-read.csv("image_labels.csv")
#d$a<-substr(d$EK000002.JPG.0, 0, 12)
#d$b<-substr(d$EK000002.JPG.0, 14, 14)
#d<-d[,-1]
#colnames(d)<-c("a", "b")
#d$a<-as.character(d$a)
#d<-unname(d)
#write.csv(d, file="~/Documents/git/cameratrap/image_labels.csv", row.names=FALSE)
if(FALSE){
## Prepare for Training using Wild.ID classifications
tr<-read.csv("WildlifeDetections_CameraTrap.csv", header=TRUE)
trx<-subset(tr, select=c("Sampling.Event", "Raw.Name", "Genus", "Species"))
trx$imageID <- paste(trx$Sampling.Event, trx$Raw.Name, sep="_")
trx$animaltype <- paste(trx$Genus, trx$Species, sep="_")
trx$class <- as.integer(as.factor(trx$animaltype))
#specieslist <- subset(trx, select=c("animaltype", "class"))
#specieslist <- specieslist[!duplicated(specieslist),]
#write.csv(specieslist, file="listofspecies.csv", row.names=FALSE)
trx <- subset(trx, select=c("imageID", "class"))
images <- trx
colnames(trx) <- NULL
write.csv(trx, file="train_image_labels.csv", row.names=FALSE)
### Prepare for download...
images$camera <- substr(images$imageID, 0, 6)
images$camera <- ifelse(images$camera == "ATXing", substr(images$imageID, 8, 13), images$camera)
cam01A <- images[(images$camera=="CAM01A"),]
file.copy("source_file.txt", "destination_folder")
fileNames <- Sys.glob("*.csv")
train(path_prefix = "/Users/CatherineChamberlain/Documents/git/cameratrap/images", # this is the absolute path to the images.
data_info = "/Users/CatherineChamberlain/Documents/git/cameratrap/train_image_labels.csv", # this is the location of the csv containing image information. It has Unix linebreaks and no headers.
model_dir = "/Users/CatherineChamberlain/Documents/git/cameratrap", # assuming this is where you stored the L1 folder in Step 3 of the instructions: github.com/mikeyEcology/MLWIC/blob/master/README
python_loc = "/usr/local/bin/", # the location of Python on your computer.
num_classes = 3, # this is the number of species from our model. When you train your own model, you will replace this with the number of species/groups of species in your dataset
log_dir_train = "/Users/CatherineChamberlain/Documents/git/cameratrap/training_output" # this will be a folder that contains the trained model (call it whatever you want). You will specify this folder as the "log_dir" when you classify images using this trained model. For example, the log_dir for the model included in this package is called "USDA182"
)
}
classify(path_prefix = "/Users/CatherineChamberlain/Documents/git/cameratrap/images", # this is the absolute path to the images.
data_info = "/Users/CatherineChamberlain/Documents/git/cameratrap/image_labels.csv", # this is the location of the csv containing image information. It has Unix linebreaks and no headers.
model_dir = "/Users/CatherineChamberlain/Documents/git/cameratrap", # assuming this is where you stored the L1 folder in Step 3 of the instructions: github.com/mikeyEcology/MLWIC/blob/master/README
python_loc = "/usr/local/bin/", # the location of Python on your computer.
save_predictions = "model_predictions.txt" # this is the default and you should use it unless you have reason otherwise.
)
make_output(output_location = "~/Documents/git/cameratrap", # the output csv will be stored on my dekstop
output_name = "zamba_results.csv", # the name of the csv I want to create with my output
model_dir = "~/Documents/git/cameratrap", # the location where I stored the L1 folder
saved_predictions = "output_class_names.txt" # the same name that I used for save_predictions in the classify function (if I didn't use default, I would need to change this).
)
######## Now check out example_results.cvs vs wild.id results
exam <- read.csv("example_results.csv", header=TRUE)
exam$Raw.Name <- substr(exam$fileName, 61, 72)
idnames <- read.csv("classID_names.csv", header=TRUE)
exam$Photo.Type.Sp <- NA
for(i in c(1:nrow(exam))){
for(j in c(1:nrow(idnames)))
exam$Photo.Type.Sp[i] <- ifelse(exam$guess1[i] == idnames$Class.ID[j], idnames$Group.name[j], exam$Photo.Type.Sp[i])
}
examx <- subset(exam, select = c("Raw.Name", "Photo.Type.Sp", "guess1"))
verify <- full_join(examx, trx)
animals <- c(0:10, 12:24, 25)
verify$hit <- NA
verify$hit <- ifelse(verify$Photo.Type == "Misfired" & verify$Photo.Type.Sp == "Human",
1, verify$hit)
verify$hit <- ifelse(verify$Photo.Type == "Blank" & verify$Photo.Type.Sp == "Empty",
1, verify$hit)
verify$hit <- ifelse(verify$Photo.Type == "Animal" & verify$guess1 %in% animals,
1, verify$hit)
verify$hit <- ifelse(is.na(verify$hit), 0, verify$hit)
accuracy = length(verify$hit[(verify$hit==1)]) / length(verify$hit) # 71.8% accuracy
misses <- subset(verify, verify$hit == 0)
mis.humans <- subset(misses, misses$Photo.Type=="Misfired")
photos <- unique(mis.humans$Raw.Name)
exam.humans <- exam[(exam$Raw.Name %in% photos),]
|
/Processing.R
|
no_license
|
cchambe12/cameratrap
|
R
| false | false | 5,994 |
r
|
## Working on Machine Learning Code
# 2 January 2018 - Cat
## Based off Tabak 2018 paper - using code from https://github.com/mikeyEcology/MLWIC
# Clear workspace
rm(list=ls()) # remove everything currently held in the R memory
options(stringsAsFactors=FALSE)
graphics.off()
# Load libraries
#library(devtools)
#devtools::install_github("mikeyEcology/MLWIC")
library(reticulate)
library(tensorflow)
library(MLWIC)
library(dplyr) # full_join() is used further below when verifying results
setup(python_loc = "/Users/CatherineChamberlain/anaconda3/bin/python") ## takes a minute or two!
setwd("~/Documents/git/cameratrap")
## Step 2: load tensorflow -
#devtools::install_github("rstudio/tensorflow")
#library(tensorflow)
#install_tensorflow()
sess = tf$Session() ## Sometimes gives error "I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA"
# Doesn't seem to pose a problem for our uses
hello <- tf$constant('Hello, TensorFlow!')
sess$run(hello)
# Step 3: edit names in dataframe
#d<-read.csv("image_labels.csv")
#d$a<-substr(d$EK000002.JPG.0, 0, 12)
#d$b<-substr(d$EK000002.JPG.0, 14, 14)
#d<-d[,-1]
#colnames(d)<-c("a", "b")
#d$a<-as.character(d$a)
#d<-unname(d)
#write.csv(d, file="~/Documents/git/cameratrap/image_labels.csv", row.names=FALSE)
if(FALSE){
## Prepare for Training using Wild.ID classifications
tr<-read.csv("WildlifeDetections_CameraTrap.csv", header=TRUE)
trx<-subset(tr, select=c("Sampling.Event", "Raw.Name", "Genus", "Species"))
trx$imageID <- paste(trx$Sampling.Event, trx$Raw.Name, sep="_")
trx$animaltype <- paste(trx$Genus, trx$Species, sep="_")
trx$class <- as.integer(as.factor(trx$animaltype))
#specieslist <- subset(trx, select=c("animaltype", "class"))
#specieslist <- specieslist[!duplicated(specieslist),]
#write.csv(specieslist, file="listofspecies.csv", row.names=FALSE)
trx <- subset(trx, select=c("imageID", "class"))
images <- trx
colnames(trx) <- NULL
write.csv(trx, file="train_image_labels.csv", row.names=FALSE)
### Prepare for download...
images$camera <- substr(images$imageID, 0, 6)
images$camera <- ifelse(images$camera == "ATXing", substr(images$imageID, 8, 13), images$camera)
cam01A <- images[(images$camera=="CAM01A"),]
file.copy("source_file.txt", "destination_folder")
fileNames <- Sys.glob("*.csv")
train(path_prefix = "/Users/CatherineChamberlain/Documents/git/cameratrap/images", # this is the absolute path to the images.
data_info = "/Users/CatherineChamberlain/Documents/git/cameratrap/train_image_labels.csv", # this is the location of the csv containing image information. It has Unix linebreaks and no headers.
model_dir = "/Users/CatherineChamberlain/Documents/git/cameratrap", # assuming this is where you stored the L1 folder in Step 3 of the instructions: github.com/mikeyEcology/MLWIC/blob/master/README
python_loc = "/usr/local/bin/", # the location of Python on your computer.
num_classes = 3, # this is the number of species from our model. When you train your own model, you will replace this with the number of species/groups of species in your dataset
log_dir_train = "/Users/CatherineChamberlain/Documents/git/cameratrap/training_output" # this will be a folder that contains the trained model (call it whatever you want). You will specify this folder as the "log_dir" when you classify images using this trained model. For example, the log_dir for the model included in this package is called "USDA182"
)
}
classify(path_prefix = "/Users/CatherineChamberlain/Documents/git/cameratrap/images", # this is the absolute path to the images.
data_info = "/Users/CatherineChamberlain/Documents/git/cameratrap/image_labels.csv", # this is the location of the csv containing image information. It has Unix linebreaks and no headers.
model_dir = "/Users/CatherineChamberlain/Documents/git/cameratrap", # assuming this is where you stored the L1 folder in Step 3 of the instructions: github.com/mikeyEcology/MLWIC/blob/master/README
python_loc = "/usr/local/bin/", # the location of Python on your computer.
save_predictions = "model_predictions.txt" # this is the default and you should use it unless you have reason otherwise.
)
make_output(output_location = "~/Documents/git/cameratrap", # the output csv will be stored on my dekstop
output_name = "zamba_results.csv", # the name of the csv I want to create with my output
model_dir = "~/Documents/git/cameratrap", # the location where I stored the L1 folder
saved_predictions = "output_class_names.txt" # the same name that I used for save_predictions in the classify function (if I didn't use default, I would need to change this).
)
######## Now check out example_results.cvs vs wild.id results
exam <- read.csv("example_results.csv", header=TRUE)
exam$Raw.Name <- substr(exam$fileName, 61, 72)
idnames <- read.csv("classID_names.csv", header=TRUE)
exam$Photo.Type.Sp <- NA
for(i in c(1:nrow(exam))){
for(j in c(1:nrow(idnames)))
exam$Photo.Type.Sp[i] <- ifelse(exam$guess1[i] == idnames$Class.ID[j], idnames$Group.name[j], exam$Photo.Type.Sp[i])
}
examx <- subset(exam, select = c("Raw.Name", "Photo.Type.Sp", "guess1"))
verify <- full_join(examx, trx)
animals <- c(0:10, 12:24, 25)
verify$hit <- NA
verify$hit <- ifelse(verify$Photo.Type == "Misfired" & verify$Photo.Type.Sp == "Human",
1, verify$hit)
verify$hit <- ifelse(verify$Photo.Type == "Blank" & verify$Photo.Type.Sp == "Empty",
1, verify$hit)
verify$hit <- ifelse(verify$Photo.Type == "Animal" & verify$guess1 %in% animals,
1, verify$hit)
verify$hit <- ifelse(is.na(verify$hit), 0, verify$hit)
accuracy = length(verify$hit[(verify$hit==1)]) / length(verify$hit) # 71.8% accuracy
misses <- subset(verify, verify$hit == 0)
mis.humans <- subset(misses, misses$Photo.Type=="Misfired")
photos <- unique(mis.humans$Raw.Name)
exam.humans <- exam[(exam$Raw.Name %in% photos),]
|
estimate.swept.area = function( gsi=NULL, x=NULL, getnames=FALSE, threshold.cv=10 ){
if (getnames) return( c("sweptarea.mean", "depth.mean", "depth.sd", "wingspread.mean", "wingspread.sd" ) )
gsi$sweptarea.mean = NA
gsi$depth.mean = NA
gsi$depth.sd = NA
gsi$wingspread.mean = NA
gsi$wingspread.sd = NA
# debug
if (FALSE){
gsi = gs[gii,]
x= nm[ii,]
}
x = x[order( x$timestamp ) ,]
bc = which( x$timestamp >=gsi$bc0.datetime & x$timestamp <= gsi$bc1.datetime )
x = x[bc,]
##--------------------------------
# timestamps have frequencies higher than 1 sec .. duplicates are created and this can pose a problem
x$ts = difftime( x$timestamp, min(x$timestamp), units="secs" )
x$lon.sm = NA # interpolated locations
x$lat.sm = NA # interpolated locations
x$time.increment = NA
ndat = nrow(x)
if ( FALSE ) {
plot (latitude~longitude, data=x, pch=20, cex=.1)
plot (depth~timestamp, data=x, pch=20, cex=.1)
plot (depth~ts, data=x, pch=20, cex=.1)
plot (wingspread~ts, data=x, pch=20, cex=.1)
plot (doorspread~ts, data=x, pch=20, cex=.1)
plot (doorspread~ts, data=x[x$door.and.wing.reliable,], pch=20, cex=.2, col="green")
}
mean.velocity.m.per.sec = gsi$speed * 1.852 * 1000 / 3600
x$distance = as.numeric( x$ts * mean.velocity.m.per.sec )
npos = sqrt( length( unique( x$longitude)) ^2 + length(unique(x$latitude))^2)
# ------------
# clean up distance /track
x$distance.sm = NA
if (npos > 30) {
# "interpolated.using.velocity" .. for older data with poor GPS resolution
# use ship velocity and distance of tow estimated on board to compute incremental distance, assuming a straight line tow
nn = abs( as.numeric( diff( x$ts ) ) )
dd = median( nn[nn>0], na.rm=TRUE )
x$ts = jitter( as.numeric( x$ts ), amount=dd / 20) # add noise since the smoothers are unhappy with duplicated time stamps
uu = smooth.spline( x=x$ts, y=x$longitude, keep.data=FALSE)
x$lon.sm = uu$y
vv = smooth.spline( x=x$ts, y=x$latitude, keep.data=FALSE)
x$lat.sm = vv$y
pos = c("lon.sm", "lat.sm")
dh = rep(0, ndat-1)
for( j in 1:(ndat-1) ) dh[j] = geodist( point=x[j,pos], locations=x[j+1,pos], method="vincenty" ) * 1000 # m .. slower but high res
# dh = zapsmall( dh, 1e-9 )
x$distance.sm = c( 0, cumsum( dh ) )
}
# ------------
# use the GPS-smoothed track when available, otherwise fall back on the speed-based distance
dist.use = if ( all( !is.finite( x$distance.sm ) ) ) x$distance else x$distance.sm
# ------------
# clean up doorspread
doorspread.median = median(x$doorspread, na.rm=T)
doorspread.sd = sd(x$doorspread, na.rm=T)
threshold.cv = 0.5 ## ?? good value ??
x$tsv = as.numeric( x$ts )
x$doorspread.sm = interpolate.xy.robust( xy=x[,c("tsv", "doorspread" )], method="loess" )
if ( doorspread.sd / doorspread.median > threshold.cv ) {
# doorspread too noisy to integrate: median doorspread X total tow length
SA.door = doorspread.median * max( dist.use, na.rm=TRUE )
} else {
# piece-wise (trapezoidal) integration of the smoothed doorspread along the tow
len.dist = diff( dist.use )
mean.doorspreads = ( x$doorspread.sm[-1] + x$doorspread.sm[-ndat] ) / 2
partial.area = len.dist * mean.doorspreads
SA.door = sum( partial.area, na.rm=TRUE ) # m^2
}
# ------------
# wingspread .. repeat as above
wingspread.median = median(x$wingspread, na.rm=T)
wingspread.sd = sd(x$wingspread, na.rm=T)
x$wingspread.sm = interpolate.xy.robust( xy=x[,c("tsv", "wingspread" )], method="loess" )
if ( wingspread.sd / wingspread.median > threshold.cv ) {
# wingspread too noisy to integrate: median wingspread X total tow length
SA.wing = wingspread.median * max( dist.use, na.rm=TRUE )
} else {
# piece-wise (trapezoidal) integration of the smoothed wingspread along the tow
len.dist = diff( dist.use )
mean.wingspreads = ( x$wingspread.sm[-1] + x$wingspread.sm[-ndat] ) / 2
partial.area = len.dist * mean.wingspreads
SA.wing = sum( partial.area, na.rm=TRUE ) # m^2
}
# fill the summary fields promised by getnames(); sweptarea.mean is assumed here to be
# the wing-based estimate (the door-based area is kept in SA.door)
gsi$sweptarea.mean = SA.wing
gsi$depth.mean = mean( x$depth, na.rm=TRUE )
gsi$depth.sd = sd( x$depth, na.rm=TRUE )
gsi$wingspread.mean = mean( x$wingspread, na.rm=TRUE )
gsi$wingspread.sd = wingspread.sd
return( gsi)
}
|
/groundfish/src/_Rfunctions/net_mensuration/estimate.swept.area.r
|
no_license
|
jgmunden/ecomod
|
R
| false | false | 3,821 |
r
|
estimate.swept.area = function( gsi=NULL, x=NULL, getnames=FALSE, threshold.cv=10 ){
if (getnames) return( c("sweptarea.mean", "depth.mean", "depth.sd", "wingspread.mean", "wingspread.sd" ) )
gsi$sweptarea.mean = NA
gsi$depth.mean = NA
gsi$depth.sd = NA
gsi$wingspread.mean = NA
gsi$wingspread.sd = NA
# debug
if (FALSE){
gsi = gs[gii,]
x= nm[ii,]
}
x = x[order( x$timestamp ) ,]
bc = which( x$timestamp >=gsi$bc0.datetime & x$timestamp <= gsi$bc1.datetime )
x = x[bc,]
##--------------------------------
# timestamps have frequencies higher than 1 sec .. duplicates are created and this can pose a problem
x$ts = difftime( x$timestamp, min(x$timestamp), units="secs" )
x$lon.sm = NA # interpolated locations
x$lat.sm = NA # interpolated locations
x$time.increment = NA
ndat = nrow(x)
if ( FALSE ) {
plot (latitude~longitude, data=x, pch=20, cex=.1)
plot (depth~timestamp, data=x, pch=20, cex=.1)
plot (depth~ts, data=x, pch=20, cex=.1)
plot (wingspread~ts, data=x, pch=20, cex=.1)
plot (doorspread~ts, data=x, pch=20, cex=.1)
plot (doorspread~ts, data=x[x$door.and.wing.reliable,], pch=20, cex=.2, col="green")
}
mean.velocity.m.per.sec = gsi$speed * 1.852 * 1000 / 3600
x$distance = as.numeric( x$ts * mean.velocity.m.per.sec )
npos = sqrt( length( unique( x$longitude)) ^2 + length(unique(x$latitude))^2)
# ------------
# clean up distance /track
x$distance.sm = NA
if (npos > 30) {
# "interpolated.using.velocity" .. for older data with poor GPS resolution
# use ship velocity and distance of tow estimated on board to compute incremental distance, assuming a straight line tow
nn = abs( as.numeric( diff( x$ts ) ) )
dd = median( nn[nn>0], na.rm=TRUE )
x$ts = jitter( as.numeric( x$ts ), amount=dd / 20) # add noise since the smoothers are unhappy with duplicated time stamps
uu = smooth.spline( x=x$ts, y=x$longitude, keep.data=FALSE)
x$lon.sm = uu$y
vv = smooth.spline( x=x$ts, y=x$latitude, keep.data=FALSE)
x$lat.sm = vv$y
pos = c("lon.sm", "lat.sm")
dh = rep(0, ndat-1)
for( j in 1:(ndat-1) ) dh[j] = geodist( point=x[j,pos], locations=x[j+1,pos], method="vincenty" ) * 1000 # m .. slower but high res
# dh = zapsmall( dh, 1e-9 )
x$distance.sm = c( 0, cumsum( dh ) )
}
# ------------
# use the GPS-smoothed track when available, otherwise fall back on the speed-based distance
dist.use = if ( all( !is.finite( x$distance.sm ) ) ) x$distance else x$distance.sm
# ------------
# clean up doorspread
doorspread.median = median(x$doorspread, na.rm=T)
doorspread.sd = sd(x$doorspread, na.rm=T)
threshold.cv = 0.5 ## ?? good value ??
x$tsv = as.numeric( x$ts )
x$doorspread.sm = interpolate.xy.robust( xy=x[,c("tsv", "doorspread" )], method="loess" )
if ( doorspread.sd / doorspread.median > threshold.cv ) {
# doorspread too noisy to integrate: median doorspread X total tow length
SA.door = doorspread.median * max( dist.use, na.rm=TRUE )
} else {
# piece-wise (trapezoidal) integration of the smoothed doorspread along the tow
len.dist = diff( dist.use )
mean.doorspreads = ( x$doorspread.sm[-1] + x$doorspread.sm[-ndat] ) / 2
partial.area = len.dist * mean.doorspreads
SA.door = sum( partial.area, na.rm=TRUE ) # m^2
}
# ------------
# wingspread .. repeat as above
wingspread.median = median(x$wingspread, na.rm=T)
wingspread.sd = sd(x$wingspread, na.rm=T)
x$wingspread.sm = interpolate.xy.robust( xy=x[,c("tsv", "wingspread" )], method="loess" )
if ( wingspread.sd / wingspread.median > threshold.cv ) {
# wingspread too noisy to integrate: median wingspread X total tow length
SA.wing = wingspread.median * max( dist.use, na.rm=TRUE )
} else {
# piece-wise (trapezoidal) integration of the smoothed wingspread along the tow
len.dist = diff( dist.use )
mean.wingspreads = ( x$wingspread.sm[-1] + x$wingspread.sm[-ndat] ) / 2
partial.area = len.dist * mean.wingspreads
SA.wing = sum( partial.area, na.rm=TRUE ) # m^2
}
# fill the summary fields promised by getnames(); sweptarea.mean is assumed here to be
# the wing-based estimate (the door-based area is kept in SA.door)
gsi$sweptarea.mean = SA.wing
gsi$depth.mean = mean( x$depth, na.rm=TRUE )
gsi$depth.sd = sd( x$depth, na.rm=TRUE )
gsi$wingspread.mean = mean( x$wingspread, na.rm=TRUE )
gsi$wingspread.sd = wingspread.sd
return( gsi)
}
|
#ConditionalProbability
#SivaguruB
- Class: text
Output: "Conditional Probability. (Slides for this and other Data Science courses may be found at github https://github.com/DataScienceSpecialization/courses/. If you care to use them, they must be downloaded as a zip file and viewed locally. This lesson corresponds to 06_Statistical_Inference/03_Conditional_Probability.)"
- Class: text
Output: In this lesson, as the name suggests, we'll discuss conditional probability.
- Class: mult_question
Output: If you were given a fair die and asked what the probability of rolling a 3 is, what would you reply?
AnswerChoices: 1/6; 1/2; 1/3; 1/4; 1
CorrectAnswer: 1/6
AnswerTests: omnitest(correctVal='1/6')
Hint: There are 6 possible outcomes and you want to know the probability of 1 of them.
- Class: mult_question
Output: Suppose the person who gave you the dice rolled it behind your back and told you the roll was odd. Now what is the probability that the roll was a 3?
AnswerChoices: 1/6; 1/2; 1/3; 1/4; 1
CorrectAnswer: 1/3
AnswerTests: omnitest(correctVal='1/3')
Hint: Given that there are 3 odd numbers on the die your possibilities have been reduced to 3 and you want to know the probability of 1 of them.
- Class: text
Output: The probability of this second event is conditional on this new information, so the probability of rolling a 3 is now one third.
- Class: text
Output: We represent the conditional probability of an event A given that B has occurred with the notation P(A|B). More specifically, we define the conditional probability of event A, given that B has occurred with the following.
- Class: text
Output: P(A|B) = P(A & B) / P(B). P(A|B) is the probability that BOTH A and B occur divided by the probability that B occurs.
- Class: mult_question
Output: Back to our dice example. Which of the following expressions represents P(A&B), where A is the event of rolling a 3 and B is the event of the roll being odd?
AnswerChoices: 1/6; 1/2; 1/3; 1/4; 1
CorrectAnswer: 1/6
AnswerTests: omnitest(correctVal='1/6')
Hint: Here A is a subset of B so the probability of both A AND B happening is the probability of A happening.
- Class: mult_question
Output: Continuing with the same dice example. Which of the following expressions represents P(A&B)/P(B), where A is the event of rolling a 3 and B is the event of the roll being odd?
AnswerChoices: (1/6)/(1/2); (1/2)/(1/6); (1/3)/(1/2); 1/6
CorrectAnswer: (1/6)/(1/2)
AnswerTests: omnitest(correctVal='(1/6)/(1/2)')
Hint: Here A is a subset of B so the probability of both A AND B happening is the probability of A happening. The probability of B is the reciprocal of the number of odd numbers between 1 and 6 (inclusive).
- Class: text
Output: From the definition of P(A|B), we can write P(A&B) = P(A|B) * P(B), right? Let's use this to express P(B|A).
- Class: text
Output: P(B|A) = P(B&A)/P(A) = P(A|B) * P(B)/P(A). This is a simple form of Bayes' Rule which relates the two conditional probabilities.
- Class: text
Output: Suppose we don't know P(A) itself, but only know its conditional probabilities, that is, the probability that it occurs if B occurs and the probability that it occurs if B doesn't occur. These are P(A|B) and P(A|~B), respectively. We use ~B to represent 'not B' or 'B complement'.
- Class: text
Output: We can then express P(A) = P(A|B) * P(B) + P(A|~B) * P(~B) and substitute this into the denominator of Bayes' Formula.
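# Quick check of this decomposition in R, using the fair-die example from above
# (A = rolling a 3, B = rolling an odd number):
# (1/3) * (1/2) + 0 * (1/2)  # = 1/6, which is P(A)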
- Class: text
Output: P(B|A) = P(A|B) * P(B) / ( P(A|B) * P(B) + P(A|~B) * P(~B) )
- Class: text
Output: Bayes' Rule has applicability to medical diagnostic tests. We'll now discuss the example of the HIV test from the slides.
- Class: text
Output: Suppose we know the accuracy rates of the test for both the positive case (positive result when the patient has HIV) and negative (negative test result when the patient doesn't have HIV). These are referred to as test sensitivity and specificity, respectively.
- Class: mult_question
Output: Let 'D' be the event that the patient has HIV, and let '+' indicate a positive test result and '-' a negative. What information do we know? Recall that we know the accuracy rates of the HIV test.
AnswerChoices: P(+|D) and P(-|~D); P(+|~D) and P(-|~D); P(+|~D) and P(-|D); P(+|D) and P(-|D)
CorrectAnswer: P(+|D) and P(-|~D)
AnswerTests: omnitest(correctVal='P(+|D) and P(-|~D)')
Hint: The clue here is accuracy. The test is positive when the patient has the disease and negative when he doesn't.
- Class: mult_question
Output: Suppose a person gets a positive test result and comes from a population with a HIV prevalence rate of .001. We'd like to know the probability that he really has HIV. Which of the following represents this?
AnswerChoices: P(+|D); P(D|+); P(~D|+); P(D|-)
CorrectAnswer: P(D|+)
AnswerTests: omnitest(correctVal='P(D|+)')
Hint: We've already been given the information that the test was positive '+'. We want to know whether D is present given the positive test result.
- Class: text
Output: By Bayes' Formula, P(D|+) = P(+|D) * P(D) / ( P(+|D) * P(D) + P(+|~D) * P(~D) )
- Class: text
Output: We can use the prevalence of HIV in the patient's population as the value for P(D). Note that since P(~D)=1-P(D) and P(+|~D) = 1-P(-|~D) we can calculate P(D|+). In other words, we know values for all the terms on the right side of the equation. Let's do it!
- Class: cmd_question
Output: Disease prevalence is .001. Test sensitivity (+ result with disease) is 99.7% and specificity (- result without disease) is 98.5%. First compute the numerator, P(+|D)*P(D). (This is also part of the denominator.)
CorrectAnswer: .997*.001
AnswerTests: equiv_val(0.000997)
Hint: Multiply the test sensitivity by the prevalence.
- Class: cmd_question
Output: Now solve for the remainder of the denominator, P(+|~D)*P(~D).
CorrectAnswer: (1-.985)*(1-.001)
AnswerTests: equiv_val(.014985)
Hint: Multiply the complement of test specificity by the complement of prevalence.
- Class: cmd_question
Output: Now put the pieces together to compute the probability that the patient has the disease given his positive test result, P(D|+). Plug your last two answers into the formula P(+|D) * P(D) / ( P(+|D) * P(D) + P(+|~D) * P(~D) ) to compute P(D|+).
CorrectAnswer: .000997/(.000997+.014985)
AnswerTests: equiv_val(.06238268)
Hint: Divide (.997*.001) by (.997*.001 + .015*.999)
- Class: text
Output: So the patient has a 6% chance of having HIV given this positive test result. The expression P(D|+) is called the positive predictive value. Similarly, P(~D|-), is called the negative predictive value, the probability that a patient does not have the disease given a negative test result.
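# The same numbers also give the negative predictive value just mentioned,
# P(~D|-) = P(-|~D)*P(~D) / ( P(-|~D)*P(~D) + P(-|D)*P(D) ), computable in R as:
# (.985 * .999) / (.985 * .999 + (1 - .997) * .001)  # roughly 0.999997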
- Class: mult_question
Output: The diagnostic likelihood ratio of a positive test, DLR_+, is the ratio of the two + conditional probabilities, one given the presence of disease and the other given the absence. Specifically, DLR_+ = P(+|D) / P(+|~D). Similarly, the DLR_- is defined as a ratio. Which of the following do you think represents the DLR_-?
AnswerChoices: P(-|D) / P(-|~D); P(+|~D) / P(-|D); P(-|D) / P(+|~D); I haven't a clue.
CorrectAnswer: P(-|D) / P(-|~D)
AnswerTests: omnitest(correctVal='P(-|D) / P(-|~D)')
Hint: The signs of the test in both the numerator and denominator have to agree as they did for the DLR_+.
- Class: text
Output: Recall that P(+|D) and P(-|~D), (test sensitivity and specificity respectively) are accuracy rates of a diagnostic test for the two possible results. They should be close to 1 because no one would take an inaccurate test, right? Since DLR_+ = P(+|D) / P(+|~D) we recognize the numerator as test sensitivity and the denominator as the complement of test specificity.
- Class: mult_question
Output: Since the numerator is close to 1 and the denominator is close to 0 do you expect DLR_+ to be large or small?
AnswerChoices: Large; Small; I haven't a clue.
CorrectAnswer: Large
AnswerTests: omnitest(correctVal='Large')
Hint: What happens when you divide a large number by a much smaller one?
- Class: mult_question
Output: Now recall that DLR_- = P(-|D) / P(-|~D). Here the numerator is the complement of sensitivity and the denominator is specificity. From the arithmetic and what you know about accuracy tests, do you expect DLR_- to be large or small?
AnswerChoices: Large; Small; I haven't a clue.
CorrectAnswer: Small
AnswerTests: omnitest(correctVal='Small')
Hint: What happens when you divide a small number by a larger one?
- Class: text
Output: Now a little more about likelihood ratios. Recall Bayes' Formula. P(D|+) = P(+|D) * P(D) / ( P(+|D) * P(D) + P(+|~D) * P(~D) ) and notice that if we replace all occurrences of 'D' with '~D', the denominator doesn't change. This means that if we formed a ratio of P(D|+) to P(~D|+) we'd get a much simpler expression (since the complicated denominators would cancel each other out). Like this....
- Class: text
Output: P(D|+) / P(~D|+) = P(+|D) * P(D) / (P(+|~D) * P(~D)) = P(+|D)/P(+|~D) * P(D)/P(~D).
- Class: mult_question
Output: The left side of the equation represents the post-test odds of disease given a positive test result. The equation says that the post-test odds of disease equals the pre-test odds of disease (that is, P(D)/P(~D) ) times
AnswerChoices: the DLR_+; the DLR_-; I haven't a clue.
CorrectAnswer: the DLR_+
AnswerTests: omnitest(correctVal='the DLR_+')
Hint: Do you recognize the expression P(+|D) / P(+|~D)? The '+' signs are a big clue.
- Class: text
Output: In other words, a DLR_+ value equal to N indicates that the hypothesis of disease is N times more supported by the data than the hypothesis of no disease.
- Class: text
Output: Taking the formula above and replacing the '+' signs with '-' yields a formula with the DLR_-. Specifically, P(D|-) / P(~D|-) = P(-|D) / P(-|~D) * P(D)/P(~D). As with the positive case, this relates the odds of disease post-test, P(D|-) / P(~D|-), to those of disease pre-test, P(D)/P(~D).
- Class: mult_question
Output: The equation P(D|-) / P(~D|-) = P(-|D) / P(-|~D) * P(D)/P(~D) says what about the post-test odds of disease relative to the pre-test odds of disease given negative test results?
AnswerChoices: post-test odds are greater than pre-test odds; post-test odds are less than pre-test odds; I haven't a clue.
CorrectAnswer: post-test odds are less than pre-test odds
AnswerTests: omnitest(correctVal='post-test odds are less than pre-test odds')
Hint: Remember that we argued (hopefully convincingly) that DLR_- is small (less than 1). Post-test odds = Pre-test odds * DLR_- so post-test odds are a fraction of the pre-test odds.
- Class: text
Output: Let's cover some basics now.
- Class: text
Output: Two events, A and B, are independent if they have no effect on each other. Formally, P(A&B) = P(A)*P(B). It's easy to see that if A and B are independent, then P(A|B)=P(A). The definition is similar for random variables X and Y.
- Class: mult_question
Output: We've seen examples of independence in our previous probability lessons. Let's review a little. What's the probability of rolling a '6' twice in a row using a fair die?
AnswerChoices: 1/6; 2/6; 1/36; 1/2
CorrectAnswer: 1/36
AnswerTests: omnitest(correctVal='1/36')
Hint: Square the probability of rolling a single '6' since the two rolls are independent of one another.
- Class: mult_question
Output: You're given a fair die and asked to roll it twice. What's the probability that the second roll of the die matches the first?
AnswerChoices: 1/6; 2/6; 1/36; 1/2
CorrectAnswer: 1/6
AnswerTests: omnitest(correctVal='1/6')
Hint: Now the events aren't independent. You don't care what the first roll is so that's a probability 1 event. The second roll just has to match the first, so that's a 1/6 event.
- Class: mult_question
Output: If the chance of developing a disease with a genetic or environmental component is p, is the chance of both you and your sibling developing that disease p*p?
AnswerChoices: Yes; No
CorrectAnswer: No
AnswerTests: omnitest(correctVal='No')
Hint: The events aren't independent since genetic or environmental factors likely will affect the outcome.
- Class: text
Output: We'll conclude with iid. Random variables are said to be iid if they are independent and identically distributed. By independent we mean "statistically unrelated from one another". Identically distributed means that "all have been drawn from the same population distribution".
- Class: text
Output: Random variables which are iid are the default model for random samples and many of the important theories of statistics assume that variables are iid. We'll usually assume our samples are random and variables are iid.
- Class: text
Output: Congrats! You've concluded this lesson on conditional probability. We hope you liked it unconditionally.
|
/Swirl/ConditionalProbability.R
|
no_license
|
SivaguruB/Coursera-Statistical-Inference
|
R
| false | false | 12,984 |
r
|
\name{JMbayes}
\alias{JMbayes-package}
\alias{JMbayes}
\docType{package}
\title{
Joint Modeling of Longitudinal and Time-to-Event Data in R under a Bayesian Approach
}
\description{
This package fits shared parameter models for the joint modeling of normal longitudinal responses and event times
under a Bayesian approach. Various options for the survival model and the association structure are
provided.
}
\details{
\tabular{ll}{
Package: \tab JMbayes\cr
Type: \tab Package\cr
Version: \tab 0.8-61\cr
Date: \tab 2017-02-07\cr
License: \tab GPL (>=2)\cr
}
The package has a single model-fitting function called \code{\link{jointModelBayes}}, which accepts as main arguments a linear
mixed effects object fit returned by function \code{lme()} of package \pkg{nlme}, and a Cox model object fit returned
by function \code{coxph()} of package \pkg{survival}. The \code{survMod} argument specifies the type of survival submodel
to be fitted; available options are a relative risk model with a Weibull baseline hazard (default) and a relative risk model
with a B-spline approximation of the log baseline risk function. In addition, the \code{param} argument specifies the association structure
between the longitudinal and survival processes; available options are: \code{"td-value"} which is the classic formulation used in
Wulfsohn and Tsiatis (1997); \code{"td-extra"} which is a user-defined, possibly time-dependent, term based on the specification of
the \code{extraForm} argument of \code{\link{jointModelBayes}}. This could be used to include terms, such as the time-dependent
slope (i.e., the derivative of the subject-specific linear predictor of the linear mixed model) and the time-dependent cumulative
effect (i.e., the integral of the subject-specific linear predictor of the linear mixed model); \code{"td-both"} which is the
combination of the previous two parameterizations, i.e., the current value and the user-specified terms are included in the linear
predictor of the relative risk model; and \code{"shared-RE"} where only the random effects of the linear mixed model are included
in the linear predictor of the survival submodel.
The package also offers several utility functions that can extract useful information from fitted joint models. The most
important of those are included in the \bold{See also} Section below.
}
\author{
Dimitris Rizopoulos
Maintainer: Dimitris Rizopoulos <d.rizopoulos@erasmusmc.nl>
}
\references{
Guo, X. and Carlin, B. (2004) Separate and joint modeling of longitudinal and event time data
using standard computer packages. \emph{The American Statistician} \bold{54}, 16--24.
Henderson, R., Diggle, P. and Dobson, A. (2000) Joint modelling of longitudinal measurements
and event time data. \emph{Biostatistics} \bold{1}, 465--480.
Rizopoulos, D. (2016). The R package JMbayes for fitting joint models for longitudinal and
time-to-event data using MCMC. \emph{Journal of Statistical Software} \bold{72(7)}, 1--45.
doi:10.18637/jss.v072.i07.
Rizopoulos, D. (2012) \emph{Joint Models for Longitudinal and Time-to-Event Data: with
Applications in R}. Boca Raton: Chapman and Hall/CRC.
Rizopoulos, D. (2011) Dynamic predictions and prospective accuracy in joint models for longitudinal
and time-to-event data. \emph{Biometrics} \bold{67}, 819--829.
Rizopoulos, D. and Ghosh, P. (2011) A Bayesian semiparametric multivariate joint model for multiple
longitudinal outcomes and a time-to-event. \emph{Statistics in Medicine} \bold{30}, 1366--1380.
Rizopoulos, D., Verbeke, G. and Molenberghs, G. (2010) Multiple-imputation-based residuals and diagnostic
plots for joint models of longitudinal and survival outcomes. \emph{Biometrics} \bold{66}, 20--29.
Tsiatis, A. and Davidian, M. (2004) Joint modeling of longitudinal and time-to-event data: an overview.
\emph{Statistica Sinica} \bold{14}, 809--834.
Wulfsohn, M. and Tsiatis, A. (1997) A joint model for survival and longitudinal data measured with error.
\emph{Biometrics} \bold{53}, 330--339.
}
\keyword{ package }
\keyword{ multivariate }
\seealso{
\code{\link{jointModelBayes}},
\code{\link{survfitJM}},
\code{\link{aucJM}},
\code{\link{dynCJM}},
\code{\link{prederrJM}},
\code{\link{predict.JMbayes}},
\code{\link{logLik.JMbayes}}
}
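\examples{
\dontrun{
## Illustrative sketch: the data sets (pbc2, pbc2.id) and the formulas below
## are placeholders that show the intended workflow of an lme() fit plus a
## coxph() fit (with x = TRUE) being passed to jointModelBayes().
library("nlme")
library("survival")
lmeFit <- lme(log(serBilir) ~ year, random = ~ year | id, data = pbc2)
coxFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)
jointFit <- jointModelBayes(lmeFit, coxFit, timeVar = "year")
summary(jointFit)
}
}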
|
/man/JMbayes.Rd
|
no_license
|
iron0012/JMbayes
|
R
| false | false | 4,290 |
rd
|
#' mutation sample cutoff gene based
#'
#' Subset a internal mutSpec file keeping only samples within the specified gene
#' list
#' @name waterfall_geneAlt
#' @param x a data frame in long format with columns 'gene', 'trv_type'
#' @param genes character vector listing genes to plot
#' @return a subset data frame
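#' @examples
#' \dontrun{
#' # Illustrative sketch with made-up values; 'x' only needs the columns used
#' # here ('gene', 'trv_type'), mirroring the long-format input described above.
#' x <- data.frame(gene = c("TP53", "KRAS", "EGFR"),
#'                 trv_type = c("missense", "nonsense", "missense"))
#' waterfall_geneAlt(x, genes = c("TP53", "KRAS"))
#' }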
waterfall_geneAlt <- function(x, genes)
{
message("Removing genes not in: ", toString(genes))
# Perform quality checks
if(typeof(genes) != 'character' & class(genes) != 'character')
{
memo <- paste0("argument supplied to main.genes is not a character ",
"vector, attempting to coerce")
warning(memo)
genes <- as.character(genes)
}
if(!all(toupper(genes) %in% toupper(x$gene)))
{
memo <- paste0("genes supplied in main.genes contains an element not ",
"found in x or it's subsequent subsets")
warning(memo)
}
genes <- c(genes, NA)
x <- x[(toupper(x$gene) %in% toupper(genes)), ]
return(x)
}
|
/R/waterfall_geneAlt.R
|
permissive
|
cauyrd/GenVisR
|
R
| false | false | 1,038 |
r
|
# Authors: Maksym Bondarenko mb4@soton.ac.uk
# Date : March 2018
# Version 0.1
#
#' wpGetindexesWhichValues to get indexes of raster pixels
#' The computation is parallelised, which allows it to work with large rasters
#' @param x Raster* object
#' @param v value of the pixel we would like to get indexes of
#' @param cores Integer. Number of cores to be used
#' @param tp Type of the result to be returned: 'numeric' returns a numeric vector of cell indexes, any other value returns a data frame with the cell indexes and pixel values
#' @param minblocks Integer. Minimum number of blocks. If NULL then it will be calculated automaticly
#' @param silent If FALSE then the progress will be shown
#' @rdname wpGetindexesWhichValues
#' @return numeric
#' @export
#' @examples
#' wpGetindexesWhichValues( x=raster("E:/asm_grid_100m_ccidadminl1.tif"), v=1, cores=4)
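#' # A self-contained sketch (hypothetical values, not a WorldPop data set),
#' # using a small in-memory raster instead of a file on disk:
#' \dontrun{
#' r <- raster::raster(nrows = 10, ncols = 10,
#'                     vals = sample(c(0, 1), 100, replace = TRUE))
#' wpGetindexesWhichValues(x = r, v = 1, cores = 2)
#' }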
wpGetindexesWhichValues <- function(x, v,
cores=NULL,
tp='numeric',
minblocks=NULL,
silent=FALSE) {
tStart <- Sys.time()
x.table <- data.frame(CellIndex=integer(),stringsAsFactors=FALSE)
# get real physical cores in a computer
if (is.null(cores)) {
    max.cores <- parallel::detectCores(logical = TRUE)
cores <- max.cores - 1
}
if (is.null(minblocks)) {
minblocks <- wpGetBlocksNeed(x,cores)
}
blocks <- blockSize(x,minblocks=minblocks)
if (!silent) {
cat(paste0('\nTotal blocks ',blocks$n))
cat('\n')
}
cl <- makeCluster(cores)
registerDoSNOW(cl)
clusterExport(cl, c("x", "v"), envir=environment())
clusterExport(cl, "blocks", envir=environment())
pb <- txtProgressBar(min = 1,
max = blocks$n,
style = 3,
width = 80)
progress <- function(n) {
ch.pb <- unlist(lapply(1:cores,
function(i) {
return(i*round(blocks$n/cores))
}),
use.names=FALSE)
if (n %in% ch.pb & !silent) {
setTxtProgressBar(pb, n)
}else if(n==blocks$n & !silent){
setTxtProgressBar(pb, n)
}
}
opts <- list(progress = progress)
oper <- foreach(i=1: blocks$n ,
.combine=rbind,
.inorder=TRUE,
.packages='raster',
.multicombine=TRUE,
.options.snow = opts) %dopar% {
x_row_data <- getValues(x, row=blocks$row[i], nrows=blocks$nrows[i])
nncol <- ncol(x)
if (i==1){
start.df <- 1
end.df <- blocks$nrows[i]*nncol
}else{
start.df <- nncol*blocks$row[i] - nncol + 1
end.df <- (nncol*blocks$row[i] + blocks$nrows[i]*nncol) - nncol
}
df <- data.frame(CellIndex = as.numeric(start.df:end.df) )
df$v <- as.numeric(x_row_data)
df2 <- df[!is.na(df$v),]
x.table <- df2[df2$v == v, ]
return(x.table)
}
stopCluster(cl)
close(pb)
names(oper) <- c("CellIndex", "v")
tEnd <- Sys.time()
if (!silent) print(paste("Elapsed Processing Time:", wpTimeDiff(tStart,tEnd)))
if (tp =='numeric'){
return(oper$CellIndex)
}else{
return(oper)
}
}
|
/R/wpGetIndexesWhichValues.R
|
no_license
|
wpgp/wpUtilities
|
R
| false | false | 3,575 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 30088
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 30087
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 30087
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b21_PR_1_20.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 10408
c no.of clauses 30088
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 30087
c
c QBFLIB/Sauer-Reimer/ITC99/b21_PR_1_20.qdimacs 10408 30088 E1 [1] 0 230 10137 30087 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ITC99/b21_PR_1_20/b21_PR_1_20.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 719 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.r
\name{db}
\alias{db}
\alias{db_ucsc}
\alias{db_ensembl}
\title{Fetch data from remote databases.}
\usage{
db_ucsc(
dbname,
host = "genome-mysql.cse.ucsc.edu",
user = "genomep",
password = "password",
port = 3306,
...
)
db_ensembl(
dbname,
host = "ensembldb.ensembl.org",
user = "anonymous",
password = "",
port = 3306,
...
)
}
\arguments{
\item{dbname}{name of database}
\item{host}{hostname}
\item{user}{username}
\item{password}{password}
\item{port}{MySQL connection port}
\item{...}{params for connection}
}
\description{
Currently \code{db_ucsc} and \code{db_ensembl} are available for connections.
}
\examples{
\dontrun{
if (require(RMariaDB)) {
library(dplyr)
ucsc <- db_ucsc("hg38")
# fetch the `refGene` tbl
tbl(ucsc, "refGene")
# the `chromInfo` tbls have size information
tbl(ucsc, "chromInfo")
}
}
\dontrun{
if (require(RMariaDB)) {
library(dplyr)
# squirrel genome
ensembl <- db_ensembl("spermophilus_tridecemlineatus_core_67_2")
tbl(ensembl, "gene")
}
}
}
\seealso{
\url{https://genome.ucsc.edu/goldenpath/help/mysql.html}
\url{https://www.ensembl.org/info/data/mysql.html}
}
|
/man/db.Rd
|
permissive
|
rnabioco/valr
|
R
| false | true | 1,228 |
rd
|
source("data_load.R")
#
png(filename="plot4.png",width=480,height=480,units="px")
##Output to PNG
#
## Setup the canvas
par(mfrow=c(2,2))
#
## Top Left
plot(DateTime,Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
#
##Top Right
plot(DateTime,Voltage,type="l",xlab="datetime",ylab="Voltage")
#
##Bottom Left
plot(DateTime,Sub_metering_1,type="l",xlab="",ylab="Energy sub metering")
lines(DateTime,Sub_metering_2,col="red")
lines(DateTime,Sub_metering_3,col="blue")
legend("topright",col=c("black","red","blue"),c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1)
#
##Bottom right
plot(DateTime,Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
tdwbiz/Exploatory-Data---Assignment-1
|
R
| false | false | 717 |
r
|
source("data_load.R")
#
png(filename="plot4.png",width=480,height=480,units="px")
##Output to PNG
#
## Setup the canvas
par(mfrow=c(2,2))
#
## Top Left
plot(DateTime,Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
#
##Top Right
plot(DateTime,Voltage,type="l",xlab="datetime",ylab="Voltage")
#
##Bottom Left
plot(DateTime,Sub_metering_1,type="l",xlab="",ylab="Energy sub metering")
lines(DateTime,Sub_metering_2,col="red")
lines(DateTime,Sub_metering_3,col="blue")
legend("topright",col=c("black","red","blue"),c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1)
#
##Bottom right
plot(DateTime,Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
#' Selection of genes based on statistical significance values computed through Bootstrap-Support Vector Machine (SVM)-Maximum Relevance and Minimum Redundancy (MRMR) methods.
#'
#' @name TopGenesPvalSVMRMR
#' @aliases TopGenesPvalSVMRMR
#' @usage TopGenesPvalSVMRMR(x, y, method, beta, nboot, p.adjust.method, n)
#'
#' @param x Nxp data frame of gene expression values, where, N represents number of genes and p represents samples/time points generated in a case vs. control gene expression study.
#' @param y px1 numeric vector with entries 1 and -1 representing sample/subject labels, where 1 and -1 represents the labels of subjects/ samples for case and control conditions respectively.
#' @param method Character variable representing either 'Linear' or 'Quadratic' method for integrating the weights/scores computed through SVM and MRMR methods.
#' @param beta Scalar representing trade-off between SVM and MRMR weights.
#' @param nboot Scalar representing the number of bootstrap samples to be drawn from the data using simple random sampling with replacement (Bootstrap) procedure.
#' @param p.adjust.method Character representing the method used for multiple hypothesis correction and computation of adjusted p-values. It can be any method out of "holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr".
#' @param n Numeric constant (< N) representing the number of top ranked genes to be selected from the high dimensional gene expression data.
#'
#' @return A character vector giving the names of the specified number of top-ranked, differentially expressed genes selected through the BSM method.
#'
#' @description The function selects the top ranked genes from the high dimensional gene expression data using the statistical significance values computed through Bootstrap-Support Vector Machine-Maximum Relevance and Minimum Redundancy (BSM) approach.
#'
#' @details Selection of genes based on statistical significance values computed through BSM approach.
#' Takes the gene expression data matrix (rows as genes and coloumns as samples) and vector of class labels of subjects (1: case and -1: control) as inputs.
#'
#' @author Samarendra Das <samarendra4849 at gamil.com>
#'
#' @examples
#' x=as.data.frame(matrix(runif(1000), 50))
#' row.names(x) = paste("Gene", 1:50)
#' colnames(x) = paste("Samp", 1:20)
#' y=as.numeric(c(rep(1, 10), rep(-1, 10)))
#' TopGenesPvalSVMRMR(x, y, method="Linear", beta=0.6, nboot=20, p.adjust.method = "BH", n=5)
#'
#' @export
TopGenesPvalSVMRMR <- function (x, y, method, beta, nboot, p.adjust.method, n)
{
this.call = match.call()
  if (!is.numeric(n) || n > nrow(x)) {
warning("n must be numeric and it should be less than number of rows of x")
}
genes.weight <- pvalsvmmrmr (x, y, method, beta, nboot, p.adjust.method, plot=FALSE)[,3]
id <- sort(genes.weight, decreasing=FALSE, index.return = TRUE)$ix
TopGenes <- names(genes.weight) [id] [1:n]
return(TopGenes)
}
############################ TopGenesBootSVMRMR Ends here ##########################################
|
/R/TopGenesPvalSVMRMR.R
|
no_license
|
sam-uofl/BSM
|
R
| false | false | 2,991 |
r
|
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
bMotor <- subset(NEI, NEI$fips == "24510" & NEI$type == "ON-ROAD")
bMotorAGG <- aggregate(Emissions ~ year, bMotor, sum)
ggplot(bMotorAGG, aes(year, Emissions)) +
geom_line() +
geom_point() +
labs(title ="Baltimore Motor Vehicle Emissions by Year", x="Year", y="Motor Vehicle Emissions")
dev.copy(png,"plot5.png", width=480, height=480)
dev.off()
|
/Exploratory Data Analysis/Assignment 2/plot5.R
|
no_license
|
kernelCruncher/Data-Science-Specialisation
|
R
| false | false | 458 |
r
|
confcent=function(X,alpha,tol)
{
x0=percentil(X,alpha/2,tol)
x1=percentil(X,1-alpha/2,tol)
return(c(x0,x1))
}
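# Usage sketch (illustrative; assumes percentil(X, p, tol), defined elsewhere,
# returns the p-th empirical quantile of X computed to tolerance tol):
# X <- rnorm(1000)
# confcent(X, alpha = 0.05, tol = 1e-6) # central 95% interval for X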
|
/Rscripts/confcent.R
|
no_license
|
MartinMasH/StatisticsHawkers
|
R
| false | false | 114 |
r
|
set.seed(102)
tiff_plot = TRUE
if (tiff_plot) {
tiff(filename = "Figure1.tiff",
width = 7, height = 9, units = "in", pointsize = 12, res=300,
compression = "lzw")
}
par(mfrow=c(3,2))
### Null Case -- No selection
Figure_Label = "(a)"
source("ParamsFig1.R")
Sample_size <- 0.07*Sample_size
Admissions_Cutoff <- 0.00 # Overall rank to get accepted into graduate program
Attends_Harvard_Instead <- 100.0 # Overall rank (using Harvard's criteria) above which students go elsewhere
source("PreVsPostPredictors.R")
### Base simulation -- Committees evaluate applications perfectly
Figure_Label = "(b)"
source("ParamsFig1.R")
source("PreVsPostPredictors.R")
### Committees over-estimate the importance of LORs
Figure_Label = "(c)"
source("ParamsFig1.R")
w3 <- 3*w3 # Committee members overweigh the importance of LORs
h3 <- 3*h3 # Committee members overweigh the importance of LORs
source("PreVsPostPredictors.R")
### Committees base their decisions on irrelevant factors
Figure_Label = "(d)"
source("ParamsFig1.R")
w6 <- 6*w6 # Committee members influenced by an irrelevant factor
h7 <- 6*h7 # Committee members influenced by an irrelevant factor
source("PreVsPostPredictors.R")
### Committees over-estimate the importance of GREs
Figure_Label = "(e)"
source("ParamsFig1.R")
w1 <- 3*w1 # Committee members overweigh the importance of GRE scores
h1 <- 3*h1 # Committee members overweigh the importance of GRE scores
source("PreVsPostPredictors.R")
### Committees over-estimate the importance of GREs and irrelevant factors
Figure_Label = "(f)"
source("ParamsFig1.R")
w1 <- 2*w1 # Committee members overweigh the importance of GRE scores
h1 <- 2*h1 # Committee members overweigh the importance of GRE scores
w3 <- 2*w3 # Committee members overweigh the importance of LORs
h3 <- 2*h3 # Committee members overweigh the importance of LORs
source("PreVsPostPredictors.R")
if (tiff_plot) dev.off()
|
/Figures1and2/RunScriptFig1.R
|
no_license
|
jmittler/Admissions-Exploration
|
R
| false | false | 1,921 |
r
|
#' Make a frequency table for a factor
#'
#' @param x factor
#'
#' @return tbl_df
#' @export
#' @examples
#' freq_out(iris$Species)
freq_out <- function(x) {
xdf <- dplyr::data_frame(x)
dplyr::count(xdf, x)
}
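# Usage sketch (illustrative): freq_out(iris$Species) returns a tbl with one
# row per factor level (the three iris species) and a count column `n` of 50.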
|
/R/freq_out.R
|
no_license
|
madsenmat/foofactors
|
R
| false | false | 215 |
r
|
## A simple framework for representing and solving mixed integer linear
## programs (MILPs) of the form
## optimize obj' x
## such that mat %*% x dir rhs
## and mixed integer quadratic programs (MIQPs) of the form
## optimize x' Q x / 2 + c' x
## such that mat %*% x dir rhs
## with possibly given types (C/I/B for continuous/integer/binary) and
## given additional (lower and upper) bounds on x.
## (Default of course x >= 0).
### * MILPs
MILP <-
function(objective, constraints, bounds = NULL, types = NULL,
maximum = FALSE)
{
## Currently, 'constraints' always is a (not necessarily named) list
## with mat, dir and rhs, which really is the most general case of
## linear constraints we can think of. Let us add names for now;
## eventually, there should be more sanity checking and maybe a
## creator for linear constraint objects.
names(constraints) <- c("mat", "dir", "rhs")
.structure(list(objective = objective, constraints = constraints,
bounds = bounds, types = types, maximum = maximum),
class = "MILP")
}
.MILP_solvers <-
c("glpk", "lpsolve", "symphony", "cplex")
solve_MILP <-
function(x, solver = NULL, control = list())
{
## <NOTE>
## Ideally, we would use some registration mechanism for solvers.
## Currently, there is only little support for control arguments.
## In particular, one cannot directly pass arguments to the solver.
## </NOTE>
## Handle the boundary case of no variables.
if(!length(x$objective)) {
y <- .solve_empty_MIP(x)
if(!is.null(nos <- control[["n"]])
&& !identical(as.integer(nos), 1L))
y <- list(y)
return(y)
}
solver <- match.arg(solver, .MILP_solvers)
## If more than one (binary) solution is sought and the solver does
## not provide direct support, use poor person's branch and cut:
if(!is.null(nos <- control[["n"]]) && (solver != "cplex")) {
control[["n"]] <- NULL
## Mimic the mechanism currently employed by Rcplex(): return a
## list of solutions only if nos > 1 (or NA).
if(!identical(as.integer(nos), 1L)) {
add <- identical(control[["add"]], TRUE)
control[["add"]] <- NULL
return(.find_up_to_n_binary_MILP_solutions(x, nos, add,
solver, control))
}
}
## Note that lpSolve could find all binary solutions for all-binary
## programs.
switch(solver,
"cplex" = .solve_MILP_via_cplex(x, control),
"glpk" = .solve_MILP_via_glpk(x),
"lpsolve" = .solve_MILP_via_lpsolve(x),
"symphony" = .solve_MILP_via_symphony(x))
}
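## Usage sketch (illustrative, with made-up coefficients): a small all-integer
## maximization problem, assuming the "glpk" backend (package Rglpk) is
## installed. The constraints are passed as an unnamed list(mat, dir, rhs),
## which MILP() then names.
##   x <- MILP(objective = c(2, 4, 3),
##             constraints = list(rbind(c(3, 4, 2),
##                                      c(2, 1, 2),
##                                      c(1, 3, 2)),
##                                rep("<=", 3L),
##                                c(60, 40, 80)),
##             types = rep("I", 3L),
##             maximum = TRUE)
##   solve_MILP(x, solver = "glpk")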
### * MIQPs
MIQP <-
function(objective, constraints, bounds = NULL, types = NULL,
maximum = FALSE)
{
## See MILP() for the comments on constraint objects.
## We also add names to the list of objective coefficients.
names(objective) <- c("Q", "L")
names(constraints) <- c("mat", "dir", "rhs")
.structure(list(objective = objective, constraints = constraints,
bounds = bounds, types = types, maximum = maximum),
class = "MIQP")
}
.MIQP_solvers <-
c("cplex")
solve_MIQP <-
function(x, solver = NULL, control = list())
{
## Currently, only CPLEX can generally be used for solving MIQPs.
## For the other MILP solvers, all-binary programs can be solved via
## linearization.
## <NOTE>
## Actually, linearization only requires that the quadratic part is
## all-binary. Maybe support the mixed linear part case eventually.
## </NOTE>
## Handle the boundary case of no variables.
if(!length(x$objective)) {
y <- .solve_empty_MIP(x)
if(!is.null(nos <- control[["n"]])
&& !identical(as.integer(nos), 1L))
y <- list(y)
return(y)
}
is_BQP <- identical(unique(x$types), "B")
solver <- if(is_BQP) {
## If this is an all-binary problem, we can linearize, so use
## non-commercial defaults (obviously, there should eventually
## be a way to specify the default MILP and MIQP solver).
match.arg(solver, .MILP_solvers)
} else {
## Use a MIQP solver by default.
match.arg(solver, c(.MIQP_solvers, .MILP_solvers))
}
## For real MIQP solvers (currently only CPLEX), do not linearize by
## default, but allow for doing so for debugging purposes.
if(solver %in% .MIQP_solvers) {
if(identical(control[["linearize"]], TRUE))
.solve_BQP_via_linearization(x, solver, control)
else {
## Add switch() when adding support for other MIQP solvers.
.solve_MIQP_via_cplex(x, control)
}
} else {
## If this is an all-binary problem, we can linearize.
if(is_BQP)
.solve_BQP_via_linearization(x, solver, control)
else
stop(gettextf("Solver '%s' can only handle all-binary quadratic programs.",
solver),
domain = NA)
}
}
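## Usage sketch (illustrative, with made-up coefficients): an all-binary
## quadratic program, which solve_MIQP() can hand to a MILP solver via
## linearization. The objective is an unnamed list(Q, L), named by MIQP().
##   Q <- matrix(c(2, -1, -1, 2), 2L, 2L)
##   x <- MIQP(objective = list(Q, c(-3, -2)),
##             constraints = list(matrix(1, 1L, 2L), "<=", 2),
##             types = c("B", "B"))
##   solve_MIQP(x, solver = "glpk")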
.solve_BQP_via_linearization <-
function(x, solver, control)
{
## Number of variables.
n <- length(x$objective$L)
## Solve via linearization.
y <- solve_MILP(.linearize_BQP(x), solver, control)
## Reduce solution to the original variables.
finisher <- function(e) {
e$solution <- e$solution[seq_len(n)]
e
}
## <FIXME>
## Wouldn't it be simpler to check if y inherits from MIP_solution?
if(!is.null(nos <- control[["n"]]) && !identical(nos, 1L))
lapply(y, finisher)
else
finisher(y)
## </FIXME>
}
### * Solver interfaces
### ** CPLEX
.solve_MILP_via_cplex <-
function(x, control)
{
## Wrap into the common MIP CPLEX framework.
x$objective <- list(Q = NULL, L = x$objective)
.solve_MIP_via_cplex(x, control)
}
.solve_MIQP_via_cplex <-
function(x, control)
{
## Ensure that the coefficient matrix of the quadratic term is
## symmetric, as required by Rcplex.
Q <- x$objective$Q
## <CHECK>
## Does Rcplex really support simple triplet matrix Q coefficients?
## If not, would need
## x$objective$Q <- as.matrix((Q + t(Q)) / 2)
## instead:
x$objective$Q <- (Q + t(Q)) / 2
## </CHECK>
.solve_MIP_via_cplex(x, control)
}
.solve_MIP_via_cplex <-
function(x, control)
{
## Currently, no direct support for bounds.
## <FIXME>
## Should expand the given bounds and map into lb/ub arguments.
if(!is.null(x$bounds))
stop("Solver currently does not support variable bounds.")
## </FIXME>
.as_Rcplex_sense <- function(x) {
TABLE <- c("L", "L", "G", "G", "E")
names(TABLE) <- c("<", "<=", ">", ">=", "==")
TABLE[x]
}
sense <- .as_Rcplex_sense(x$constraints$dir)
types <- .expand_types(x$types, length(x$objective$L))
mat <- x$constraints$mat
if(is.simple_triplet_matrix(mat)) {
## Reorder indices as CPLEX needs a column major order
## representation i.e., column indices j have to be in ascending
## order.
column_major_order <- order(mat$j)
mat$i <- mat$i[column_major_order]
mat$j <- mat$j[column_major_order]
mat$v <- mat$v[column_major_order]
} else {
mat <- as.matrix(mat)
}
if(is.null(nos <- control[["n"]])) nos <- 1L
value_is_list_of_solutions <- !identical(as.integer(nos), 1L)
out <-
tryCatch(Rcplex::Rcplex(Qmat = x$objective$Q,
cvec = x$objective$L,
Amat = mat,
sense = sense,
bvec = x$constraints$rhs,
vtype = types,
objsense = if(x$maximum) "max" else "min",
control = list(trace = 0, round = 1),
n = nos
),
error = identity)
if(inherits(out, "error")) {
## Explicitly catch and rethrow CPLEX unavailability errors.
msg <- conditionMessage(out)
if(regexpr("Could not open CPLEX environment\\.", msg) > -1L)
stop(msg, call. = FALSE)
## Currently, Rcplex signals problems via error() rather than
## returning a non-zero status. Hence, we try catching these
## errors. (Of course, these could also be real errors ...).
solution <- rep.int(NA_real_, length(types))
objval <- NA_real_
status <- 2 # or whatever ...
names(status) <- msg # should be of length one ...
out <- .make_MIP_solution(solution, objval, status)
if(value_is_list_of_solutions) out <- list(out)
} else {
out <- if(value_is_list_of_solutions)
lapply(out, .canonicalize_solution_from_cplex, x)
else
.canonicalize_solution_from_cplex(out, x)
}
out
}
.canonicalize_solution_from_cplex <-
function(out, x)
{
solution <- out$xopt
## For the time being ...
## Since Rcplex 0.1-4 integers are rounded (via control argument
## 'round' which we set accordingly when calling Rcplex()) but no
## new optimal solution based on these values is calculated. Hence,
## we no longer round ourselves, but recompute objval.
objval <- sum(solution * x$objective$L)
if(!is.null(Q <- x$objective$Q))
objval <- objval + .xtQx(Q, solution) / 2
status <- out$status
## Simple db for "ok" status results:
ok_status_db <-
c("CPX_STAT_OPTIMAL" = 1L, # (Simplex or barrier): optimal
# solution is available
"CPXMIP_OPTIMAL" = 101L, # (MIP): optimal integer solution
# has been found
"CPXMIP_OPTIMAL_TOL" = 102L, # (MIP): Optimal soluton with
# the tolerance defined by epgap
# or epagap has been found
"CPXMIP_POPULATESOL_LIM" = 128L, # (MIP-MultSols): The limit on
# mixed integer solutions
# generated by populate has been
# reached
"CPXMIP_OPTIMAL_POPULATED" = 129L, # (MIP-MultSols): Populate
# has completed the enumeration of
# all solutions it could enumerate
"CPXMIP_OPTIMAL_POPULATED_TOL" = 130L # (MIP-MultSols): similar
# to 129L but additionally
# objective value fits the
                                          # tolerance specified by parameters
)
status <- ifelse(status %in% ok_status_db, 0, status)
.make_MIP_solution(solution, objval, status)
}
### ** GLPK
.solve_MILP_via_glpk <-
function(x)
{
out <- Rglpk::Rglpk_solve_LP(x$objective,
x$constraints$mat,
x$constraints$dir,
x$constraints$rhs,
bounds = x$bounds,
types = x$types,
max = x$maximum)
.make_MIP_solution(out$solution, out$optimum, out$status)
}
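## Example use of solve_MILP() with the GLPK backend above (illustrative
## sketch, not run; assumes the Rglpk package is installed):
## kp <- MILP(objective = c(3, 1, 2),
##            constraints = list(rbind(c(2, 1, 3)), "<=", 4),
##            types = c("B", "B", "B"), maximum = TRUE)
## solve_MILP(kp, "glpk")$solution   # should give c(1, 1, 0)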
### ** lp_solve
.solve_MILP_via_lpsolve <-
function(x)
{
## Currently, no direct support for bounds.
## <FIXME>
## Should rewrite the given bounds into additional constraints.
if(!is.null(x$bounds))
stop("Solver currently does not support variable bounds.")
## </FIXME>
types <- .expand_types(x$types, length(x$objective))
## Version 5.6.1 of lpSolve has added sparse matrix support via
## formal 'dense.const' as well as binary variable types.
mat <- x$constraints$mat
out <- if(is.simple_triplet_matrix(mat)) {
## In the sparse case, lpSolve currently (2008-11-22) checks
## that every constraint is used in the sense that each row has
## at least one entry. So if for some reason this is not the
## case, let us add one zero entry for such rows (note that we
## cannot simply drop them as the corresponding constraint may
## be violated).
ind <- which(tabulate(mat$i, mat$nrow) == 0)
if(len <- length(ind)) {
mat$i <- c(mat$i, ind)
mat$j <- c(mat$j, rep.int(1L, len))
mat$v <- c(mat$v, rep.int(0, len))
}
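        ## E.g., if mat has 3 rows and row 2 has no entry, ind is 2 and a
        ## single explicit zero at position (2, 1) is appended, so that the
        ## (possibly violated) second constraint is still passed to lpSolve.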
lpSolve::lp(if(x$maximum) "max" else "min",
x$objective,
const.dir = x$constraints$dir,
const.rhs = x$constraints$rhs,
int.vec = which(types == "I"),
binary.vec = which(types == "B"),
dense.const = cbind(mat$i, mat$j, mat$v))
} else {
lpSolve::lp(if(x$maximum) "max" else "min",
x$objective,
as.matrix(mat),
x$constraints$dir,
x$constraints$rhs,
int.vec = which(types == "I"),
binary.vec = which(types == "B"))
}
status_db <-
## Solver status values from lp_lib.h:
c("UNKNOWNERROR" = -5L,
"DATAIGNORED" = -4L,
"NOBFP" = -3L,
"NOMEMORY" = -2L,
"NOTRUN" = -1L,
"OPTIMAL" = 0L,
"SUBOPTIMAL" = 1L,
"INFEASIBLE" = 2L,
"UNBOUNDED" = 3L,
"DEGENERATE" = 4L,
"NUMFAILURE" = 5L,
"USERABORT" = 6L,
"TIMEOUT" = 7L,
"RUNNING" = 8L,
"PRESOLVED" = 9L)
status <- status_db[match(out$status, status_db)]
solution <- out$solution
objval <- if(status == 0L)
sum(solution * out$objective)
else
out$objval
.make_MIP_solution(solution, objval, status)
}
### ** SYMPHONY
.solve_MILP_via_symphony <-
function(x)
{
out <- Rsymphony::Rsymphony_solve_LP(x$objective,
x$constraints$mat,
x$constraints$dir,
x$constraints$rhs,
bounds = x$bounds,
types = x$types,
max = x$maximum)
.make_MIP_solution(out$solution, out$objval, out$status)
}
### * Utilities
.expand_types <-
function(x, n)
{
if(is.null(x)) {
## Continuous by default.
rep.int("C", n)
}
else {
if(!is.character(x) || !all(x %in% c("C", "I", "B")))
stop("Invalid MIP variable types.")
## Be nicer than necessary ...
rep_len(x, n)
}
}
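## Examples for .expand_types() above: .expand_types(NULL, 3L) gives
## c("C", "C", "C"), and .expand_types("B", 3L) recycles to c("B", "B", "B").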
.find_up_to_n_binary_MILP_solutions <-
function(x, nos = 1L, add = FALSE, solver = NULL, control = NULL)
{
## Find up to n binary MILP solutions using a simple branch and cut
## approach (repeatedly splitting the binary variables and cutting
## the non-optimal branches).
if(is.na(nos))
nos <- .Machine$integer.max
y <- solve_MILP(x, solver, control)
if((y$status != 0) || (nos == 1L) || !any(x$types == "B"))
return(list(y))
Vopt <- y$objval
tol <- 1e-8
## (Smaller than .Machine$double.eps^0.5 as e.g. used in the
## all.equal() comparisons.)
## We used to have 1e-10, but SYMPHONY was not as precise as this.
v_is_not_optimal <- if(x$maximum)
function(v) v < Vopt - tol
else
function(v) v > Vopt + tol
## Improved versions could use relative tolerance, and/or take the
## number of variables into account. Or maybe we can figure out the
## tolerance employed by the solvers when they declare optimality?
if(add) {
## Find up to n solutions by adding binary constraints for each
## split. This is most space efficient, but typically takes
        ## considerably more time than the default based on successive
## reductions by substitution.
return(.find_up_to_n_binary_MILP_solutions_via_add(x, nos,
solver,
control,
y,
v_is_not_optimal))
}
    ## Find solutions by recursively splitting binary variables and
## substituting the values into the program.
## Suppose x_j is one of the binary variables. Let x[-j] be the
## vector of variables after dropping x_j. If x_j has value b_j,
## the objective function is
## c[-j]' x[-j] + c_j b_j,
## and the constraints become
## (mat[, -j] %*% x[-j]) dir (rhs - mat[, j] * b_j)
## Note that the new constraint matrix and dir do not depend on the
## value of b_j.
## When recursively splitting, we need to keep track of the b
## values, the sum of the c_j b_j terms to add to the reduced
## objective value to obtain the value of the original problem, and
## the right hand sides.
## Also, binary positions are a nuisance to keep track of, so we
## start by rearranging variables to have the binary variables come
## last in reverse order of splitting.
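    ## As a small numeric illustration (hypothetical values): with
    ## c = (2, 3), a single constraint row (1, 4) and rhs 5, fixing the
    ## binary x_2 to b_2 = 1 leaves the reduced objective 2 x_1 (plus the
    ## constant 3) and the reduced right hand side 5 - 4 = 1.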
n_of_variables <- length(x$objective)
types <- .expand_types(x$types, n_of_variables)
binary_positions <- which(types == "B")
n_of_binary_variables <- length(binary_positions)
verbose <- identical(control[["verbose"]], TRUE)
.make_node <- function(b, y, v, r) list(b = b, y = y, v = v, r = r)
.split_single_binary_variable <-
function(node, i, pos_i, obj_i, mat_i, pos_b) {
## Try to avoid unnecessary copying of x via <<-
## manipulations.
## We know that the solution with the current b[i] is
## optimal, so we try the effect of flipping b[i].
b <- node$b
node$y$solution <- node$y$solution[-pos_i]
if(b[i] == 0) {
b1 <- b
b1[i] <- 1
v1 <- node$v + obj_i
r1 <- node$r - mat_i
x$constraints$rhs <<- r1
y1 <- solve_MILP(x, solver, control)
## Uncomment for debugging ...
## if(verbose) {
## V <- y1$objval + v1
## split <- (y1$status == 0) && !v_is_not_optimal(V)
## message(sprintf("b[i] = 0, flip objval: %f, Delta: %.12f, status: %d, split: %s",
## V, Vopt - V, y1$status, split))
## }
if((y1$status != 0) || v_is_not_optimal(y1$objval + v1))
list(node)
else {
## Fill the rest of b with the optimal entries.
b1[-seq_len(i)] <- y1$solution[pos_b]
list(node, .make_node(b1, y1, v1, r1))
}
} else {
b0 <- b
b0[i] <- 0
v0 <- node$v
node$v <- v0 + obj_i
r0 <- node$r
node$r <- r0 - mat_i
x$constraints$rhs <<- r0
y0 <- solve_MILP(x, solver, control)
## Uncomment for debugging ...
## if(verbose) {
## V <- y0$objval + v0
## split <- (y0$status == 0) && !v_is_not_optimal(V)
## message(sprintf("b[i] = 1, flip objval: %f, Delta: %.12f, status: %d, split: %s",
## V, Vopt - V, y0$status, split))
## }
if((y0$status != 0) || v_is_not_optimal(y0$objval + v0))
list(node)
else {
## Fill the rest of b with the optimal entries.
b0[-seq_len(i)] <- y0$solution[pos_b]
list(node, .make_node(b0, y0, v0, r0))
}
}
}
## We allow callers to specify the order in which binary splits
## should be attempted (so the i-th split is for binary variable
## order[i]).
order <- control[["order"]]
if(is.null(order) || length(order) != n_of_binary_variables)
order <- seq_len(n_of_binary_variables)
## Rearrange variables to have the binary ones last in reverse split
## order.
ind <- c(which(types != "B"), rev(which(types == "B")[order]))
    objective_in_original_order <- x$objective  # full objective, original order
    x$objective <- x$objective[ind]
x$constraints$mat <- x$constraints$mat[, ind, drop = FALSE]
x$types <- x$types[ind]
y$solution <- y$solution[ind]
pos_i <- n_of_variables
pos_b <- seq.int(from = n_of_variables,
length.out = n_of_binary_variables,
by = -1L)
nodes <- list(.make_node(y$solution[pos_b], y, 0, x$constraints$rhs))
for(i in seq_len(n_of_binary_variables)) {
pos_b <- pos_b[-1L]
obj_i <- x$objective[pos_i]
mat_i <- c(x$constraints$mat[, pos_i])
x$objective <- x$objective[-pos_i]
x$constraints$mat <- x$constraints$mat[, -pos_i, drop = FALSE]
x$types <- x$types[-pos_i]
nodes <- do.call(c,
lapply(nodes,
.split_single_binary_variable,
i, pos_i, obj_i, mat_i, pos_b))
len <- length(nodes)
if(verbose)
message(gettextf("N_of_binary_variables: %d *** N_of_optimal_branches: %d",
i, len))
if(len >= nos) {
nodes <- nodes[seq_len(nos)]
break
}
pos_i <- pos_i - 1L
}
pos <- order(c(which(types != "B"), which(types == "B")[order]))
finisher <- function(node) {
## Need to reconstruct solutions from the binary and non-binary
## parts and the respective objective values.
y <- node$y
y$solution <- c(y$solution, node$b)[pos]
        y$objval <- sum(y$solution * objective_in_original_order)
## In principle, should be the same as Vopt.
y
}
lapply(nodes, finisher)
}
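## Example use of the above via solve_MILP()'s control list (illustrative,
## not run), which dispatches here for solvers without native support for
## multiple solutions:
## solve_MILP(x, "glpk", control = list(n = 3L, verbose = TRUE))
## solve_MILP(x, "glpk", control = list(n = NA, add = TRUE))  # all optima, constraint-based variant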
.find_up_to_n_binary_MILP_solutions_via_add <-
function(x, nos = 1L, solver = NULL, control = NULL, y,
v_is_not_optimal)
{
## Recursively add 0/1 constraints for the binary variables.
## Note that one can do this via adding the additional constraints
## to the original ones, or by maintaining them separately (e.g., in
## a vector of values for the binary variables). We do the latter
## which uses as little space as possible, but requires extra time
## for merging both constraints when solving augmented problems.
## Alternatively, one could allow choosing between either approach.
## Note also that we need to keep new constraints and corresponding
## solutions to avoid recomputing solutions when enough were found.
n_of_variables <- length(x$objective)
types <- .expand_types(x$types, n_of_variables)
binary_positions <- which(types == "B")
n_of_binary_variables <- length(binary_positions)
verbose <- identical(control[["verbose"]], TRUE)
mat <- x$constraints$mat
.make_additional_constraint_matrix <-
if(is.simple_triplet_matrix(mat))
function(bpos) {
len <- length(bpos)
simple_triplet_matrix(seq_len(len), bpos,
rep.int(1, len),
len, n_of_variables)
}
else
function(bpos) {
len <- length(bpos)
add <- matrix(0, len, n_of_variables)
add[cbind(seq_len(len), bpos)] <- 1
add
}
.solve_MILP_with_additional_binary_constraints <-
function(x, bpos, bval) {
len <- length(bpos)
x$constraints <-
list(mat = rbind(mat,
.make_additional_constraint_matrix(bpos)),
dir = c(x$constraints$dir, rep.int("==", len)),
rhs = c(x$constraints$rhs, bval))
solve_MILP(x, solver, control)
}
.split_single_binary_variable <- function(y, i) {
oi <- order[i]
ind <- order[seq_len(i)]
pos <- binary_positions[ind]
b <- y$solution[binary_positions]
## Try flipping the i-th binary variable and see whether this
## also delivers optimal solutions.
b[oi] <- 1 - b[oi]
yf <- .solve_MILP_with_additional_binary_constraints(x, pos, b[ind])
if((yf$status != 0) || v_is_not_optimal(yf$objval))
list(y)
else
list(y, yf)
}
ylist <- list(y)
## We allow callers to specify the order in which binary splits
## should be attempted (so the i-th split is for binary variable
## order[i], i.e., at binary_positions[order[i]]).
order <- control[["order"]]
if(is.null(order))
order <- seq_len(n_of_binary_variables)
for(i in seq_len(n_of_binary_variables)) {
ylist <-
do.call(c, lapply(ylist, .split_single_binary_variable, i))
len <- length(ylist)
if(verbose)
message(gettextf("N_of_binary_variables: %d *** N_of_optimal_branches: %d",
i, len))
if(len >= nos) {
ylist <- ylist[seq_len(nos)]
break
}
}
ylist
}
.linearize_BQP <-
function(x)
{
## Linearize an all-binary quadratic program
## \sum_{i,j} q_{ij} x_i x_j / 2 + \sum_i c_i x_i
## as described e.g. in "Pseudo-Boolean Optimization" by E. Boros
## and P. Hammer (boros01pseudoboolean.pdf): rewrite the criterion
## function as
## \sum_{i < j} r_{ij} y_{ij} + \sum_i s_i x_i
## with
## r_{ij} = (q_{ij} + q_{ji}) / 2
## s_i = c_i + q_{ii} / 2
## and the additional constraints
## y_{ij} <= x_i, y_{ij} <= x_j (A)
## y_{ij} >= 0, y_{ij} >= x_i + x_j - 1 (B)
## where for a minimization problem (A) is redundant if r_{ij} > 0
## and (B) if r_{ij} < 0, and vice versa for a maximization
## problem.
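    ## As a small numeric illustration (hypothetical values): for
    ## Q = [0 2; 2 0] and c = (1, 1) we get r_{12} = (2 + 2) / 2 = 2 and
    ## s = (1, 1), so the linearized objective is 2 y_{12} + x_1 + x_2,
    ## with y_{12} tied to x_1 x_2 by the constraints above.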
if(!inherits(x, "MIQP") && !identical(unique(x$types), "B"))
stop("Can only linearize all-binary quadratic programs.")
## Could do some sanity checking here.
Q <- x$objective$Q
c <- x$objective$L
n <- length(c)
R <- (Q + t(Q)) / 2
if(is.simple_triplet_matrix(Q)) {
## Transform coefficients.
## Cannot easily have a diag() method for simple triplet
## matrices.
s <- c + Q[cbind(seq_len(n), seq_len(n))] / 2
## Quadratic coefficients and respective variables.
p <- (R$i < R$j) & (R$v != 0)
i <- R$i[p]
j <- R$j[p]
r <- R$v[p]
} else {
## Transform coefficients.
s <- c + diag(Q) / 2
## Quadratic coefficients and respective variables.
I <- upper.tri(R)
r <- R[I]
p <- which(r != 0)
I <- which(I, arr.ind = TRUE)
i <- I[p, 1L]
j <- I[p, 2L]
r <- r[p]
}
nr <- length(r)
## Constraints.
mat <- x$constraints$mat
pn <- which(r < 0) # Negative positions.
pp <- which(r > 0) # Positive positions.
## <NOTE>
## To experiment with not dropping redundant constraints, do:
## pn <- pp <- seq_along(r)
## </NOTE>
npn <- length(pn)
npp <- length(pp)
if(x$maximum) {
if(is.simple_triplet_matrix(mat)) {
add_i <- c(rep.int(seq_len(npp), 2L),
rep.int(seq_len(npp) + npp, 2L),
rep.int(seq_len(npn) + 2L * npp, 3L))
add_j <- c(i[pp], n + pp,
j[pp], n + pp,
i[pn], j[pn], n + pn)
add_v <- rep.int(c(-1, 1, -1, 1, -1, 1),
c(npp, npp, npp, npp, 2L * npn, npn))
mat <- rbind(cbind(mat,
simple_triplet_zero_matrix(nrow(mat), nr)),
simple_triplet_matrix(add_i, add_j, add_v,
npn + 2L * npp, n + nr))
} else {
add <- matrix(0, npn + 2L * npp, n + nr)
## Constraints
## y_{ij} <= x_i, y_{ij} <= x_j (A)
## if r_{ij} > 0:
ind <- seq_len(npp)
add[cbind(ind, i[pp])] <- -1
add[cbind(ind, n + pp)] <- 1
ind <- ind + npp
add[cbind(ind, j[pp])] <- -1
add[cbind(ind, n + pp)] <- 1
## Constraints
## y_{ij} >= 0, y_{ij} >= x_i + x_j - 1 (B)
## if r_{ij} < 0 (where the former is implicit):
ind <- seq_len(npn) + 2L * npp
add[cbind(ind, i[pn])] <- -1
add[cbind(ind, j[pn])] <- -1
add[cbind(ind, n + pn)] <- 1
mat <- rbind(cbind(mat, matrix(0, nrow(mat), nr)), add)
}
dir <- c(x$constraints$dir,
rep.int("<=", 2L * npp),
rep.int(">=", npn))
rhs <- c(x$constraints$rhs,
rep.int(0, 2L * npp),
rep.int(-1, npn))
} else {
if(is.simple_triplet_matrix(mat)) {
add_i <- c(rep.int(seq_len(npn), 2L),
rep.int(seq_len(npn) + npn, 2L),
rep.int(seq_len(npp) + 2L * npn, 3L))
add_j <- c(i[pn], n + pn,
j[pn], n + pn,
i[pp], j[pp], n + pp)
add_v <- rep.int(c(-1, 1, -1, 1, -1, 1),
c(npn, npn, npn, npn, 2L * npp, npp))
mat <- rbind(cbind(mat,
simple_triplet_zero_matrix(nrow(mat), nr)),
simple_triplet_matrix(add_i, add_j, add_v,
npp + 2L * npn, n + nr))
} else {
add <- matrix(0, 2L * npn + npp, n + nr)
## Constraints
## y_{ij} <= x_i, y_{ij} <= x_j (A)
## if r_{ij} < 0:
ind <- seq_len(npn)
add[cbind(ind, i[pn])] <- -1
add[cbind(ind, n + pn)] <- 1
ind <- ind + npn
add[cbind(ind, j[pn])] <- -1
add[cbind(ind, n + pn)] <- 1
## Constraints
## y_{ij} >= 0, y_{ij} >= x_i + x_j - 1 (B)
## if r_{ij} > 0 (where the former is implicit):
ind <- seq_len(npp) + 2L * npn
add[cbind(ind, i[pp])] <- -1
add[cbind(ind, j[pp])] <- -1
add[cbind(ind, n + pp)] <- 1
mat <- rbind(cbind(mat, matrix(0, nrow(mat), nr)), add)
}
dir <- c(x$constraints$dir,
rep.int("<=", 2L * npn),
rep.int(">=", npp))
rhs <- c(x$constraints$rhs,
rep.int(0, 2L * npn),
rep.int(-1, npp))
}
MILP(c(s, r),
list(mat, dir, rhs),
x$bounds,
rep.int(c("B", "C"), c(n, nr)),
x$maximum)
}
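## Example use of .linearize_BQP() above (illustrative sketch, not run):
## a 2-variable all-binary QP gains one auxiliary continuous variable for
## its single off-diagonal term of (Q + t(Q)) / 2.
## lp <- .linearize_BQP(MIQP(list(matrix(c(0, 2, 2, 0), 2L), c(1, 1)),
##                           list(rbind(c(1, 1)), "<=", 2), types = "B"))
## length(lp$objective)   # 3
## lp$types               # "B" "B" "C"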
.make_MIP_solution <-
function(solution, objval, status, ...)
.structure(list(solution = solution,
objval = objval,
status = status, ...),
class = "MIP_solution")
.make_types <-
function(n, I = NULL, B = NULL)
{
## Create MIP variable types spec from possibly given positions of
## integer and binary variables.
types <- rep.int("C", n)
if(!is.null(I)) types[I] <- "I"
if(!is.null(B)) types[B] <- "B"
types
}
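## Example for .make_types() above: .make_types(4L, I = 2L, B = 3:4) gives
## c("C", "I", "B", "B").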
.relax_mixed_integer_program <-
function(x)
{
## Relax MILP or MIQP by dropping integrality constraints (I -> C),
## and changing binary constraints x \in \{0, 1\} to x \in [0,1].
if(!inherits(x, "MILP") && !inherits(x, "MIQP"))
stop("Can only relax mixed integer linear or quadratic programs.")
mat <- x$constraints$mat
n_of_variables <- ncol(mat)
types <- .expand_types(x$types, n_of_variables)
binary_positions <- which(types == "B")
if(n_of_binary_variables <- length(binary_positions)) {
## For binary variables x_i, we need to add the constraint
## x_i <= 1.
if(is.simple_triplet_matrix(mat))
add <-
simple_triplet_matrix(seq_len(n_of_binary_variables),
binary_positions,
rep.int(1, n_of_binary_variables),
n_of_binary_variables,
n_of_variables)
else {
add <- matrix(0, n_of_binary_variables, n_of_variables)
add[cbind(seq_len(n_of_binary_variables),
binary_positions)] <- 1
}
x$constraints$mat <- rbind(mat, add)
x$constraints$dir <- c(x$constraints$dir,
rep.int("<=", n_of_binary_variables))
x$constraints$rhs <- c(x$constraints$rhs,
rep.int(1, n_of_binary_variables))
}
x$types <- rep.int("C", n_of_variables)
x
}
.solve_empty_MIP <-
function(x)
{
## Check whether constraints are satisfied (interpreting each lhs as
## empty sum with value 0):
constraints <- split(x$constraints$rhs, x$constraints$dir)
if(all(unlist(Map(function(dir, rhs) get(dir)(0, rhs),
names(constraints), constraints))))
.make_MIP_solution(double(), 0, 0L)
else
.make_MIP_solution(double(), NA_real_, 2L)
}
.xtQx <-
function(Q, x)
{
## Value of quadratic form t(x) %*% Q %*% x.
## As we implement simple triplet matrices in S3, we could only have
## %*% and crossprod methods if we created S3 generics for these ...
if(is.simple_triplet_matrix(Q))
sum(Q$v * x[Q$i] * x[Q$j])
else
c(crossprod(x, Q %*% x))
}
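## Example for .xtQx() above: .xtQx(diag(2), c(1, 2)) evaluates
## t(x) %*% Q %*% x = 1 + 4 = 5, and gives the same value when Q is a
## simple triplet matrix.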
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "### [*]+" ***
### End: ***
|
/R/mip.R
|
no_license
|
cran/relations
|
R
| false | false | 34,171 |
r
|
% Auto-generated: do not edit by hand
\name{dccRangeSlider}
\alias{dccRangeSlider}
\title{RangeSlider component}
\description{
A double slider with two handles. Used for specifying a range of numerical values.
}
\usage{
dccRangeSlider(id=NULL, marks=NULL, value=NULL, drag_value=NULL,
allowCross=NULL, className=NULL, count=NULL, disabled=NULL,
dots=NULL, included=NULL, min=NULL, max=NULL, pushable=NULL,
tooltip=NULL, step=NULL, vertical=NULL, verticalHeight=NULL,
updatemode=NULL, loading_state=NULL, persistence=NULL,
persisted_props=NULL, persistence_type=NULL)
}
\arguments{
\item{id}{Character. The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.}
\item{marks}{List with named elements and values of type character | lists containing elements 'label', 'style'.
those elements have the following types:
- label (character; optional)
- style (named list; optional). Marks on the slider.
The key determines the position (a number),
and the value determines what will show.
If you want to set the style of a specific mark point,
the value should be an object which
contains style and label properties.}
\item{value}{List of numerics. The value of the input}
\item{drag_value}{List of numerics. The value of the input during a drag}
\item{allowCross}{Logical. allowCross could be set as true to allow those handles to cross.}
\item{className}{Character. Additional CSS class for the root DOM node}
\item{count}{Numeric. Determine how many ranges to render, and multiple handles
will be rendered (number + 1).}
\item{disabled}{Logical. If true, the handles can't be moved.}
\item{dots}{Logical. When the step value is greater than 1,
you can set the dots to true if you want to
render the slider with dots.}
\item{included}{Logical. If the value is true, it means a continuous
value is included. Otherwise, it is an independent value.}
\item{min}{Numeric. Minimum allowed value of the slider}
\item{max}{Numeric. Maximum allowed value of the slider}
\item{pushable}{Logical | numeric. pushable could be set as true to allow pushing of
surrounding handles when moving a handle.
When set to a number, the number will be the
minimum ensured distance between handles.}
\item{tooltip}{Lists containing elements 'always_visible', 'placement'.
those elements have the following types:
- always_visible (logical; optional): determines whether tooltips should always be visible
(as opposed to the default, visible on hover)
- placement (a value equal to: 'left', 'right', 'top', 'bottom', 'topleft', 'topright', 'bottomleft', 'bottomright'; optional): determines the placement of tooltips
see https://github.com/react-component/tooltip#api
top/bottom{*} sets the _origin_ of the tooltip, so e.g. `topleft`
will in reality appear to be on the top right of the handle. Configuration for tooltips describing the current slider values}
\item{step}{Numeric. Value by which increments or decrements are made}
\item{vertical}{Logical. If true, the slider will be vertical}
\item{verticalHeight}{Numeric. The height, in px, of the slider if it is vertical.}
\item{updatemode}{A value equal to: 'mouseup', 'drag'. Determines when the component should update its `value`
property. If `mouseup` (the default) then the slider
will only trigger its value when the user has finished
dragging the slider. If `drag`, then the slider will
update its value continuously as it is being dragged.
Note that for the latter case, the `drag_value`
property could be used instead.}
\item{loading_state}{Lists containing elements 'is_loading', 'prop_name', 'component_name'.
those elements have the following types:
- is_loading (logical; optional): determines if the component is loading or not
- prop_name (character; optional): holds which property is loading
- component_name (character; optional): holds the name of the component that is loading. Object that holds the loading state object coming from dash-renderer}
\item{persistence}{Logical | character | numeric. Used to allow user interactions in this component to be persisted when
the component - or the page - is refreshed. If `persisted` is truthy and
hasn't changed from its previous value, a `value` that the user has
changed while using the app will keep that change, as long as
the new `value` also matches what was given originally.
Used in conjunction with `persistence_type`.}
\item{persisted_props}{List of a value equal to: 'value's. Properties whose user interactions will persist after refreshing the
component or the page. Since only `value` is allowed this prop can
normally be ignored.}
\item{persistence_type}{A value equal to: 'local', 'session', 'memory'. Where persisted user changes will be stored:
memory: only kept in memory, reset on page refresh.
local: window.localStorage, data is kept after the browser quits.
session: window.sessionStorage, data is cleared once the browser quits.}
}
\value{named list of JSON elements corresponding to React.js properties and their values}
\examples{
if (interactive() && require(dash)) {
library(dash)
library(dashHtmlComponents)
library(dashCoreComponents)
app <- Dash$new()
app$layout(
htmlDiv(
dccRangeSlider(
count = 1,
min = -5,
max = 10,
step = 0.5,
value = list(-3, 7),
marks = as.list(
setNames(-5:10, as.character(-5:10))
)
)
)
)
app$run_server()
}
}
|
/man/dccRangeSlider.Rd
|
permissive
|
low-decarie/dash-core-components
|
R
| false | false | 5,519 |
rd
|
context("Check select_neighbours() function")
apartmentsTest <- apartments_test
new_apartment <- apartments[1, 2:6]
test_that("Output format - select_neighbours",{
expect_is(select_neighbours(apartmentsTest, new_apartment, n = 10), "data.frame")
expect_is(select_neighbours(apartmentsTest, new_apartment, n = NULL, frac = 0.001), "data.frame")
})
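# Possible additional check (illustrative sketch, not part of the original
# suite; assumes select_neighbours() returns exactly `n` rows when n is given):
# test_that("Number of rows - select_neighbours", {
#   expect_equal(nrow(select_neighbours(apartmentsTest, new_apartment, n = 10)), 10)
# })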
|
/tests/testthat/test_select_neighbours.R
|
no_license
|
ModelOriented/ceterisParibus2
|
R
| false | false | 354 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gf_shortest_paths.R
\name{gf_shortest_paths}
\alias{gf_shortest_paths}
\title{Shortest paths}
\usage{
gf_shortest_paths(x, landmarks, ...)
}
\arguments{
\item{x}{An object coercable to a GraphFrame (typically, a
\code{gf_graphframe}).}
\item{landmarks}{IDs of landmark vertices.}
\item{...}{Optional arguments, currently not used.}
}
\description{
Computes shortest paths from every vertex to the given set of landmark vertices.
Note that this takes edge direction into account.
}
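\examples{
# Hedged sketch: assumes an active Spark connection `sc` and that the
# package's small example graph is available via gf_friends().
\dontrun{
g <- gf_friends(sc)
gf_shortest_paths(g, landmarks = c("a", "d"))
}
}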
|
/man/gf_shortest_paths.Rd
|
permissive
|
kevinykuo/graphframes-1
|
R
| false | true | 563 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gf_shortest_paths.R
\name{gf_shortest_paths}
\alias{gf_shortest_paths}
\title{Shortest paths}
\usage{
gf_shortest_paths(x, landmarks, ...)
}
\arguments{
\item{x}{An object coercable to a GraphFrame (typically, a
\code{gf_graphframe}).}
\item{landmarks}{IDs of landmark vertices.}
\item{...}{Optional arguments, currently not used.}
}
\description{
Computes shortest paths from every vertex to the given set of landmark vertices.
Note that this takes edge direction into account.
}
|
### PLOT4
#_________________________________________________________________
#№4
#_________________________________________________________________
# load the household power consumption data (hpc.txt) from the project directory.
setwd("C:/DATA/coursera/4. exploratory data analysis/week 1/Project 1")
data<-read.table(file="hpc.txt",header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# select the time interval of interest (1-2 February 2007)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# combine the Date and Time columns into a single timestamp
basetime<-strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GAP<-as.numeric(subSetData$Global_active_power)
GRP<-as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
SM1 <- as.numeric(subSetData$Sub_metering_1)
SM2 <- as.numeric(subSetData$Sub_metering_2)
SM3 <- as.numeric(subSetData$Sub_metering_3)
# open a png device for the plot, sized 800 x 600
png(filename='C:/DATA/coursera/4. exploratory data analysis/week 1/Project 1/plot4.png',width=800, height=600)
# arrange the plots as a 2x2 matrix
par(mfrow = c(2, 2))
# build the plots of the matrix
# plot 1
plot(basetime, GAP, type="l", xlab="", ylab="Global Active Power", cex=0.2)
# plot 2
plot(basetime, voltage, type="l", xlab="basetime", ylab="Voltage")
# plot 3
plot(basetime, SM1, type="l", ylab="energy sub-metering")
# add the remaining sub-metering lines
lines(basetime, SM2, type="l", col="red")
lines(basetime, SM3, type="l", col="blue")
legend("topright", c("energy sub-metering No. 1", "energy sub-metering No. 2", "energy sub-metering No. 3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# plot 4
plot(basetime, GRP, type="l", xlab="basetime", ylab="Global_reactive_power")
# close the png device
dev.off()
#_________________________________________________________________
|
/PLOT4.R
|
no_license
|
MaksimMolokov/ExData_Plotting1
|
R
| false | false | 2,162 |
r
|
### PLOT4
#_________________________________________________________________
#№4
#_________________________________________________________________
# load the household power consumption data (hpc.txt) from the project directory.
setwd("C:/DATA/coursera/4. exploratory data analysis/week 1/Project 1")
data<-read.table(file="hpc.txt",header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# select the time interval of interest (1-2 February 2007)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# combine the Date and Time columns into a single timestamp
basetime<-strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GAP<-as.numeric(subSetData$Global_active_power)
GRP<-as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
SM1 <- as.numeric(subSetData$Sub_metering_1)
SM2 <- as.numeric(subSetData$Sub_metering_2)
SM3 <- as.numeric(subSetData$Sub_metering_3)
# open a png device for the plot, sized 800 x 600
png(filename='C:/DATA/coursera/4. exploratory data analysis/week 1/Project 1/plot4.png',width=800, height=600)
# arrange the plots as a 2x2 matrix
par(mfrow = c(2, 2))
# build the plots of the matrix
# plot 1
plot(basetime, GAP, type="l", xlab="", ylab="Global Active Power", cex=0.2)
# plot 2
plot(basetime, voltage, type="l", xlab="basetime", ylab="Voltage")
# plot 3
plot(basetime, SM1, type="l", ylab="energy sub-metering")
# add the remaining sub-metering lines
lines(basetime, SM2, type="l", col="red")
lines(basetime, SM3, type="l", col="blue")
legend("topright", c("energy sub-metering No. 1", "energy sub-metering No. 2", "energy sub-metering No. 3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# plot 4
plot(basetime, GRP, type="l", xlab="basetime", ylab="Global_reactive_power")
# close the png device
dev.off()
#_________________________________________________________________
|
#!/hpf/tools/centos6/R/3.1.0/bin/Rscript
# Script to generate and save the cluster labels for the complete data
argv <- as.numeric(commandArgs(T))
######################################################################
# Load libraries
library(SNFtool)
library(iClusterPlus)
library(impute)
######################################################################
# Initialize folders
homeFolder <- "/hpf/largeprojects/agoldenb/ben"
projectFolder <- paste(homeFolder, "Projects/SNF/NM_2015", sep="/")
testFolder <- paste(projectFolder, "Scripts",
"06_Two_Thousand_Features/cluster_original_data_similarity",
sep="/")
resultsFolder <- paste(testFolder, "Results", sep="/")
labelsFolder <- paste(resultsFolder, "Labels", sep="/")
######################################################################
# Initialize fixed variables
cancerTypes <- c("BRCA", "KIRC", "LIHC", "LUAD", "LUSC")
dataTypes <- c("methyl", "mirna", "mrna")
numCores <- 35
numFeat <- 2000
# Initialize variable parameters
# Data set which will be tested
cancer <- cancerTypes[argv[1]]
# Store the output in subfolders
resultsFile <- paste(paste(argv, collapse="_"), ".txt", sep="")
labelsFolder <- paste(resultsFolder, "Labels", sep="/")
######################################################################
# Load functions
# Note: some functions depend on variables initialized above!
# lsaImputation:
# -imputedFile, incompleteFile, projectFolder, jvmGBLimit
# iClusterClustering:
# -numCores
source(paste(projectFolder, "Scripts/loadFunctions.R", sep="/"))
######################################################################
# Load the original data
loadData <- function(dataType, suffix="") {
fileName <- paste(cancer, "_", dataType, suffix,".txt", sep="")
filePath <- paste(projectFolder, "Data", fileName, sep="/")
return(as.matrix(read.delim(filePath)))
}
numViews <- length(dataTypes)
cases <- vector("list", numViews)
controls <- vector("list", numViews)
# Load the biological data
for (v in 1:numViews) {
cases[[v]] <- loadData(dataTypes[v], "_cases")
controls[[v]] <- loadData(dataTypes[v], "_controls")
}
######################################################################
# Here we take the union of all the cases IDs as we want to use all of our methods on
# this union
# Extract all cases which appear in at least one of the data types
unionData <- columnUnion(cases)
######################################################################
# Select a subset of features which differ most between cases and
# controls.
featureSubsetIndices <- function(cases, subsetSize=numFeat) {
numViews <- length(cases)
featureSubsetInd <- vector("list", numViews)
for (v in 1:numViews) {
# Calculate the t-test p-value for each feature, grouped by cases
# and controls
numFeatures <- nrow(cases[[v]])
pval <- sapply(1:numFeatures,
function(i) t.test(cases[[v]][i, ],
controls[[v]][i, ])$p.value)
# Subset the data keeping the features with the smallest p-values
ind <- order(pval)
featureSubsetInd[[v]] <- ind[1:min(subsetSize, numFeatures)]
}
return(featureSubsetInd)
}
subsetData <- function(data, ind) {
for (v in 1:length(data)) {
data[[v]] <- data[[v]][ind[[v]], ]
}
return(data)
}
unionInd <- featureSubsetIndices(unionData)
unionData <- subsetData(unionData, unionInd)
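# Illustrative sketch (toy sizes, wrapped in `if (FALSE)` so it never runs):
# featureSubsetIndices() keeps, per view, the indices of the features with the
# smallest t-test p-values; note that it reads `controls` from the enclosing
# environment rather than taking it as an argument.
if (FALSE) {
  toyCases <- list(matrix(rnorm(50), 10, 5))
  controls <- list(matrix(rnorm(50), 10, 5))
  featureSubsetIndices(toyCases, subsetSize = 3)
}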
######################################################################
# Normalize the features in the data sets.
rowStatistics <- function(cases) {
numViews <- length(cases)
rowStats <- vector("list", numViews)
for (v in 1:numViews) {
# Calculate the row means and standard deviations
rowMean <- apply(cases[[v]], 1, mean, na.rm=TRUE)
rowSd <- apply(cases[[v]], 1, sd, na.rm=TRUE)
constantInd <- rowSd==0
rowSd[constantInd] <- 1
rowStats[[v]] <- list(mean=rowMean, sd=rowSd, ind=constantInd)
}
return(rowStats)
}
normalizeData <- function(data, stat) {
for (v in 1:length(data)) {
data[[v]] <- (data[[v]] - stat[[v]]$mean) / stat[[v]]$sd
data[[v]] <- data[[v]][!stat[[v]]$ind, ]
}
return(data)
}
unionStat <- rowStatistics(unionData)
unionData <- normalizeData(unionData, unionStat)
#####################################################################
# Convert to affinities matrix
data <- lapply(unionData, t)
# Calculate the distance between samples
distances <- lapply(data, function(x) as.matrix(dist(x)))
# Convert the distances to affinities
affinities <- lapply(distances, affinityMatrix)
# Now that it is in the correct form, we can apply the similarity methods
################################################################
# Generate and save the similarity labels for the unionData
sampleRows <- FALSE
similarityMethods <- c(selfSimilarity, medianSimilarity,
regressionSimilarity)
similarityData <- affinities
for (i in 1:length(similarityMethods)) {
similarity <- function(x) similarityMethods[[i]](x)
filled_affinities <- similarity(affinities)
fusedMatrix <- SNF(filled_affinities)
numClusEstimates <- unlist(
estimateNumberOfClustersGivenGraph(fusedMatrix, 2:10))
numClus <- max(numClusEstimates[c(1,3)])
labels <- spectralClustering(fusedMatrix, numClus)
fileName <- paste(paste(c(argv, i), collapse="_"), ".txt", sep="")
filePath <- paste(labelsFolder, fileName, sep="/")
write(labels, filePath, ncolumns=length(labels))
}
|
/Pipeline_Scripts/06_Two_Thousand_Features/cluster_original_data_similarity/job.R
|
no_license
|
benmbrew/missing_data
|
R
| false | false | 5,501 |
r
|
#!/hpf/tools/centos6/R/3.1.0/bin/Rscript
# Script to generate and save the cluster labels for the complete data
argv <- as.numeric(commandArgs(T))
######################################################################
# Load libraries
library(SNFtool)
library(iClusterPlus)
library(impute)
######################################################################
# Initialize folders
homeFolder <- "/hpf/largeprojects/agoldenb/ben"
projectFolder <- paste(homeFolder, "Projects/SNF/NM_2015", sep="/")
testFolder <- paste(projectFolder, "Scripts",
"06_Two_Thousand_Features/cluster_original_data_similarity",
sep="/")
resultsFolder <- paste(testFolder, "Results", sep="/")
labelsFolder <- paste(resultsFolder, "Labels", sep="/")
######################################################################
# Initialize fixed variables
cancerTypes <- c("BRCA", "KIRC", "LIHC", "LUAD", "LUSC")
dataTypes <- c("methyl", "mirna", "mrna")
numCores <- 35
numFeat <- 2000
# Initialize variable parameters
# Data set which will be tested
cancer <- cancerTypes[argv[1]]
# Store the output in subfolders
resultsFile <- paste(paste(argv, collapse="_"), ".txt", sep="")
labelsFolder <- paste(resultsFolder, "Labels", sep="/")
######################################################################
# Load functions
# Note: some functions depend on variables initialized above!
# lsaImputation:
# -imputedFile, incompleteFile, projectFolder, jvmGBLimit
# iClusterClustering:
# -numCores
source(paste(projectFolder, "Scripts/loadFunctions.R", sep="/"))
######################################################################
# Load the original data
loadData <- function(dataType, suffix="") {
fileName <- paste(cancer, "_", dataType, suffix,".txt", sep="")
filePath <- paste(projectFolder, "Data", fileName, sep="/")
return(as.matrix(read.delim(filePath)))
}
numViews <- length(dataTypes)
cases <- vector("list", numViews)
controls <- vector("list", numViews)
# Load the biological data
for (v in 1:numViews) {
cases[[v]] <- loadData(dataTypes[v], "_cases")
controls[[v]] <- loadData(dataTypes[v], "_controls")
}
######################################################################
# Here we take the union of all the cases IDs as we want to use all of our methods on
# this union
# Extract all cases which appear in at least one of the data types
unionData <- columnUnion(cases)
######################################################################
# Select a subset of features which differ most between cases and
# controls.
featureSubsetIndices <- function(cases, subsetSize=numFeat) {
numViews <- length(cases)
featureSubsetInd <- vector("list", numViews)
for (v in 1:numViews) {
# Calculate the t-test p-value for each feature, grouped by cases
# and controls
numFeatures <- nrow(cases[[v]])
pval <- sapply(1:numFeatures,
function(i) t.test(cases[[v]][i, ],
controls[[v]][i, ])$p.value)
# Subset the data keeping the features with the smallest p-values
ind <- order(pval)
featureSubsetInd[[v]] <- ind[1:min(subsetSize, numFeatures)]
}
return(featureSubsetInd)
}
subsetData <- function(data, ind) {
for (v in 1:length(data)) {
data[[v]] <- data[[v]][ind[[v]], ]
}
return(data)
}
unionInd <- featureSubsetIndices(unionData)
unionData <- subsetData(unionData, unionInd)
######################################################################
# Normalize the features in the data sets.
rowStatistics <- function(cases) {
numViews <- length(cases)
rowStats <- vector("list", numViews)
for (v in 1:numViews) {
# Calculate the row means and standard deviations
rowMean <- apply(cases[[v]], 1, mean, na.rm=TRUE)
rowSd <- apply(cases[[v]], 1, sd, na.rm=TRUE)
constantInd <- rowSd==0
rowSd[constantInd] <- 1
rowStats[[v]] <- list(mean=rowMean, sd=rowSd, ind=constantInd)
}
return(rowStats)
}
normalizeData <- function(data, stat) {
for (v in 1:length(data)) {
data[[v]] <- (data[[v]] - stat[[v]]$mean) / stat[[v]]$sd
data[[v]] <- data[[v]][!stat[[v]]$ind, ]
}
return(data)
}
unionStat <- rowStatistics(unionData)
unionData <- normalizeData(unionData, unionStat)
#####################################################################
# Convert to affinities matrix
data <- lapply(unionData, t)
# Calculate the distance between samples
distances <- lapply(data, function(x) as.matrix(dist(x)))
# Convert the distances to affinities
affinities <- lapply(distances, affinityMatrix)
# Now that it is in the correct form, we can apply the similarity methods
################################################################
# Generate and save the similarity labels for the unionData
sampleRows <- FALSE
similarityMethods <- c(selfSimilarity, medianSimilarity,
regressionSimilarity)
similarityData <- affinities
for (i in 1:length(similarityMethods)) {
similarity <- function(x) similarityMethods[[i]](x)
filled_affinities <- similarity(affinities)
fusedMatrix <- SNF(filled_affinities)
numClusEstimates <- unlist(
estimateNumberOfClustersGivenGraph(fusedMatrix, 2:10))
numClus <- max(numClusEstimates[c(1,3)])
labels <- spectralClustering(fusedMatrix, numClus)
fileName <- paste(paste(c(argv, i), collapse="_"), ".txt", sep="")
filePath <- paste(labelsFolder, fileName, sep="/")
write(labels, filePath, ncolumns=length(labels))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/issues.R
\name{issues}
\alias{issues}
\title{issues()}
\usage{
issues(project = ".")
}
\arguments{
\item{project}{string: the project to return issues for}
}
\value{
issues, a data frame.
}
\description{
Return the log of issues for the current project as a data frame. If no
log of issues exists, NULL will be returned and also, congratulations: you're
either really early in this project or amazing. Possibly both.
}
\examples{
issues()
}
|
/man/issues.Rd
|
no_license
|
stephstammel/consultthat
|
R
| false | true | 519 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/issues.R
\name{issues}
\alias{issues}
\title{issues()}
\usage{
issues(project = ".")
}
\arguments{
\item{project}{string: the project to return issues for}
}
\value{
issues, a data frame.
}
\description{
Return the log of issues for the current project as a data frame. If no
log of issues exists, NULL will be returned and also, congratulations: you're
either really early in this project or amazing. Possibly both.
}
\examples{
issues()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db_describe_person.R
\name{db_get_people}
\alias{db_get_people}
\title{Get list of people currently in database}
\usage{
db_get_people(db)
}
\arguments{
\item{db}{database connection object}
}
\value{
the current first and last names in the people table
}
\description{
Get list of people currently in database
}
\examples{
#db_get_people(db)
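# Hedged sketch; the connection setup below is illustrative only and not
# part of this package's documented workflow:
# db <- DBI::dbConnect(RSQLite::SQLite(), "odm2.sqlite")
# db_get_people(db)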
}
|
/man/db_get_people.Rd
|
permissive
|
KDavis0509/rodm2
|
R
| false | true | 424 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db_describe_person.R
\name{db_get_people}
\alias{db_get_people}
\title{Get list of people currently in database}
\usage{
db_get_people(db)
}
\arguments{
\item{db}{database connection object}
}
\value{
the current first and last names in the people table
}
\description{
Get list of people currently in database
}
\examples{
#db_get_people(db)
}
|
#######################################-
#######################################-
##
## Boston - Hubway: station status ----
##
#######################################-
#######################################-
#========================================#
#### Setting up ####
#========================================#
cat("\n---------------------------------------------------------\n")
cat("Hubway (Boston)")
#==========================================#
#### Downloading Station Status ####
#==========================================#
station_status_bos <-
get_json_safely("https://gbfs.thehubway.com/gbfs/en/station_status.json")
if (length(station_status_bos$error) >= 1) {
error_bos <- TRUE
cat("\n*ERROR*")
} else {
error_bos <- FALSE
#---------------------------------#
# Connecting to database ----
#---------------------------------#
hubway_db <-
dbConnect(RSQLite::SQLite(), "data/hubway_db_060118.sqlite3")
col_names <- tbl(hubway_db, "station_status") %>% head(0) %>% colnames()
# hubway_db_1 <-
# dbConnect(RSQLite::SQLite(), "data/hubway_db_040118.sqlite3")
#
# col_names <- tbl(hubway_db_1, "station_status") %>% head(0) %>% colnames()
#---------------------------------#
#---- Station Status ----
#---------------------------------#
station_status_bos_0 <-
station_status_bos$result$content %>%
read_file() %>%
fromJSON()
station_status_bos_1 <-
station_status_bos_0$data$stations %>%
select(one_of(col_names))
rm(col_names)
station_status_bos_2 <-
station_status_bos_1 %>%
add_column(
last_updated = station_status_bos_0$last_updated %>%
as_datetime(tz = "US/Eastern"),
.before = 1
) %>%
mutate(
last_reported = as_datetime(last_reported, tz = "US/Eastern"),
last_reported_chr = as.character(last_reported),
station_id = as.integer(station_id)
) %>%
as_tibble() %>%
distinct(station_id, last_reported, .keep_all = TRUE)
rm(station_status_bos_1)
#========================================#
#### Writing to database ####
#========================================#
dbWriteTable(
hubway_db,
"station_status",
value = station_status_bos_2,
append = TRUE,
temporary = FALSE
)
#========================================#
#### Update Info ####
#========================================#
cat("\nLast updated:",
station_status_bos_0$last_updated %>%
as_datetime(tz = "US/Eastern") %>%
as.character(),
"\n")
cat(nrow(station_status_bos_2), "rows added", "\n")
cat("---------------------------------------------------------")
rm(station_status_bos_0)
rm(station_status_bos)
rm(station_status_bos_2)
################################################################################
dbDisconnect(hubway_db)
################################################################################
}
################################################################################
################################################################################
|
/code/bikeshare_systems/Boston - Hubway.R
|
no_license
|
cgettings/10-bikeshare-systems-status
|
R
| false | false | 3,385 |
r
|
#######################################-
#######################################-
##
## Boston - Hubway: station status ----
##
#######################################-
#######################################-
#========================================#
#### Setting up ####
#========================================#
cat("\n---------------------------------------------------------\n")
cat("Hubway (Boston)")
#==========================================#
#### Downloading Station Status ####
#==========================================#
station_status_bos <-
get_json_safely("https://gbfs.thehubway.com/gbfs/en/station_status.json")
if (length(station_status_bos$error) >= 1) {
error_bos <- TRUE
cat("\n*ERROR*")
} else {
error_bos <- FALSE
#---------------------------------#
# Connecting to database ----
#---------------------------------#
hubway_db <-
dbConnect(RSQLite::SQLite(), "data/hubway_db_060118.sqlite3")
col_names <- tbl(hubway_db, "station_status") %>% head(0) %>% colnames()
# hubway_db_1 <-
# dbConnect(RSQLite::SQLite(), "data/hubway_db_040118.sqlite3")
#
# col_names <- tbl(hubway_db_1, "station_status") %>% head(0) %>% colnames()
#---------------------------------#
#---- Station Status ----
#---------------------------------#
station_status_bos_0 <-
station_status_bos$result$content %>%
read_file() %>%
fromJSON()
station_status_bos_1 <-
station_status_bos_0$data$stations %>%
select(one_of(col_names))
rm(col_names)
station_status_bos_2 <-
station_status_bos_1 %>%
add_column(
last_updated = station_status_bos_0$last_updated %>%
as_datetime(tz = "US/Eastern"),
.before = 1
) %>%
mutate(
last_reported = as_datetime(last_reported, tz = "US/Eastern"),
last_reported_chr = as.character(last_reported),
station_id = as.integer(station_id)
) %>%
as_tibble() %>%
distinct(station_id, last_reported, .keep_all = TRUE)
rm(station_status_bos_1)
#========================================#
#### Writing to database ####
#========================================#
dbWriteTable(
hubway_db,
"station_status",
value = station_status_bos_2,
append = TRUE,
temporary = FALSE
)
#========================================#
#### Update Info ####
#========================================#
cat("\nLast updated:",
station_status_bos_0$last_updated %>%
as_datetime(tz = "US/Eastern") %>%
as.character(),
"\n")
cat(nrow(station_status_bos_2), "rows added", "\n")
cat("---------------------------------------------------------")
rm(station_status_bos_0)
rm(station_status_bos)
rm(station_status_bos_2)
################################################################################
dbDisconnect(hubway_db)
################################################################################
}
################################################################################
################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capitalize.R
\name{capitalize}
\alias{capitalize}
\title{Capitalize string for use in rdfs definitions (not good version)}
\usage{
capitalize(s, strict = FALSE)
}
\arguments{
\item{s}{input string}
\item{strict}{if all characters should be capitalized}
}
\value{
s capitalized
}
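\examples{
# Hedged sketch: exact output depends on the package implementation of
# capitalize(); the shown value is what one would typically expect.
\dontrun{
capitalize("label")                    # typically "Label"
capitalize("two words", strict = TRUE)
}
}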
|
/rrdfqbcrnd0/man/capitalize.Rd
|
no_license
|
rjsheperd/rrdfqbcrnd0
|
R
| false | true | 358 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capitalize.R
\name{capitalize}
\alias{capitalize}
\title{Capitalize string for use in rdfs definitions (not good version)}
\usage{
capitalize(s, strict = FALSE)
}
\arguments{
\item{s}{input string}
\item{strict}{if all characters should be capitalized}
}
\value{
s capitalized
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/moviesdemo.R
\docType{package}
\name{moviesdemo}
\alias{moviesdemo}
\alias{moviesdemo-package}
\title{moviesdemo: A demo R package.}
\description{
A demo package for the SMCS course on creating and publishing R packages,
based on metadata of 4800 movies from The Movie Database.
}
\section{moviesdemo functions}{
\describe{
\item{\code{advise.good.movie}}{Advise movies based on another movie}
  \item{\code{sim.genres}}{Movie similarity based on genres}
\item{\code{sim.producers}}{Movie similarity based on production companies}
}
}
\section{Shiny App}{
A Shiny App is available through the \code{\link{runMovieApp}} function
}
\section{Database}{
The database used is the TMDb 4800 movies Database (see \code{\link{movies}})
}
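\examples{
# Hedged sketch; see the individual help pages for exact signatures.
\dontrun{
data(movies)
runMovieApp()
}
}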
|
/man/moviesdemo.Rd
|
no_license
|
ManonMartin/moviesdemo
|
R
| false | true | 832 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/moviesdemo.R
\docType{package}
\name{moviesdemo}
\alias{moviesdemo}
\alias{moviesdemo-package}
\title{moviesdemo: A demo R package.}
\description{
A demo package for the SMCS course on creating and publishing R packages,
based on metadata of 4800 movies from The Movie Database.
}
\section{moviesdemo functions}{
\describe{
\item{\code{advise.good.movie}}{Advise movies based on another movie}
  \item{\code{sim.genres}}{Movie similarity based on genres}
\item{\code{sim.producers}}{Movie similarity based on production companies}
}
}
\section{Shiny App}{
A Shiny App is available through the \code{\link{runMovieApp}} function
}
\section{Database}{
The database used is the TMDb 4800 movies Database (see \code{\link{movies}})
}
|
# cSum: for each window size in `s`, zero-pad `d` up to a multiple of that
# window, run mSum() over the padded vector, keep the first length(d) values,
# and finally average the per-window results element-wise.
`cSum` <- function(d, fac, s=c(50, 100, 200, 500, 1000, 2000, 5000)) {
mat = matrix(nrow=length(d), ncol=length(s))
for(x in 1:length(s)) {
c = (ceiling(length(d)/s[x]))*s[x]-length(d)
ss=seq(c)
ss[1:length(ss)]=0
m = mSum(c(d,ss),s[x], fac)
mat[,x] = m[1:length(mat[,1])]
}
return(apply(mat,1,mean))
}
|
/R/cSum.R
|
no_license
|
tf2/CNsolidate
|
R
| false | false | 330 |
r
|
# cSum: for each window size in `s`, zero-pad `d` up to a multiple of that
# window, run mSum() over the padded vector, keep the first length(d) values,
# and finally average the per-window results element-wise.
`cSum` <- function(d, fac, s=c(50, 100, 200, 500, 1000, 2000, 5000)) {
mat = matrix(nrow=length(d), ncol=length(s))
for(x in 1:length(s)) {
c = (ceiling(length(d)/s[x]))*s[x]-length(d)
ss=seq(c)
ss[1:length(ss)]=0
m = mSum(c(d,ss),s[x], fac)
mat[,x] = m[1:length(mat[,1])]
}
return(apply(mat,1,mean))
}
|
# Data Viz
library(forcats)
library(tidyverse)
library(here)
library(stringr)
path2cache <- here("spreadsheets/cache/allPrinters/")
# import latest version of allPrinters
all_printers <- read_csv(paste0(path2cache, "allPrinters-x1-correct04.csv"))
# FILTER all_printers ----
## inspect the product_type values present (no filtering applied here yet)
unique(all_printers$product_type)
# PLOTS parent_co ----
## group data set
df.by_parent <- all_printers %>%
group_by(parent_co) %>%
drop_na(source_vol)
## parent_entryexit
### generate entry (status == 1), exit (status == 4) indicators, longevity value (mkt_vols)
df.parent_entryexit <- df.by_parent %>%
summarise(min_vol = min(source_vol),
max_vol = max(source_vol),
mkt_vols = max_vol - min_vol + 1,
no_of_brands = n_distinct(product_brand)) %>%
mutate(parent_co = fct_reorder(parent_co, desc(min_vol))) %>%
rename(`1` = min_vol,
`4` = max_vol) %>%
gather(`1`, `4`, key = "status", value = "source_vol") %>%
mutate(status = as.numeric(status))
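### illustrative sketch (hypothetical values, wrapped in `if (FALSE)` so it is
### never evaluated): after the rename/gather above, each company contributes
### two rows, status 1 marking its first volume (entry) and status 4 its last
### volume (exit), e.g.
if (FALSE) {
  tibble::tribble(
    ~parent_co, ~mkt_vols, ~no_of_brands, ~status, ~source_vol,
    "Acme",             5,             2,       1,           3,
    "Acme",             5,             2,       4,           7
  )
}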
### plot entry/exit of parent_co by longevity, then entry year
plot.parent_entryexit <- df.parent_entryexit %>%
ggplot(mapping = aes(y = fct_reorder(parent_co, mkt_vols), x = factor(source_vol + 1981))) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = parent_co)) +
labs(title = "Companies in Market",
subtitle = "arranged by longevity (based on first & last appearance), then entry year (first appearance)",
x = "year in PC Magazine",
y = "Company Name")
print(plot.parent_entryexit)
## parent_status
### merge status variable into all_printers, fill in review (status == 3), fill longevity value
df.parent_status <-
left_join(all_printers, df.parent_entryexit, by = c("parent_co", "source_vol")) %>%
select(c(parent_co, status, mkt_vols, source_vol)) %>%
replace_na(list(status = 3)) %>%
fill(mkt_vols)
plot.parent_status <-
df.parent_status %>%
ggplot(mapping = aes(y = fct_reorder(parent_co, mkt_vols), x = source_vol + 1981)) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = parent_co))
print(plot.parent_status)
#
# PLOTS engine_brand
## engine_entryexit
by_engine <- all_printers %>%
group_by(engine_brand) %>%
drop_na(source_vol) %>%
filter(!engine_brand %in% c("UNKN", "N/A"))
df.engine_entryexit <- by_engine %>%
summarise(min_vol = min(source_vol),
max_vol = max(source_vol),
mkt_vols = max_vol - min_vol + 1,
no_of_brands = n_distinct(product_brand)) %>%
mutate(engine_brand = fct_reorder(engine_brand, desc(min_vol))) %>%
rename(`1` = min_vol,
`4` = max_vol) %>%
gather(`1`, `4`, key = "status", value = "source_vol") %>%
mutate(status = as.numeric(status))
plot.engine_entryexit <- df.engine_entryexit %>%
ggplot(mapping = aes(y = fct_reorder(engine_brand, mkt_vols), x = factor(source_vol + 1981))) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = engine_brand, alpha = mkt_vols)) +
labs(title = "Printer Engine Manufacturers",
subtitle = "arranged by longevity (based on first & last appearance), then entry year (first appearance)",
x = "year in PC Magazine",
y = "Engine Manufacturer") +
guides(alpha=FALSE)
#### http://felixfan.github.io/ggplot2-remove-grid-background-margin/
plot.engine_entryexit +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))
## engine_entryexit+parentco count
plot.engine_entryexit_brands <- df.engine_entryexit %>%
  ggplot(mapping = aes(y = fct_reorder(engine_brand, mkt_vols), x = factor(source_vol + 1981))) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = engine_brand, colour = desc(no_of_brands))) +
labs(title = "Number of Printer Brands Supplied to by Engine Manufacturers",
subtitle = "arranged by longevity (based on first & last appearance), then entry year (first appearance)",
x = "Year in PC Magazine",
y = "Engine Manufacturer",
colour = "Brands")
|
/scripts/allPrinters-01-dataviz.R
|
permissive
|
cynthiahqy/dataset-pcmag
|
R
| false | false | 4,189 |
r
|
# Data Viz
library(forcats)
library(tidyverse)
library(here)
library(stringr)
path2cache <- here("spreadsheets/cache/allPrinters/")
# import latest version of allPrinters
all_printers <- read_csv(paste0(path2cache, "allPrinters-x1-correct04.csv"))
# FILTER all_printers ----
## inspect the product_type values present (no filtering applied here yet)
unique(all_printers$product_type)
# PLOTS parent_co ----
## group data set
df.by_parent <- all_printers %>%
group_by(parent_co) %>%
drop_na(source_vol)
## parent_entryexit
### generate entry (status == 1), exit (status == 4) indicators, longevity value (mkt_vols)
df.parent_entryexit <- df.by_parent %>%
summarise(min_vol = min(source_vol),
max_vol = max(source_vol),
mkt_vols = max_vol - min_vol + 1,
no_of_brands = n_distinct(product_brand)) %>%
mutate(parent_co = fct_reorder(parent_co, desc(min_vol))) %>%
rename(`1` = min_vol,
`4` = max_vol) %>%
gather(`1`, `4`, key = "status", value = "source_vol") %>%
mutate(status = as.numeric(status))
### plot entry/exit of parent_co by longevity, then entry year
plot.parent_entryexit <- df.parent_entryexit %>%
ggplot(mapping = aes(y = fct_reorder(parent_co, mkt_vols), x = factor(source_vol + 1981))) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = parent_co)) +
labs(title = "Companies in Market",
subtitle = "arranged by longevity (based on first & last appearance), then entry year (first appearance)",
x = "year in PC Magazine",
y = "Company Name")
print(plot.parent_entryexit)
## parent_status
### merge status variable into all_printers, fill in review (status == 3), fill longevity value
df.parent_status <-
left_join(all_printers, df.parent_entryexit, by = c("parent_co", "source_vol")) %>%
select(c(parent_co, status, mkt_vols, source_vol)) %>%
replace_na(list(status = 3)) %>%
fill(mkt_vols)
plot.parent_status <-
df.parent_status %>%
ggplot(mapping = aes(y = fct_reorder(parent_co, mkt_vols), x = source_vol + 1981)) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = parent_co))
print(plot.parent_status)
#
# PLOTS engine_brand
## engine_entryexit
by_engine <- all_printers %>%
group_by(engine_brand) %>%
drop_na(source_vol) %>%
filter(!engine_brand %in% c("UNKN", "N/A"))
df.engine_entryexit <- by_engine %>%
summarise(min_vol = min(source_vol),
max_vol = max(source_vol),
mkt_vols = max_vol - min_vol + 1,
no_of_brands = n_distinct(product_brand)) %>%
mutate(engine_brand = fct_reorder(engine_brand, desc(min_vol))) %>%
rename(`1` = min_vol,
`4` = max_vol) %>%
gather(`1`, `4`, key = "status", value = "source_vol") %>%
mutate(status = as.numeric(status))
plot.engine_entryexit <- df.engine_entryexit %>%
ggplot(mapping = aes(y = fct_reorder(engine_brand, mkt_vols), x = factor(source_vol + 1981))) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = engine_brand, alpha = mkt_vols)) +
labs(title = "Printer Engine Manufacturers",
subtitle = "arranged by longevity (based on first & last appearance), then entry year (first appearance)",
x = "year in PC Magazine",
y = "Engine Manufacturer") +
guides(alpha=FALSE)
#### http://felixfan.github.io/ggplot2-remove-grid-background-margin/
plot.engine_entryexit +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))
## engine_entryexit+parentco count
plot.engine_entryexit_brands <- df.engine_entryexit %>%
  ggplot(mapping = aes(y = fct_reorder(engine_brand, mkt_vols), x = factor(source_vol + 1981))) +
geom_point(aes(shape = status)) + scale_shape_identity() +
geom_line(aes(group = engine_brand, colour = desc(no_of_brands))) +
labs(title = "Number of Printer Brands Supplied to by Engine Manufacturers",
subtitle = "arranged by longevity (based on first & last appearance), then entry year (first appearance)",
x = "Year in PC Magazine",
y = "Engine Manufacturer",
colour = "Brands")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/append_to_stream.R
\name{append_data_frame_to_stream}
\alias{append_data_frame_to_stream}
\title{Append an R data frame to a data.world stream.}
\usage{
append_data_frame_to_stream(owner_id, dataset_id, stream_id, data_frame,
retry_times = 3, retry_quiet = FALSE)
}
\arguments{
\item{owner_id}{User name and unique identifier of the creator of a
dataset or project}
\item{dataset_id}{Dataset unique identifier}
\item{stream_id}{Stream unique identifier as defined by the user the first
time the stream was used. Only lower case letters, numbers and
dashes are allowed.}
\item{data_frame}{The data frame containing the rows to append to the
stream}
\item{retry_times}{the number of times to retry the request}
\item{retry_quiet}{whether to suppress diagnostic messages during retries}
}
\value{
Server response message.
}
\description{
Append an R data frame to a data.world stream.
If the data.world API
returns an HTTP status of 429 (Too Many Requests), this function uses
\code{\link[httr]{RETRY}} to retry the request.
}
\examples{
\dontrun{
aDf <- data.frame(ID=1:2, Value=c('One', 'Two'), stringsAsFactors = FALSE)
dwapi::append_data_frame_to_stream(owner_id = 'user',
dataset_id = 'dataset', stream_id = 'mystream',
aDf)
aDf <- data.frame(ID=1:2, Value=c('One', 'Two'), stringsAsFactors = FALSE)
dwapi::append_data_frame_to_stream(owner_id = 'user',
dataset_id = 'dataset', stream_id = 'mystream',
aDf, retry_times = 10, retry_quiet = TRUE)
}
}
|
/man/append_data_frame_to_stream.Rd
|
no_license
|
cran/dwapi
|
R
| false | true | 1,560 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/append_to_stream.R
\name{append_data_frame_to_stream}
\alias{append_data_frame_to_stream}
\title{Append an R data frame to a data.world stream.}
\usage{
append_data_frame_to_stream(owner_id, dataset_id, stream_id, data_frame,
retry_times = 3, retry_quiet = FALSE)
}
\arguments{
\item{owner_id}{User name and unique identifier of the creator of a
dataset or project}
\item{dataset_id}{Dataset unique identifier}
\item{stream_id}{Stream unique identifier as defined by the user the first
time the stream was used. Only lower case letters, numbers and
dashes are allowed.}
\item{data_frame}{The data frame containing the rows to append to the
stream}
\item{retry_times}{the number of times to retry the request}
\item{retry_quiet}{whether to suppress diagnostic messages during retries}
}
\value{
Server response message.
}
\description{
Append an R data frame to a data.world stream.
If the data.world API
returns an HTTP status of 429 (Too Many Requests), this function uses
\code{\link[httr]{RETRY}} to retry the request.
}
\examples{
\dontrun{
aDf <- data.frame(ID=1:2, Value=c('One', 'Two'), stringsAsFactors = FALSE)
dwapi::append_data_frame_to_stream(owner_id = 'user',
dataset_id = 'dataset', stream_id = 'mystream',
aDf)
aDf <- data.frame(ID=1:2, Value=c('One', 'Two'), stringsAsFactors = FALSE)
dwapi::append_data_frame_to_stream(owner_id = 'user',
dataset_id = 'dataset', stream_id = 'mystream',
aDf, retry_times = 10, retry_quiet = TRUE)
}
}
|
# Small script snippets that can be brief
# examples for programming or demonstrations
# Print Hello World
hello_world <- function() {
myString <- "Hello, World!"
print (myString)
}
# Monte Carlo Pi
montecarloPi <- function(trials) {
count = 0
for(i in 1:trials) {
if((runif(1,0,1)^2 + runif(1,0,1)^2) < 1) {
count = count + 1
}
}
return((count*4) / trials)
}
# Estimate Pi
est.pi <- function(n){
# drawing in [0,1] x [0,1] covers one quarter of square and circle
# draw random numbers for the coordinates of the "dart-hits"
a <- runif(n,0,1)
b <- runif(n,0,1)
# use the pythagorean theorem
c <- sqrt((a^2) + (b^2) )
inside <- sum(c<1)
#outside <- n-inside
pi.est <- inside/n*4
return(pi.est)
}
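# Example usage (illustrative): both estimators approach pi as n grows, e.g.
# montecarloPi(100000)   # about 3.14
# est.pi(100000)         # about 3.14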
# Square function
# adapted from https://hbctraining.github.io/Intro-to-R/lessons/03_introR-functions-and-arguments.html#user-defined-functions
# and https://www.r-bloggers.com/how-to-write-and-debug-an-r-function/
# Square function
square_it <- function(x){
sq <- x*x
return(sq)
}
# Anscombe's quartet
# Examples from https://www.r-bloggers.com/using-and-abusing-data-visualization-anscombes-quartet-and-cheating-bonferroni/
# Anscombe's quartet
anscombes_quartet <- function() {
library(Tmisc)
# Load the data and look at it
data(quartet)
str(quartet)
# Compute simple statistics for each
library(dplyr)
quartet %>%
group_by(set) %>%
summarize(mean(x), sd(x), mean(y), sd(y), cor(x,y))
# Visualize the data
library(ggplot2)
ggplot(quartet, aes(x, y)) +
geom_point() +
geom_smooth(method = lm, se = FALSE) +
facet_wrap(~set)
}
|
/code/util_functions.R
|
permissive
|
JuliaMRogers/gitkraken_workshop
|
R
| false | false | 1,665 |
r
|
# Small script snippets that can be brief
# examples for programming or demonstrations
# Print Hello World
hello_world <- function() {
myString <- "Hello, World!"
print (myString)
}
# Monte Carlo Pi
montecarloPi <- function(trials) {
count = 0
for(i in 1:trials) {
if((runif(1,0,1)^2 + runif(1,0,1)^2) < 1) {
count = count + 1
}
}
return((count*4) / trials)
}
# Estimate Pi
est.pi <- function(n){
# drawing in [0,1] x [0,1] covers one quarter of square and circle
# draw random numbers for the coordinates of the "dart-hits"
a <- runif(n,0,1)
b <- runif(n,0,1)
# use the pythagorean theorem
c <- sqrt((a^2) + (b^2) )
inside <- sum(c<1)
#outside <- n-inside
pi.est <- inside/n*4
return(pi.est)
}
# Square function
# adapted from https://hbctraining.github.io/Intro-to-R/lessons/03_introR-functions-and-arguments.html#user-defined-functions
# and https://www.r-bloggers.com/how-to-write-and-debug-an-r-function/
# Square function
square_it <- function(x){
sq <- x*x
return(sq)
}
# Anscombe's quartet
# Examples from https://www.r-bloggers.com/using-and-abusing-data-visualization-anscombes-quartet-and-cheating-bonferroni/
# Anscombe's quartet
anscombes_quartet <- function() {
library(Tmisc)
# Load the data and look at it
data(quartet)
str(quartet)
# Compute simple statistics for each
library(dplyr)
quartet %>%
group_by(set) %>%
summarize(mean(x), sd(x), mean(y), sd(y), cor(x,y))
# Visualize the data
library(ggplot2)
ggplot(quartet, aes(x, y)) +
geom_point() +
geom_smooth(method = lm, se = FALSE) +
facet_wrap(~set)
}
|
testlist <- list(n = c(10L, 349L, 1560281271L, 14966024L, 1253523200L, 1L, 3061514L, 179781981L, 28817153L, 0L, 6102016L, 1572274432L, 1L, 170833674L, 179781981L, 16801216L, NA, -1L, 2115567615L, -12506625L, 1563033857L, 0L, -1218631167L, 16777216L))
result <- do.call(gdalcubes:::libgdalcubes_set_threads,testlist)
str(result)
|
/gdalcubes/inst/testfiles/libgdalcubes_set_threads/libFuzzer_libgdalcubes_set_threads/libgdalcubes_set_threads_valgrind_files/1609875250-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 330 |
r
|
testlist <- list(n = c(10L, 349L, 1560281271L, 14966024L, 1253523200L, 1L, 3061514L, 179781981L, 28817153L, 0L, 6102016L, 1572274432L, 1L, 170833674L, 179781981L, 16801216L, NA, -1L, 2115567615L, -12506625L, 1563033857L, 0L, -1218631167L, 16777216L))
result <- do.call(gdalcubes:::libgdalcubes_set_threads,testlist)
str(result)
|
# OEML - REST API
#
# This section will provide necessary information about the `CoinAPI OEML REST API` protocol. This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a>
#
# The version of the OpenAPI document: v1
# Contact: support@coinapi.io
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title Severity
#'
#' @description Severity Class
#'
#' @format An \code{R6Class} generator object
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
Severity <- R6::R6Class(
"Severity",
public = list(
initialize = function(...) {
local.optional.var <- list(...)
val <- unlist(local.optional.var)
enumvec <- .parse_Severity()
stopifnot(length(val) == 1L)
if (!val %in% enumvec)
stop("Use one of the valid values: ",
paste0(enumvec, collapse = ", "))
private$value <- val
},
toJSON = function() {
jsonlite::toJSON(private$value, auto_unbox = TRUE)
},
fromJSON = function(SeverityJson) {
private$value <- jsonlite::fromJSON(SeverityJson,
simplifyVector = FALSE)
self
},
toJSONString = function() {
as.character(jsonlite::toJSON(private$value,
auto_unbox = TRUE))
},
fromJSONString = function(SeverityJson) {
private$value <- jsonlite::fromJSON(SeverityJson,
simplifyVector = FALSE)
self
}
),
private = list(
value = NULL
)
)
# add to utils.R
.parse_Severity <- function(vals) {
res <- gsub("^\\[|\\]$", "",
"[INFO, WARNING, ERROR]"
)
unlist(strsplit(res, ", "))
}
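# Illustrative usage sketch (not produced by the generator):
# sev <- Severity$new("ERROR")   # accepts only INFO, WARNING or ERROR
# sev$toJSONString()             # returns the JSON string "\"ERROR\""
# Severity$new("FATAL")          # stops with "Use one of the valid values: ..."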
|
/oeml-sdk/r/R/severity.R
|
permissive
|
Martin-Molinero/coinapi-sdk
|
R
| false | false | 1,861 |
r
|
# OEML - REST API
#
# This section will provide necessary information about the `CoinAPI OEML REST API` protocol. This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a>
#
# The version of the OpenAPI document: v1
# Contact: support@coinapi.io
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title Severity
#'
#' @description Severity Class
#'
#' @format An \code{R6Class} generator object
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
Severity <- R6::R6Class(
"Severity",
public = list(
initialize = function(...) {
local.optional.var <- list(...)
val <- unlist(local.optional.var)
enumvec <- .parse_Severity()
stopifnot(length(val) == 1L)
if (!val %in% enumvec)
stop("Use one of the valid values: ",
paste0(enumvec, collapse = ", "))
private$value <- val
},
toJSON = function() {
jsonlite::toJSON(private$value, auto_unbox = TRUE)
},
fromJSON = function(SeverityJson) {
private$value <- jsonlite::fromJSON(SeverityJson,
simplifyVector = FALSE)
self
},
toJSONString = function() {
as.character(jsonlite::toJSON(private$value,
auto_unbox = TRUE))
},
fromJSONString = function(SeverityJson) {
private$value <- jsonlite::fromJSON(SeverityJson,
simplifyVector = FALSE)
self
}
),
private = list(
value = NULL
)
)
# add to utils.R
.parse_Severity <- function(vals) {
res <- gsub("^\\[|\\]$", "",
"[INFO, WARNING, ERROR]"
)
unlist(strsplit(res, ", "))
}
|
test_that("test suite aaa",{
expect_true(coeffs(constant(subs(homog(2,2),1,10))) == 100)
expect_true(coeffs(subs(product(1:3),3,10))==1000)
a <- spray(diag(5))
expect_true(is.empty(a[2,3,4,1,5]))
## Following test removes zero from the last row of index matrix
## M:
expect_true(is.zero(spray(matrix(c(1,1,2,2),2,2),c(1,-1),addrepeats=TRUE)))
## Following test removes zero from not-the-last row of index matrix M:
expect_silent(ignore <- spray(matrix(c(1,2,1,2,3,3),byrow=T,ncol=2),c(1,-1,10),addrepeats=TRUE))
## Following test checks a big-ish spray where multiple index rows
## cancel to zero:
expect_true(is.zero(spray(kronecker(matrix(1,16,4),1+diag(2)),rep(c(1,1,-1,-1),length=32),addrepeats=TRUE)))
## test the addrepeats error:
expect_error(spray(matrix(1,2,3)))
expect_error(spray(spray(1:4)))
expect_silent(spray(spray(1:4),3))
expect_silent(spray(1:4,5))
expect_error(spray(spray(1:4,5),x=1:2))
expect_silent(S <- spray(matrix(1:7,5,7)))
expect_error(coeffs(S) <- 1:2)
expect_silent(coeffs(S) <- 13)
expect_silent(as.spray(spray(diag(7))))
expect_silent(as.spray(list(diag(9),seq_len(9))))
expect_silent(as.spray(diag(9),seq_len(9)))
expect_silent(as.spray(array(seq_len(24),2:4)))
expect_silent(as.spray(array(seq_len(24),2:4),offbyone=TRUE))
expect_error(as.spray(sin))
expect_silent(dim(spray(diag(5),1:5)))
expect_error(dim(spray(diag(5)-1,1:5)))
expect_silent(as.array(spray(diag(5)+1,1:5)))
expect_silent(as.array(spray(diag(5)+1,1:5),compact=TRUE))
expect_silent(as.array(spray(diag(5)+1,1:5),compact=TRUE,offbyone=TRUE))
expect_error(as.array(spray(diag(5),1:5)))
expect_error(as.array(spray(diag(5)-4,1:5)))
expect_error(spray_missing_accessor(0))
S1 <- rspray(5)
S2 <- rspray(5)
expect_silent(S1[S2])
expect_silent(S1[S2,drop=TRUE])
expect_error(S1[rspray(n=3,arity=4)])
expect_silent(S1[] <- 3)
expect_silent(S1[] <- S2)
expect_silent(S1[diag(3)] <- 3)
expect_silent(S1[S2] <- 3)
expect_silent(S2[1:2,1:2,1:2] <- 55)
expect_error(S2[1:2,1:2] <- 55)
expect_silent(S2[1:2,1:2,1:2] <- 55)
expect_error(S2[1:2,1:2,1:2,1:2] <- 55)
S1 <- spray(matrix(sample(-2:2,replace=TRUE,21),ncol=3),rnorm(7),addrepeats=TRUE)
S2 <- spray(matrix(sample(-2:2,replace=TRUE,15),ncol=3),rnorm(5),addrepeats=TRUE)
f1 <- as.function(S1)
f2 <- as.function(S2)
f3 <- as.function(S1*S2)
x <- 4:6
expect_equal(f1(x)*f2(x),f3(x))
expect_silent(constant(S1) <- 3)
expect_equal(S1,S1+zero(3))
expect_silent(linear(1:4)+lone(3,4)+xyz(4)+one(4))
expect_silent(one(rspray(9)))
a <- homog(4,2)
jj <- (1-a)*ooom(a,3)
expect_true(constant(jj,drop=TRUE)==1)
expect_true(sum(rowSums(index(jj))==0)==1)
expect_true(all(rowSums(index(jj)) %in% c(0,8)))
expect_output(print(rspray(6)))
expect_output(print(S1-S1))
options(polyform=TRUE)
expect_output(print(rspray(6)))
expect_output(print(100+rspray(6)))
expect_output(print(100-rspray(6)))
expect_output(print(S1-S1))
expect_output(print(spray(diag(9))))
expect_output(print(2.2*spray(diag(9))))
expect_output(print(2.2*spray(diag(9))))
options(sprayvars=letters)
a <- diag(26)
expect_output(print(spray(a)))
expect_output(print(spray(matrix(0,2,3),5,addrepeats=TRUE)))
S <- spray(matrix(sample(0:2,60,replace=TRUE),ncol=3),addrepeats=TRUE)
expect_equal(arity(asum(S,1)),2)
## NB: following three tests must use '=='; expect_equal(asum(S,1),asum_inverted(S,c(2,3))) returns FALSE!
expect_true(asum(S,1) == asum_inverted(S,c(2,3)))
expect_true(asum(S,2) == asum_inverted(S,c(1,3)))
expect_true(asum(S,3) == asum_inverted(S,c(1,2)))
expect_equal(process_dimensions(S,c(TRUE,TRUE,FALSE)),1:2)
expect_equal(constant(asum(S,1:3,drop=TRUE),drop=TRUE),20)
expect_equal(constant(asum(S,1:3,drop=FALSE),drop=TRUE),20)
## Leibniz's rule:
S1 <- spray(matrix(sample(0:3,replace=TRUE,21),ncol=3),sample(7),addrepeats=TRUE)
S2 <- spray(matrix(sample(0:3,replace=TRUE,15),ncol=3),sample(5),addrepeats=TRUE)
expect_true(S1*deriv(S2,1) + deriv(S1,1)*S2 == deriv(S1*S2,1))
S1 <- rspray(100,vals=sample(100)-50)
S2 <- rspray(100,vals=sample(100)-50)
S3 <- rspray(100,vals=sample(100)-50)
jj <- pmax(S1,S2,S3)
expect_true(jj == maxpair_spray(S1,maxpair_spray(S2,S3)))
expect_true(jj == maxpair_spray(maxpair_spray(S1,S2),S3))
expect_true(pmax(S1,S2,S3) == -pmin(-S1,-S2,-S3))
expect_true(pmin(S1,S2,S3) == -pmax(-S1,-S2,-S3))
expect_true(pmax(S1,-Inf) == S1)
expect_true(pmin(S1, Inf) == S1)
expect_silent(jj <- minpair_spray(S1,S2))
expect_silent(jj <- maxpair_spray(S1,S2))
expect_true(minpair_spray(S1) == S1)
expect_true(maxpair_spray(S1) == S1)
expect_error(minpair_spray(S1,1:3))
expect_error(maxpair_spray(S1,1:3))
expect_error(minpair_spray(S1,-1))
expect_error(maxpair_spray(S1,+1))
SS1 <- rspray(5,arity=119)
SS2 <- rspray(7,arity=119)
expect_silent(minpair_spray(SS1,SS2))
expect_silent(minpair_spray(SS2,SS1))
expect_silent(maxpair_spray(SS1,SS2))
expect_silent(maxpair_spray(SS2,SS1))
expect_equal(constant(knight()^5,drop=TRUE),0)
expect_equal(constant(king()^5,drop=TRUE),1200)
expect_true(nterms(spray(diag(7)))==7)
expect_true(S1^0 == one(arity(S1)))
expect_true(S1^1 == S1)
expect_true(S2^0 == one(arity(S2)))
expect_true(S2^1 == S2)
Sz <- spray(matrix(sample(1:50),ncol=2),10^-(1:25))
expect_true(length(coeffs(Sz)) >= length(coeffs(zap(Sz))))
expect_true(length(coeffs(Sz)) >= length(coeffs(zapsmall(Sz))))
expect_true(length(coeffs(Sz)) >= length(zapsmall(coeffs(Sz))))
})
|
/tests/testthat/test_aaa.R
|
no_license
|
RobinHankin/spray
|
R
| false | false | 5,936 |
r
|
test_that("test suite aaa",{
expect_true(coeffs(constant(subs(homog(2,2),1,10))) == 100)
expect_true(coeffs(subs(product(1:3),3,10))==1000)
a <- spray(diag(5))
expect_true(is.empty(a[2,3,4,1,5]))
## Following test removes zero from the last row of index matrix
## M:
expect_true(is.zero(spray(matrix(c(1,1,2,2),2,2),c(1,-1),addrepeats=TRUE)))
## Following test removes zero from not-the-last row of index matrix M:
expect_silent(ignore <- spray(matrix(c(1,2,1,2,3,3),byrow=T,ncol=2),c(1,-1,10),addrepeats=TRUE))
## Following test checks a big-ish spray where multiple index rows
## cancel to zero:
expect_true(is.zero(spray(kronecker(matrix(1,16,4),1+diag(2)),rep(c(1,1,-1,-1),length=32),addrepeats=TRUE)))
## test the addrepeats error:
expect_error(spray(matrix(1,2,3)))
expect_error(spray(spray(1:4)))
expect_silent(spray(spray(1:4),3))
expect_silent(spray(1:4,5))
expect_error(spray(spray(1:4,5),x=1:2))
expect_silent(S <- spray(matrix(1:7,5,7)))
expect_error(coeffs(S) <- 1:2)
expect_silent(coeffs(S) <- 13)
expect_silent(as.spray(spray(diag(7))))
expect_silent(as.spray(list(diag(9),seq_len(9))))
expect_silent(as.spray(diag(9),seq_len(9)))
expect_silent(as.spray(array(seq_len(24),2:4)))
expect_silent(as.spray(array(seq_len(24),2:4),offbyone=TRUE))
expect_error(as.spray(sin))
expect_silent(dim(spray(diag(5),1:5)))
expect_error(dim(spray(diag(5)-1,1:5)))
expect_silent(as.array(spray(diag(5)+1,1:5)))
expect_silent(as.array(spray(diag(5)+1,1:5),compact=TRUE))
expect_silent(as.array(spray(diag(5)+1,1:5),compact=TRUE,offbyone=TRUE))
expect_error(as.array(spray(diag(5),1:5)))
expect_error(as.array(spray(diag(5)-4,1:5)))
expect_error(spray_missing_accessor(0))
S1 <- rspray(5)
S2 <- rspray(5)
expect_silent(S1[S2])
expect_silent(S1[S2,drop=TRUE])
expect_error(S1[rspray(n=3,arity=4)])
expect_silent(S1[] <- 3)
expect_silent(S1[] <- S2)
expect_silent(S1[diag(3)] <- 3)
expect_silent(S1[S2] <- 3)
expect_silent(S2[1:2,1:2,1:2] <- 55)
expect_error(S2[1:2,1:2] <- 55)
expect_silent(S2[1:2,1:2,1:2] <- 55)
expect_error(S2[1:2,1:2,1:2,1:2] <- 55)
S1 <- spray(matrix(sample(-2:2,replace=TRUE,21),ncol=3),rnorm(7),addrepeats=TRUE)
S2 <- spray(matrix(sample(-2:2,replace=TRUE,15),ncol=3),rnorm(5),addrepeats=TRUE)
f1 <- as.function(S1)
f2 <- as.function(S2)
f3 <- as.function(S1*S2)
x <- 4:6
expect_equal(f1(x)*f2(x),f3(x))
expect_silent(constant(S1) <- 3)
expect_equal(S1,S1+zero(3))
expect_silent(linear(1:4)+lone(3,4)+xyz(4)+one(4))
expect_silent(one(rspray(9)))
a <- homog(4,2)
jj <- (1-a)*ooom(a,3)
expect_true(constant(jj,drop=TRUE)==1)
expect_true(sum(rowSums(index(jj))==0)==1)
expect_true(all(rowSums(index(jj)) %in% c(0,8)))
expect_output(print(rspray(6)))
expect_output(print(S1-S1))
options(polyform=TRUE)
expect_output(print(rspray(6)))
expect_output(print(100+rspray(6)))
expect_output(print(100-rspray(6)))
expect_output(print(S1-S1))
expect_output(print(spray(diag(9))))
expect_output(print(2.2*spray(diag(9))))
expect_output(print(2.2*spray(diag(9))))
options(sprayvars=letters)
a <- diag(26)
expect_output(print(spray(a)))
expect_output(print(spray(matrix(0,2,3),5,addrepeats=TRUE)))
S <- spray(matrix(sample(0:2,60,replace=TRUE),ncol=3),addrepeats=TRUE)
expect_equal(arity(asum(S,1)),2)
## NB: following three tests must use '=='; expect_equal(asum(S,1),asum_inverted(S,c(2,3))) returns FALSE!
expect_true(asum(S,1) == asum_inverted(S,c(2,3)))
expect_true(asum(S,2) == asum_inverted(S,c(1,3)))
expect_true(asum(S,3) == asum_inverted(S,c(1,2)))
expect_equal(process_dimensions(S,c(TRUE,TRUE,FALSE)),1:2)
expect_equal(constant(asum(S,1:3,drop=TRUE),drop=TRUE),20)
expect_equal(constant(asum(S,1:3,drop=FALSE),drop=TRUE),20)
## Leibniz's rule:
S1 <- spray(matrix(sample(0:3,replace=TRUE,21),ncol=3),sample(7),addrepeats=TRUE)
S2 <- spray(matrix(sample(0:3,replace=TRUE,15),ncol=3),sample(5),addrepeats=TRUE)
expect_true(S1*deriv(S2,1) + deriv(S1,1)*S2 == deriv(S1*S2,1))
S1 <- rspray(100,vals=sample(100)-50)
S2 <- rspray(100,vals=sample(100)-50)
S3 <- rspray(100,vals=sample(100)-50)
jj <- pmax(S1,S2,S3)
expect_true(jj == maxpair_spray(S1,maxpair_spray(S2,S3)))
expect_true(jj == maxpair_spray(maxpair_spray(S1,S2),S3))
expect_true(pmax(S1,S2,S3) == -pmin(-S1,-S2,-S3))
expect_true(pmin(S1,S2,S3) == -pmax(-S1,-S2,-S3))
expect_true(pmax(S1,-Inf) == S1)
expect_true(pmin(S1, Inf) == S1)
expect_silent(jj <- minpair_spray(S1,S2))
expect_silent(jj <- maxpair_spray(S1,S2))
expect_true(minpair_spray(S1) == S1)
expect_true(maxpair_spray(S1) == S1)
expect_error(minpair_spray(S1,1:3))
expect_error(maxpair_spray(S1,1:3))
expect_error(minpair_spray(S1,-1))
expect_error(maxpair_spray(S1,+1))
SS1 <- rspray(5,arity=119)
SS2 <- rspray(7,arity=119)
expect_silent(minpair_spray(SS1,SS2))
expect_silent(minpair_spray(SS2,SS1))
expect_silent(maxpair_spray(SS1,SS2))
expect_silent(maxpair_spray(SS2,SS1))
expect_equal(constant(knight()^5,drop=TRUE),0)
expect_equal(constant(king()^5,drop=TRUE),1200)
expect_true(nterms(spray(diag(7)))==7)
expect_true(S1^0 == one(arity(S1)))
expect_true(S1^1 == S1)
expect_true(S2^0 == one(arity(S2)))
expect_true(S2^1 == S2)
Sz <- spray(matrix(sample(1:50),ncol=2),10^-(1:25))
expect_true(length(coeffs(Sz)) >= length(coeffs(zap(Sz))))
expect_true(length(coeffs(Sz)) >= length(coeffs(zapsmall(Sz))))
expect_true(length(coeffs(Sz)) >= length(zapsmall(coeffs(Sz))))
})
|
# HEADER ------------------------------------------------------------------
# Functions to help with processing address data
library(tidyverse); library(magrittr); library(PostcodesioR); library(janitor)
# Add postcode variables to a data frame via PostcodesioR ----------------
# TODO: Fix pcd_name other than default
# TODO: Handle incorrect spelling of vars
add_pcd_vars <- function(df, pcd_name = "postcode",
.admin_district = TRUE, .lat_long = FALSE,
other_vars = character(0)){
## Build a vector of variables of interest ----
# Create an empty character vector
pcd_vars <- character(0)
# From the arguments passed in the function call
if(.admin_district) pcd_vars %<>% append("admin_district")
if(.lat_long) pcd_vars %<>% append(c("longitude", "latitude"))
if(length(other_vars)>0) pcd_vars %<>% append(other_vars)
# Remove any duplicates
pcd_vars %<>% unique()
## Build a postcode lookup template and a list of columns in the template ----
# Empty postcode lookup data frame template
pcd_template <- postcode_lookup("S1 2HH") %>%
filter(postcode == "ZZ ZZZ")
# Data frame of postcode lookup column names, types & indices
pcd_cols <- tibble(name = colnames(pcd_template),
type = lapply(pcd_template, class)) %>% #TODO: lose or use & extract from list
rowid_to_column(var = "rowid") %>%
relocate(rowid, .after = last_col())
# The position of "codes" helps with nested "*_code" values
min_codes_var <- pcd_cols %>%
filter(str_ends(name, "_code")) %>%
filter(rowid == min(rowid))
codes_position <- min_codes_var$rowid
# Add index used to extract var from results
pcd_cols %<>%
mutate(index = map(rowid,
~ ifelse(.x < codes_position,
list(c(2, .x)),
list(c(2,
codes_position,
.x + 1 - codes_position))))) %>%
select(-rowid)
# Filter list of postcode columns to vars of interest
pcd_cols %<>% filter(name %in% pcd_vars)
# Filter template columns to vars of interest
pcd_details <- select(pcd_template, all_of(pcd_vars))
# Need to distinguish between postcode passed & matched e.g. s1 2hh & S1 2HH
pcd_match <- "postcode" %in% colnames(pcd_details) # flag is used more than once
if(pcd_match) pcd_details %<>% mutate(pcd_match = character(0))
## Do some checks -----
# Check we have some postcode variables in the function arguments
stopifnot(length(pcd_vars) > 0)
# Check we have some VALID postcode variables in the function arguments
stopifnot(nrow(filter(pcd_cols, name %in% pcd_vars)) > 0)
# Warn if there's an unused postcode variable from the function arguments
if(length(pcd_vars)!=length(pcd_cols)) {
    warning(str_c("One or more of the variables requested is not available. ",
                  "Check spelling and docs.ropensci.org/PostcodesioR/"))
}
## Handle empty & duplicate postcodes -----
# Snapshot to refer and join to later
df_orig <- df
  # PostcodesioR expects postcodes to be labelled "postcode"
df %<>% rename(postcode = {{pcd_name}}) # we rename it back in the returned df
# # Subset of empty postcodes we add back at the end
# empty_pcds <- df %>%
# filter(is.na(postcode)) %>%
# bind_rows(pcd_details) # with empty variables of interest
# Don't process empty or duplicate postcodes
df %<>%
drop_na(postcode) %>%
distinct(postcode, .keep_all = TRUE)
## Batch-by-batch postcode lookup ----
# PostcodesioR puts 100 row limit on bulk_postcode_lookup
# - so we're going to have to process the request in batches
batch_size_max <- 100
### Batch loop prep ----
# Numbers involved in breaking postcode lookup into batches
total_rows <- nrow(df)
whole_batches <- total_rows %/% batch_size_max
remainder_batch_size <- total_rows %% batch_size_max
n_batches <- ifelse(remainder_batch_size == 0, whole_batches, whole_batches + 1)
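  # Worked illustration (hypothetical figures, not from real data): with
  # total_rows = 250 the arithmetic above gives whole_batches = 2,
  # remainder_batch_size = 50 and therefore n_batches = 3
  # (two full batches of 100 plus a final batch of 50).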
### Batch loop ----
for (i in 1:n_batches) {
#i <- 1
#### Slice a batch ----
# Determine the size of the batch
batch_size <- ifelse(i != n_batches, batch_size_max, remainder_batch_size)
# Small probability (1 in 100) but need to check ...
batch_size <- ifelse(batch_size == 0, batch_size_max, batch_size)
# Determine the start and end records of the batch
batch_start <- ifelse(i == 1, 1, ((i-1) * batch_size_max) + 1)
    # The last batch ends at the final row (also correct when total_rows is an exact multiple of batch_size_max)
    batch_end <- ifelse(i == n_batches, total_rows, i * batch_size_max)
# Get a batch of petition records
batch <- slice(df, batch_start:batch_end)
#### Bulk lookup request to postcodes.io ----
bulk_lookup_rslt <- bulk_postcode_lookup(list(postcodes = batch$postcode))
#### Extract the vars from the postcodes.io results in to a data frame ----
df_batch_rslt <- map2_dfc(pcd_cols$name, pcd_cols$index,
~ tibble(!!.x := map(bulk_lookup_rslt,
unlist(.y),
.default = NA))) %>%
unnest(cols = all_of(pcd_vars))
# Need to distinguish between postcode passed & matched e.g. s1 2hh & S1 2HH
if(pcd_match) df_batch_rslt %<>% rename(pcd_match = postcode)
# Add postcode passed i.e. our primary key
df_batch_rslt %<>%
add_column(postcode = map_chr(bulk_lookup_rslt, c(1, 1), .default = ""),
.before = 1)
# Cumulative results from batches
pcd_details <- bind_rows(pcd_details, df_batch_rslt)
}
# Stitch the subsets and variables together
df_new <- df_orig %>%
left_join(pcd_details, by = c("postcode")) #%>% # add postcode variables of interest to the original data
#bind_rows(empty_pcds) %>% # add records in the request with null postcodes
}
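# Illustrative usage sketch (added for clarity, not part of the original
# workflow): add_pcd_vars() needs a live connection to postcodes.io, so the
# example is left commented out; the tibble and postcodes below are made up.
# example_df <- tibble(id = 1:2, postcode = c("S1 2HH", NA))
# example_out <- add_pcd_vars(example_df, pcd_name = "postcode",
#                             .admin_district = TRUE, .lat_long = TRUE)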
# Add LLPG variables to a data frame via Portal locator ----------------
set_python <- function(){
# Reticulate provides the interface for Python packages
library(reticulate)
# Define the Python environment that comes with ArcGIS Pro
use_condaenv("arcgispro-py3-clone", required=TRUE)
# TODO: RStudio prj &/or global setting?
# Sys.getenv("python_env")?
# Pass name of conda env as a function parameter?
# Best practice?
# Does arcgis package need local ArcGIS desktop install?
# Anticipate arcgis not being installed (& conda?)
}
get_gis <- function(){
# TODO: set_python() if not set
# Import the GIS library from ArcGIS Python API
arcgis_gis <- import("arcgis.gis")
# Try and retrieve Portal credentials from .environ file
portal_id = Sys.getenv("portal_id")
portal_pwd = Sys.getenv("portal_pwd")
# Prompt for Portal credentials if necessary
if (portal_id == "") portal_id <- rstudioapi::askForPassword("Portal user ID")
if (portal_pwd == "") portal_pwd <- rstudioapi::askForPassword("Portal password")
# Login to Portal and get a GIS object
gis <- arcgis_gis$GIS("https://sheffieldcitycouncil.cloud.esriuk.com/portal/home/",
portal_id, portal_pwd)
}
batch_geocode <- function(addr_batch, addr_name, geocoding, portal_geocoder){
# browser()
# Add row ID to the batch for a join later
addr_batch <- mutate(addr_batch, row_id = row_number())
# List of addresses to geocode
addresses_to_geocode <- addr_batch %>%
select(all_of(addr_name)) %>%
deframe()
# Geocode
results <- geocoding$batch_geocode(addresses_to_geocode,
geocoder = portal_geocoder)
# Put the hierarchical list of geocode results into a data frame
df_results <- tibble(result = results) %>%
unnest_wider(result) %>%
unnest_wider(location) %>%
unnest_wider(attributes) %>%
clean_names() %>%
# select(result_id, match_addr, score, status, addr_type,
# uprn, blpu_class, ward_code, parish_code, usrn) %>%
mutate(result_id = result_id + 1) # row_id starts at 1 not 0
# Join results to the original batch data
left_join(addr_batch, df_results, by=c("row_id" = "result_id")) %>%
select(-batch_id, -row_id) #remove ID vars used for processing in batches
}
geocode <- function(addr, addr_name = "address", portal_gis){
  # Add an "ORIG_" prefix so the original columns can be identified and the prefix removed at the end
addr <- rename_with(addr, ~str_c("ORIG_", .))
addr_name <- str_c("ORIG_", addr_name)
# browser()
# Import the geocoding library from ArcGIS Python API
geocoding <- import("arcgis.geocoding")
# List the different Portal geocoders
geocoders <- geocoding$get_geocoders(portal_gis)
#print(geocoders)
# Get the geocoder we want to use
geocoder_llpg_world <- geocoders[[2]]
print(str_c("geocoder_llpg_world: ", geocoder_llpg_world))
# Determine the batch size we should use
batch_size <- geocoder_llpg_world$properties$locatorProperties$SuggestedBatchSize
print(str_c("BatchSize: ", batch_size))
# Split data frame to process in batches
batches <- group_split(addr, batch_id = ceiling(row_number()/batch_size))
# Process batches and combine
df_batches <- lapply(batches, batch_geocode,
addr_name, geocoding, geocoder_llpg_world) %>%
bind_rows() %>%
mutate(uprn = ifelse(str_length(uprn) == 11, #ensure UPRN is 12 characters
str_c("0", uprn),
uprn)) %>%
select(starts_with("ORIG_"), one_of(c("uprn", "x","y"))) %>%
rename_with(~str_remove(.,"^ORIG_"))
}
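# Illustrative usage sketch (added for clarity, left commented out because it
# needs the ArcGIS Pro conda environment and Portal credentials; the address
# below is hypothetical):
# set_python()
# portal_gis <- get_gis()
# addr_df <- tibble(address = "Town Hall, Pinstone Street, Sheffield")
# geocoded <- geocode(addr_df, addr_name = "address", portal_gis = portal_gis)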
# Add a multi-line address variable to a partial single-line address variable ----
add_addr_field <- function(addr, field){
if(is.na(field)){
addr <- addr
} else {
ifelse(is.na(addr),
addr <- field,
addr <- str_c(addr, ", ", field))
}
}
# Pass a list of multi-line address variables and get a ... ----
# single-line (comma & space separated) address variable
combine_addr <- function(...){
addr_fields <- list(...)
addr <- NA
for (i in seq_along(addr_fields)) {
addr <- add_addr_field(addr, addr_fields[[i]])
}
return(addr)
}
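# Illustrative sketch (made-up address parts): combine_addr() skips NA fields
# and separates the remaining parts with ", ", e.g.
# combine_addr("10 Example Street", NA, "Sheffield", "S1 2HH")
# #> "10 Example Street, Sheffield, S1 2HH"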
# DEPRECATED Add postcode variables to a data frame via PostcodesioR ----
postcode_details_add <- function(df, pcd_var, .admin_district = TRUE, .lat_long = FALSE){
# Check the function arguments make sense
stopifnot(.admin_district | .lat_long) # at least one has to be true
# We'll rename the postcode column back before it's returned
df <- df %>%
rename(postcode = {{pcd_var}})
# Prepare an empty tibble to hold the cumulative results
postcode_details <- tibble(postcode = character())
if(.admin_district){
postcode_details %<>%
mutate(admin_district = character())
}
if(.lat_long){
postcode_details %<>%
mutate(longitude = numeric(),
latitude = numeric())
}
# Subset of empty postcodes we add back at the end
empty_postcodes <- df %>%
filter(is.na(postcode))
if(.admin_district){
empty_postcodes %<>%
mutate(admin_district = as.character(NA))
}
if(.lat_long){
empty_postcodes %<>%
mutate(longitude = as.numeric(NA),
latitude = as.numeric(NA))
}
# Don't try and process empty postcodes
df %<>%
drop_na(postcode)
  # postcodes.io puts a 100-row limit on bulk_postcode_lookup
batch_size_max <- 100
# Numbers involved in breaking postcode lookup into batches
total_rows <- nrow(df)
whole_batches <- total_rows %/% batch_size_max
remainder_batch_size <- total_rows %% batch_size_max
n_batches <- ifelse(remainder_batch_size == 0, whole_batches, whole_batches + 1)
# Batch-by-batch postcode lookup
for (i in 1:n_batches) {
# Determine the size of the batch
batch_size <- ifelse(i != n_batches, batch_size_max, remainder_batch_size)
# Small probability (1 in 100) but need to check ...
batch_size <- ifelse(batch_size == 0, batch_size_max, batch_size)
# Determine the start and end records of the batch
batch_start <- ifelse(i == 1, 1, ((i-1) * batch_size_max) + 1)
    # The last batch ends at the final row (also correct when total_rows is an exact multiple of batch_size_max)
    batch_end <- ifelse(i == n_batches, total_rows, i * batch_size_max)
# Get a batch of petition records
batch <- df %>%
slice(batch_start:batch_end)
    # Request to postcodes.io
    bulk_lookup_rslt <- bulk_postcode_lookup(list(postcodes = batch$postcode))
    # Extract and format result from postcodes.io
batch_rslt_tbl <- tibble(postcode = map_chr(bulk_lookup_rslt, c(1, 1), .default = ""))
if(.admin_district){
batch_rslt_tbl %<>%
mutate(admin_district = map_chr(bulk_lookup_rslt, c(2, 17), .default = "not found"))
}
if(.lat_long){
batch_rslt_tbl %<>%
mutate(longitude = map_dbl(bulk_lookup_rslt, c(2, 7), .default = NA),
latitude = map_dbl(bulk_lookup_rslt, c(2, 8), .default = NA))
}
# Cumulative results from batches
postcode_details <- bind_rows(postcode_details, batch_rslt_tbl)
}
# Remove duplicates (so subsequent join works)
postcode_details %<>%
distinct(postcode, .keep_all = TRUE)
# Format the dataframe we're returning
df %<>%
left_join(postcode_details, by = c("postcode")) %>% # Add postcode details to data
bind_rows(empty_postcodes) %>% # Add null postcode records
rename({{pcd_var}} := postcode) # Rename postcode column back to original name
}
# DEPRECATED Extract the UPRNs from the feature ... ----
# ... created by our llpg_world_geocode() Python function
extract_uprns <- function(path, feature){
  uprns <- sf::st_read(dsn = path, layer = feature) %>% # requires the sf package (not loaded above)
as_tibble() %>%
clean_names() %>%
mutate(addr_type = str_extract(locator_family_id, ".+?(?=:)")) %>%
mutate(addr_type = str_replace_all(addr_type, "'", "")) %>%
mutate(uprn = str_extract(locator_family_id, "[^:]+$")) %>%
mutate(uprn = ifelse(str_length(uprn) == 11,
str_c("0", uprn),
uprn)) %>%
mutate(uprn = ifelse(addr_type != "ADDRESS", NA, uprn)) %>%
select(-user_uprn, -starts_with("locator_")) %>% # Remove columns with prefix of "locator_"
rename_at(.vars = vars(starts_with("user_")),
.funs = ~sub("^user_", "", .)) %>% # Remove prefix "user_" from column names
arrange(desc(status), desc(addr_type), score)
}
|
/Addresses.R
|
no_license
|
scc-pi/functions
|
R
| false | false | 14,676 |
r
|
# HEADER ------------------------------------------------------------------
# Functions to help with processing address data
library(tidyverse); library(magrittr); library(PostcodesioR); library(janitor)
# Add postcode variables to a data frame via PostcodesioR ----------------
# TODO: Fix pcd_name other than default
# TODO: Handle incorrect spelling of vars
add_pcd_vars <- function(df, pcd_name = "postcode",
.admin_district = TRUE, .lat_long = FALSE,
other_vars = character(0)){
## Build a vector of variables of interest ----
# Create an empty character vector
pcd_vars <- character(0)
# From the arguments passed in the function call
if(.admin_district) pcd_vars %<>% append("admin_district")
if(.lat_long) pcd_vars %<>% append(c("longitude", "latitude"))
if(length(other_vars)>0) pcd_vars %<>% append(other_vars)
# Remove any duplicates
pcd_vars %<>% unique()
## Build a postcode lookup template and a list of columns in the template ----
# Empty postcode lookup data frame template
pcd_template <- postcode_lookup("S1 2HH") %>%
filter(postcode == "ZZ ZZZ")
# Data frame of postcode lookup column names, types & indices
pcd_cols <- tibble(name = colnames(pcd_template),
type = lapply(pcd_template, class)) %>% #TODO: lose or use & extract from list
rowid_to_column(var = "rowid") %>%
relocate(rowid, .after = last_col())
# The position of "codes" helps with nested "*_code" values
min_codes_var <- pcd_cols %>%
filter(str_ends(name, "_code")) %>%
filter(rowid == min(rowid))
codes_position <- min_codes_var$rowid
# Add index used to extract var from results
pcd_cols %<>%
mutate(index = map(rowid,
~ ifelse(.x < codes_position,
list(c(2, .x)),
list(c(2,
codes_position,
.x + 1 - codes_position))))) %>%
select(-rowid)
# Filter list of postcode columns to vars of interest
pcd_cols %<>% filter(name %in% pcd_vars)
# Filter template columns to vars of interest
pcd_details <- select(pcd_template, all_of(pcd_vars))
# Need to distinguish between postcode passed & matched e.g. s1 2hh & S1 2HH
pcd_match <- "postcode" %in% colnames(pcd_details) # flag is used more than once
if(pcd_match) pcd_details %<>% mutate(pcd_match = character(0))
## Do some checks -----
# Check we have some postcode variables in the function arguments
stopifnot(length(pcd_vars) > 0)
# Check we have some VALID postcode variables in the function arguments
stopifnot(nrow(filter(pcd_cols, name %in% pcd_vars)) > 0)
# Warn if there's an unused postcode variable from the function arguments
if(length(pcd_vars)!=length(pcd_cols)) {
    warning(str_c("One or more of the variables requested is not available. ",
                  "Check spelling and docs.ropensci.org/PostcodesioR/"))
}
## Handle empty & duplicate postcodes -----
# Snapshot to refer and join to later
df_orig <- df
  # PostcodesioR expects postcodes to be labelled "postcode"
df %<>% rename(postcode = {{pcd_name}}) # we rename it back in the returned df
# # Subset of empty postcodes we add back at the end
# empty_pcds <- df %>%
# filter(is.na(postcode)) %>%
# bind_rows(pcd_details) # with empty variables of interest
# Don't process empty or duplicate postcodes
df %<>%
drop_na(postcode) %>%
distinct(postcode, .keep_all = TRUE)
## Batch-by-batch postcode lookup ----
# PostcodesioR puts 100 row limit on bulk_postcode_lookup
# - so we're going to have to process the request in batches
batch_size_max <- 100
### Batch loop prep ----
# Numbers involved in breaking postcode lookup into batches
total_rows <- nrow(df)
whole_batches <- total_rows %/% batch_size_max
remainder_batch_size <- total_rows %% batch_size_max
n_batches <- ifelse(remainder_batch_size == 0, whole_batches, whole_batches + 1)
### Batch loop ----
for (i in 1:n_batches) {
#i <- 1
#### Slice a batch ----
# Determine the size of the batch
batch_size <- ifelse(i != n_batches, batch_size_max, remainder_batch_size)
# Small probability (1 in 100) but need to check ...
batch_size <- ifelse(batch_size == 0, batch_size_max, batch_size)
# Determine the start and end records of the batch
batch_start <- ifelse(i == 1, 1, ((i-1) * batch_size_max) + 1)
    # The last batch ends at the final row (also correct when total_rows is an exact multiple of batch_size_max)
    batch_end <- ifelse(i == n_batches, total_rows, i * batch_size_max)
# Get a batch of petition records
batch <- slice(df, batch_start:batch_end)
#### Bulk lookup request to postcodes.io ----
bulk_lookup_rslt <- bulk_postcode_lookup(list(postcodes = batch$postcode))
#### Extract the vars from the postcodes.io results in to a data frame ----
df_batch_rslt <- map2_dfc(pcd_cols$name, pcd_cols$index,
~ tibble(!!.x := map(bulk_lookup_rslt,
unlist(.y),
.default = NA))) %>%
unnest(cols = all_of(pcd_vars))
# Need to distinguish between postcode passed & matched e.g. s1 2hh & S1 2HH
if(pcd_match) df_batch_rslt %<>% rename(pcd_match = postcode)
# Add postcode passed i.e. our primary key
df_batch_rslt %<>%
add_column(postcode = map_chr(bulk_lookup_rslt, c(1, 1), .default = ""),
.before = 1)
# Cumulative results from batches
pcd_details <- bind_rows(pcd_details, df_batch_rslt)
}
# Stitch the subsets and variables together
df_new <- df_orig %>%
left_join(pcd_details, by = c("postcode")) #%>% # add postcode variables of interest to the original data
#bind_rows(empty_pcds) %>% # add records in the request with null postcodes
}
# Add LLPG variables to a data frame via Portal locator ----------------
set_python <- function(){
# Reticulate provides the interface for Python packages
library(reticulate)
# Define the Python environment that comes with ArcGIS Pro
use_condaenv("arcgispro-py3-clone", required=TRUE)
# TODO: RStudio prj &/or global setting?
# Sys.getenv("python_env")?
# Pass name of conda env as a function parameter?
# Best practice?
# Does arcgis package need local ArcGIS desktop install?
# Anticipate arcgis not being installed (& conda?)
}
get_gis <- function(){
# TODO: set_python() if not set
# Import the GIS library from ArcGIS Python API
arcgis_gis <- import("arcgis.gis")
# Try and retrieve Portal credentials from .environ file
portal_id = Sys.getenv("portal_id")
portal_pwd = Sys.getenv("portal_pwd")
# Prompt for Portal credentials if necessary
if (portal_id == "") portal_id <- rstudioapi::askForPassword("Portal user ID")
if (portal_pwd == "") portal_pwd <- rstudioapi::askForPassword("Portal password")
# Login to Portal and get a GIS object
gis <- arcgis_gis$GIS("https://sheffieldcitycouncil.cloud.esriuk.com/portal/home/",
portal_id, portal_pwd)
}
batch_geocode <- function(addr_batch, addr_name, geocoding, portal_geocoder){
# browser()
# Add row ID to the batch for a join later
addr_batch <- mutate(addr_batch, row_id = row_number())
# List of addresses to geocode
addresses_to_geocode <- addr_batch %>%
select(all_of(addr_name)) %>%
deframe()
# Geocode
results <- geocoding$batch_geocode(addresses_to_geocode,
geocoder = portal_geocoder)
# Put the hierarchical list of geocode results into a data frame
df_results <- tibble(result = results) %>%
unnest_wider(result) %>%
unnest_wider(location) %>%
unnest_wider(attributes) %>%
clean_names() %>%
# select(result_id, match_addr, score, status, addr_type,
# uprn, blpu_class, ward_code, parish_code, usrn) %>%
mutate(result_id = result_id + 1) # row_id starts at 1 not 0
# Join results to the original batch data
left_join(addr_batch, df_results, by=c("row_id" = "result_id")) %>%
select(-batch_id, -row_id) #remove ID vars used for processing in batches
}
geocode <- function(addr, addr_name = "address", portal_gis){
  # Add an "ORIG_" prefix so the original columns can be identified and the prefix removed at the end
addr <- rename_with(addr, ~str_c("ORIG_", .))
addr_name <- str_c("ORIG_", addr_name)
# browser()
# Import the geocoding library from ArcGIS Python API
geocoding <- import("arcgis.geocoding")
# List the different Portal geocoders
geocoders <- geocoding$get_geocoders(portal_gis)
#print(geocoders)
# Get the geocoder we want to use
geocoder_llpg_world <- geocoders[[2]]
print(str_c("geocoder_llpg_world: ", geocoder_llpg_world))
# Determine the batch size we should use
batch_size <- geocoder_llpg_world$properties$locatorProperties$SuggestedBatchSize
print(str_c("BatchSize: ", batch_size))
# Split data frame to process in batches
batches <- group_split(addr, batch_id = ceiling(row_number()/batch_size))
# Process batches and combine
df_batches <- lapply(batches, batch_geocode,
addr_name, geocoding, geocoder_llpg_world) %>%
bind_rows() %>%
mutate(uprn = ifelse(str_length(uprn) == 11, #ensure UPRN is 12 characters
str_c("0", uprn),
uprn)) %>%
select(starts_with("ORIG_"), one_of(c("uprn", "x","y"))) %>%
rename_with(~str_remove(.,"^ORIG_"))
}
# Add a multi-line address variable to a partial single-line address variable ----
add_addr_field <- function(addr, field){
if(is.na(field)){
addr <- addr
} else {
ifelse(is.na(addr),
addr <- field,
addr <- str_c(addr, ", ", field))
}
}
# Pass a list of multi-line address variables and get a ... ----
# single-line (comma & space separated) address variable
combine_addr <- function(...){
addr_fields <- list(...)
addr <- NA
for (i in seq_along(addr_fields)) {
addr <- add_addr_field(addr, addr_fields[[i]])
}
return(addr)
}
# DEPRECATED Add postcode variables to a data frame via PostcodesioR ----
postcode_details_add <- function(df, pcd_var, .admin_district = TRUE, .lat_long = FALSE){
# Check the function arguments make sense
stopifnot(.admin_district | .lat_long) # at least one has to be true
# We'll rename the postcode column back before it's returned
df <- df %>%
rename(postcode = {{pcd_var}})
# Prepare an empty tibble to hold the cumulative results
postcode_details <- tibble(postcode = character())
if(.admin_district){
postcode_details %<>%
mutate(admin_district = character())
}
if(.lat_long){
postcode_details %<>%
mutate(longitude = numeric(),
latitude = numeric())
}
# Subset of empty postcodes we add back at the end
empty_postcodes <- df %>%
filter(is.na(postcode))
if(.admin_district){
empty_postcodes %<>%
mutate(admin_district = as.character(NA))
}
if(.lat_long){
empty_postcodes %<>%
mutate(longitude = as.numeric(NA),
latitude = as.numeric(NA))
}
# Don't try and process empty postcodes
df %<>%
drop_na(postcode)
  # postcodes.io puts a 100-row limit on bulk_postcode_lookup
batch_size_max <- 100
# Numbers involved in breaking postcode lookup into batches
total_rows <- nrow(df)
whole_batches <- total_rows %/% batch_size_max
remainder_batch_size <- total_rows %% batch_size_max
n_batches <- ifelse(remainder_batch_size == 0, whole_batches, whole_batches + 1)
# Batch-by-batch postcode lookup
for (i in 1:n_batches) {
# Determine the size of the batch
batch_size <- ifelse(i != n_batches, batch_size_max, remainder_batch_size)
# Small probability (1 in 100) but need to check ...
batch_size <- ifelse(batch_size == 0, batch_size_max, batch_size)
# Determine the start and end records of the batch
batch_start <- ifelse(i == 1, 1, ((i-1) * batch_size_max) + 1)
    # The last batch ends at the final row (also correct when total_rows is an exact multiple of batch_size_max)
    batch_end <- ifelse(i == n_batches, total_rows, i * batch_size_max)
# Get a batch of petition records
batch <- df %>%
slice(batch_start:batch_end)
    # Request to postcodes.io
    bulk_lookup_rslt <- bulk_postcode_lookup(list(postcodes = batch$postcode))
    # Extract and format result from postcodes.io
batch_rslt_tbl <- tibble(postcode = map_chr(bulk_lookup_rslt, c(1, 1), .default = ""))
if(.admin_district){
batch_rslt_tbl %<>%
mutate(admin_district = map_chr(bulk_lookup_rslt, c(2, 17), .default = "not found"))
}
if(.lat_long){
batch_rslt_tbl %<>%
mutate(longitude = map_dbl(bulk_lookup_rslt, c(2, 7), .default = NA),
latitude = map_dbl(bulk_lookup_rslt, c(2, 8), .default = NA))
}
# Cumulative results from batches
postcode_details <- bind_rows(postcode_details, batch_rslt_tbl)
}
# Remove duplicates (so subsequent join works)
postcode_details %<>%
distinct(postcode, .keep_all = TRUE)
# Format the dataframe we're returning
df %<>%
left_join(postcode_details, by = c("postcode")) %>% # Add postcode details to data
bind_rows(empty_postcodes) %>% # Add null postcode records
rename({{pcd_var}} := postcode) # Rename postcode column back to original name
}
# DEPRECATED Extract the UPRNs from the feature ... ----
# ... created by our llpg_world_geocode() Python function
extract_uprns <- function(path, feature){
  uprns <- sf::st_read(dsn = path, layer = feature) %>% # requires the sf package (not loaded above)
as_tibble() %>%
clean_names() %>%
mutate(addr_type = str_extract(locator_family_id, ".+?(?=:)")) %>%
mutate(addr_type = str_replace_all(addr_type, "'", "")) %>%
mutate(uprn = str_extract(locator_family_id, "[^:]+$")) %>%
mutate(uprn = ifelse(str_length(uprn) == 11,
str_c("0", uprn),
uprn)) %>%
mutate(uprn = ifelse(addr_type != "ADDRESS", NA, uprn)) %>%
select(-user_uprn, -starts_with("locator_")) %>% # Remove columns with prefix of "locator_"
rename_at(.vars = vars(starts_with("user_")),
.funs = ~sub("^user_", "", .)) %>% # Remove prefix "user_" from column names
arrange(desc(status), desc(addr_type), score)
}
|
library(envirem)
### Name: embergerQ
### Title: Emberger's pluviometric quotient
### Aliases: embergerQ
### ** Examples
# Find example rasters
rasterFiles <- list.files(system.file('extdata', package='envirem'), full.names=TRUE)
env <- stack(rasterFiles)
embergerQ(env[['bio_12']], env[['bio_5']], env[['bio_6']], tempScale = 10)
|
/data/genthat_extracted_code/envirem/examples/embergerQ.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 338 |
r
|
library(envirem)
### Name: embergerQ
### Title: Emberger's pluviometric quotient
### Aliases: embergerQ
### ** Examples
# Find example rasters
rasterFiles <- list.files(system.file('extdata', package='envirem'), full.names=TRUE)
env <- stack(rasterFiles)
embergerQ(env[['bio_12']], env[['bio_5']], env[['bio_6']], tempScale = 10)
|
library(compositions)
### Name: rDirichlet
### Title: Dirichlet distribution
### Aliases: rDirichlet rDirichlet.acomp rDirichlet.rcomp
### Keywords: multivariate
### ** Examples
tmp <- rDirichlet.acomp(10,alpha=c(A=2,B=0.2,C=0.2))
plot(tmp)
|
/data/genthat_extracted_code/compositions/examples/rDirichlet.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 248 |
r
|
library(compositions)
### Name: rDirichlet
### Title: Dirichlet distribution
### Aliases: rDirichlet rDirichlet.acomp rDirichlet.rcomp
### Keywords: multivariate
### ** Examples
tmp <- rDirichlet.acomp(10,alpha=c(A=2,B=0.2,C=0.2))
plot(tmp)
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----correlation--------------------------------------------------------------
# Load packages
library(ggplot2)
library(sitmo)
# Number of Observations to Generate
n = 1e6
# Number of seeds to try (1 ... S)
nseeds = 30
# Storage for seed number and the correlation of the realizations between generators.
cppdf = data.frame(s1 = numeric(nseeds), s2 = numeric(nseeds),
cor = numeric(nseeds), stringsAsFactors = F)
# Generate observations under the seeds
count = 0
for(i in seq_len(nseeds)){
for(j in i:nseeds){
u1 = runif_sitmo(n, 0.0, 1.0, i)
u2 = runif_sitmo(n, 0.0, 1.0, j)
count = count + 1
cppdf[count,] = c(i, j, cor(u1,u2))
}
}
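## ----corr_summary--------------------------------------------------------------
# Added illustration (not part of the original vignette): streams seeded
# differently should be essentially uncorrelated, while identical seeds are
# perfectly correlated, so the off-diagonal correlations summarised here
# should all be close to zero.
summary(cppdf$cor[cppdf$s1 != cppdf$s2])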
## ----corr_plot, fig.width = 7, fig.height = 4---------------------------------
# Create Correlation Plot
ggplot(cppdf) + geom_tile(aes(x = s1, y = s2, fill = cor)) +
xlab("Seed 1") + ylab("Seed 2") +
ggtitle("Correlations between seed realizations using `sitmo`") + theme_bw()
|
/inst/doc/uniform_rng_with_sitmo.R
|
no_license
|
cran/sitmo
|
R
| false | false | 1,083 |
r
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----correlation--------------------------------------------------------------
# Load packages
library(ggplot2)
library(sitmo)
# Number of Observations to Generate
n = 1e6
# Number of seeds to try (1 ... S)
nseeds = 30
# Storage for seed number and the correlation of the realizations between generators.
cppdf = data.frame(s1 = numeric(nseeds), s2 = numeric(nseeds),
cor = numeric(nseeds), stringsAsFactors = F)
# Generate observations under the seeds
count = 0
for(i in seq_len(nseeds)){
for(j in i:nseeds){
u1 = runif_sitmo(n, 0.0, 1.0, i)
u2 = runif_sitmo(n, 0.0, 1.0, j)
count = count + 1
cppdf[count,] = c(i, j, cor(u1,u2))
}
}
## ----corr_plot, fig.width = 7, fig.height = 4---------------------------------
# Create Correlation Plot
ggplot(cppdf) + geom_tile(aes(x = s1, y = s2, fill = cor)) +
xlab("Seed 1") + ylab("Seed 2") +
ggtitle("Correlations between seed realizations using `sitmo`") + theme_bw()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\encoding{UTF-8}
\name{to_formula}
\alias{to_formula}
\title{Takes any formula-like input and returns a formula}
\usage{
to_formula(form)
}
\arguments{
\item{form}{Formula or character (with or without initial tilde/"~")}
}
\value{
A formula
}
\description{
Takes any formula-like input and returns a formula
}
\author{
Jonas Kristoffer Lindeløv \email{jonas@lindeloev.dk}
}
\keyword{internal}
|
/man/to_formula.Rd
|
no_license
|
lindeloev/mcp
|
R
| false | true | 480 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\encoding{UTF-8}
\name{to_formula}
\alias{to_formula}
\title{Takes any formula-like input and returns a formula}
\usage{
to_formula(form)
}
\arguments{
\item{form}{Formula or character (with or without initial tilde/"~")}
}
\value{
A formula
}
\description{
Takes any formula-like input and returns a formula
}
\author{
Jonas Kristoffer Lindeløv \email{jonas@lindeloev.dk}
}
\keyword{internal}
|
createDeathTests <- function () {
set_defaults_enrollment_detail(mhsacovg = '0')
patient <- createPatient()
encounter <- createEncounter()
declareTest("Date of death visit end date", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = 0)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09')
add_outpatient_services(enrolid = patient$enrolid, year = '2013', svcdate = '2013-01-07',tsvcdat = '2013-01-07', dx1 = '7981', dxver = '9', fachdid = encounter$caseid)
add_facility_header(enrolid=patient$enrolid, svcdate = '2013-01-07', tsvcdat = '2013-01-07', dx9 = '7981', fachdid = encounter$caseid, dxver='9')
expect_death(person_id = patient$person_id, death_date = '2013-01-09')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2016-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("ICD10 Death and Death Type Correct", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2016', svcdate = '2016-01-06', tsvcdat='2016-01-09', dxver = '0', dx1='I461')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003567')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("ICD9 Death and Death Type Correct", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dxver = '9', dx1='798')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003567')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2013-02-01', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("Discharge Status and Death Type Correct", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dstatus = '20')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003566')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("Death by ICD9 and Death by Discharge, Keep the Discharge", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dstatus = '20',dxver = '9', dx1='798')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003566')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("Death and then contined Activity Cancels Death", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dstatus = '20')
add_outpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-03-12')
  expect_no_death(person_id=patient$person_id)
if (Sys.getenv("truvenType") == "MDCD")
{
patient <- createPatient()
encounter <- createEncounter()
declareTest("Patient has icd9 death cord in ltc, death record created", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2012-12-31', dtstart = '2012-01-01')
add_long_term_care(enrolid = patient$enrolid, dx1 = '798', dxver = '9', svcdate = '11-02-2012', tsvcdat = '11-22-2012')
expect_death(person_id = patient$person_id)
}
}
|
/man/TRUVEN_CCAE_MDCR/ARCHIVE/TEST_CASES_2017/Truven_TestingFramework/R/DeathTests.R
|
permissive
|
OHDSI/ETL-CDMBuilder
|
R
| false | false | 4,012 |
r
|
createDeathTests <- function () {
set_defaults_enrollment_detail(mhsacovg = '0')
patient <- createPatient()
encounter <- createEncounter()
declareTest("Date of death visit end date", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = 0)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09')
add_outpatient_services(enrolid = patient$enrolid, year = '2013', svcdate = '2013-01-07',tsvcdat = '2013-01-07', dx1 = '7981', dxver = '9', fachdid = encounter$caseid)
add_facility_header(enrolid=patient$enrolid, svcdate = '2013-01-07', tsvcdat = '2013-01-07', dx9 = '7981', fachdid = encounter$caseid, dxver='9')
expect_death(person_id = patient$person_id, death_date = '2013-01-09')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2016-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("ICD10 Death and Death Type Correct", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2016', svcdate = '2016-01-06', tsvcdat='2016-01-09', dxver = '0', dx1='I461')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003567')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("ICD9 Death and Death Type Correct", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dxver = '9', dx1='798')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003567')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2013-02-01', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("Discharge Status and Death Type Correct", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dstatus = '20')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003566')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("Death by ICD9 and Death by Discharge, Keep the Discharge", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dstatus = '20',dxver = '9', dx1='798')
expect_death(person_id = patient$person_id, death_type_concept_id = '38003566')
patient <- createPatient()
encounter <- createEncounter()
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2015-12-31', dtstart = '2012-01-01', mhsacovg = '0')
declareTest("Death and then contined Activity Cancels Death", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_inpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-01-06', tsvcdat='2013-01-09', dstatus = '20')
add_outpatient_services(enrolid=patient$enrolid, year = '2013', svcdate = '2013-03-12')
  expect_no_death(person_id=patient$person_id)
if (Sys.getenv("truvenType") == "MDCD")
{
patient <- createPatient()
encounter <- createEncounter()
declareTest("Patient has icd9 death cord in ltc, death record created", source_pid = patient$enrolid, cdm_pid = patient$person_id)
add_enrollment_detail(enrolid=patient$enrolid, dtend = '2012-12-31', dtstart = '2012-01-01')
add_long_term_care(enrolid = patient$enrolid, dx1 = '798', dxver = '9', svcdate = '11-02-2012', tsvcdat = '11-22-2012')
expect_death(person_id = patient$person_id)
}
}
|
options(scipen=999)
#Necessary Packages
library(bayesreg)
library(MCMCpack)
library(doParallel)
library(foreach)
library(forecast)
#Set Working Directory
setwd("D:/Mario Documents/Graduate School/Github Repositories/RegularizationMethodsForSubsetARMASelection")
source("subsetARMACode.R")
#Simulation Specs
S=500 #Number of Replications of Simulated Time Series
N=c(120,240,360) #Lengths of Simulated Time Series
noise=c(0.5,1,1.5) #Inherent Noise in Simulated Time Series
maxP=14 #Maximum Autoregressive Order
maxQ=14 #Maximum Moving Average Order
true.ar=c(0.8,rep(0,4),0.7,-0.56,rep(0,7))
true.ma=c(0.8,rep(0,4),0.7,0.56,rep(0,7))
coef.true=c(true.ar,true.ma)
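# Added note: coef.true above is the expanded subset-ARMA form of the seasonal
# ARMA(1,1)x(1,1)_6 model simulated below, because
# (1 - 0.8B)(1 - 0.7B^6) = 1 - 0.8B - 0.7B^6 + 0.56B^7 (AR side) and
# (1 + 0.8B)(1 + 0.7B^6) = 1 + 0.8B + 0.7B^6 + 0.56B^7 (MA side),
# i.e. non-zero coefficients at lags 1, 6 and 7 only.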
#Methods Considered
methods=c("ADLASSO AIC","ADLASSO AIC/BIC","ADLASSO BIC")
#Empty Dataframe to Save Results
RESULTS=data.frame(matrix(NA,1,8+maxP+maxQ))
names(RESULTS)=c("Method","Length","Noise","Sim",
paste("AR(",1:maxP,")",sep=""),
paste("MA(",1:maxP,")",sep=""),
"P","A","FN","FP")
for(l in 1:S){
for(k in 1:length(noise)){
for(j in 1:length(N)){
####################################################################################################################################
#Simulate Dataset
model<-Arima(ts(rnorm(N[j]+1000,0,1),freq=6),order=c(1,0,1),seasonal=c(1,0,1),include.mean=F,fixed=c(0.8,0.8,0.7,0.7))
x<-simulate(model,nsim=(N[j]+1000),future=F,innov = rnorm((1000+N[j]),0,noise[k]))[-(1:1000)]
####################################################################################################################################
####################################################################################################################################
      #Estimate Using Adaptive Lasso Selecting Tuning Parameter Via Minimization of AIC/BIC
##Use AIC first and AIC second (no update to moving average terms)
adlasso.aic=cv.adaptlasso.arma.func(x,h=1,cores=1,long.ar.select=F,maxP=maxP,maxQ=maxQ,max.pq=NULL,K=NULL,updateMA=F,
test.per=0.2,BIC1=F,BIC2=F,eta=2,CV.method=c("AIC/BIC"))
coef.adlasso.aic=round(adlasso.aic$final.mod.coef,4)
names(coef.adlasso.aic)=c(paste("ar",1:14,sep=""),paste("ma",1:14,sep=""))
coef.adlasso.aic=c(coef.adlasso.aic,fulleval.func(truecoef=coef.true,estcoef=coef.adlasso.aic))
##Use AIC first and BIC second (no update to moving average terms)
adlasso.aicbic=cv.adaptlasso.arma.func(x,h=1,cores=1,long.ar.select=F,maxP=maxP,maxQ=maxQ,max.pq=NULL,K=NULL,updateMA=F,
test.per=0.2,BIC1=F,BIC2=T,eta=2,CV.method=c("AIC/BIC"))
coef.adlasso.aicbic=round(adlasso.aicbic$final.mod.coef,4)
names(coef.adlasso.aicbic)=c(paste("ar",1:14,sep=""),paste("ma",1:14,sep=""))
coef.adlasso.aicbic=c(coef.adlasso.aicbic,fulleval.func(truecoef=coef.true,estcoef=coef.adlasso.aicbic))
##Use BIC first and BIC second (no update to moving average terms)
adlasso.bic=cv.adaptlasso.arma.func(x,h=1,cores=1,long.ar.select=F,maxP=maxP,maxQ=maxQ,max.pq=NULL,K=NULL,updateMA=F,
test.per=0.2,BIC1=T,BIC2=T,eta=2,CV.method=c("AIC/BIC"))
coef.adlasso.bic=round(adlasso.bic$final.mod.coef,4)
names(coef.adlasso.bic)=c(paste("ar",1:14,sep=""),paste("ma",1:14,sep=""))
coef.adlasso.bic=c(coef.adlasso.bic,fulleval.func(truecoef=coef.true,estcoef=coef.adlasso.bic))
####################################################################################################################################
save.image("ADLASSOSIM2AICBIC.Rdata")
####################################################################################################################################
#Save Results into Data Frame
init.RESULTS=as.data.frame(rbind(
coef.adlasso.aic,
coef.adlasso.aicbic,
coef.adlasso.bic
))
row.names(init.RESULTS)=NULL
init.RESULTS2=cbind(methods,N[j],noise[k],l,init.RESULTS)
names(init.RESULTS2)=names(RESULTS)
RESULTS=rbind(RESULTS,init.RESULTS2)
####################################################################################################################################
save.image("ADLASSOSIM2AICBIC.Rdata")
}
}
}
save.image("ADLASSOSIM2AICBIC.Rdata")
|
/AdaptLassoFullSimulation2AICBIC.R
|
no_license
|
SuperMarioGiacomazzo/RegularizationMethodsForSubsetARMASelection
|
R
| false | false | 4,455 |
r
|
options(scipen=999)
#Necessary Packages
library(bayesreg)
library(MCMCpack)
library(doParallel)
library(foreach)
library(forecast)
#Set Working Directory
setwd("D:/Mario Documents/Graduate School/Github Repositories/RegularizationMethodsForSubsetARMASelection")
source("subsetARMACode.R")
#Simulation Specs
S=500 #Number of Replications of Simulated Time Series
N=c(120,240,360) #Lengths of Simulated Time Series
noise=c(0.5,1,1.5) #Inherent Noise in Simulated Time Series
maxP=14 #Maximum Autoregressive Order
maxQ=14 #Maximum Moving Average Order
true.ar=c(0.8,rep(0,4),0.7,-0.56,rep(0,7))
true.ma=c(0.8,rep(0,4),0.7,0.56,rep(0,7))
coef.true=c(true.ar,true.ma)
#Methods Considered
methods=c("ADLASSO AIC","ADLASSO AIC/BIC","ADLASSO BIC")
#Empty Dataframe to Save Results
RESULTS=data.frame(matrix(NA,1,8+maxP+maxQ))
names(RESULTS)=c("Method","Length","Noise","Sim",
paste("AR(",1:maxP,")",sep=""),
paste("MA(",1:maxP,")",sep=""),
"P","A","FN","FP")
for(l in 1:S){
for(k in 1:length(noise)){
for(j in 1:length(N)){
####################################################################################################################################
#Simulate Dataset
model<-Arima(ts(rnorm(N[j]+1000,0,1),freq=6),order=c(1,0,1),seasonal=c(1,0,1),include.mean=F,fixed=c(0.8,0.8,0.7,0.7))
x<-simulate(model,nsim=(N[j]+1000),future=F,innov = rnorm((1000+N[j]),0,noise[k]))[-(1:1000)]
####################################################################################################################################
####################################################################################################################################
      #Estimate Using Adaptive Lasso Selecting Tuning Parameter Via Minimization of AIC/BIC
##Use AIC first and AIC second (no update to moving average terms)
adlasso.aic=cv.adaptlasso.arma.func(x,h=1,cores=1,long.ar.select=F,maxP=maxP,maxQ=maxQ,max.pq=NULL,K=NULL,updateMA=F,
test.per=0.2,BIC1=F,BIC2=F,eta=2,CV.method=c("AIC/BIC"))
coef.adlasso.aic=round(adlasso.aic$final.mod.coef,4)
names(coef.adlasso.aic)=c(paste("ar",1:14,sep=""),paste("ma",1:14,sep=""))
coef.adlasso.aic=c(coef.adlasso.aic,fulleval.func(truecoef=coef.true,estcoef=coef.adlasso.aic))
##Use AIC first and BIC second (no update to moving average terms)
adlasso.aicbic=cv.adaptlasso.arma.func(x,h=1,cores=1,long.ar.select=F,maxP=maxP,maxQ=maxQ,max.pq=NULL,K=NULL,updateMA=F,
test.per=0.2,BIC1=F,BIC2=T,eta=2,CV.method=c("AIC/BIC"))
coef.adlasso.aicbic=round(adlasso.aicbic$final.mod.coef,4)
names(coef.adlasso.aicbic)=c(paste("ar",1:14,sep=""),paste("ma",1:14,sep=""))
coef.adlasso.aicbic=c(coef.adlasso.aicbic,fulleval.func(truecoef=coef.true,estcoef=coef.adlasso.aicbic))
##Use BIC first and BIC second (no update to moving average terms)
adlasso.bic=cv.adaptlasso.arma.func(x,h=1,cores=1,long.ar.select=F,maxP=maxP,maxQ=maxQ,max.pq=NULL,K=NULL,updateMA=F,
test.per=0.2,BIC1=T,BIC2=T,eta=2,CV.method=c("AIC/BIC"))
coef.adlasso.bic=round(adlasso.bic$final.mod.coef,4)
names(coef.adlasso.bic)=c(paste("ar",1:14,sep=""),paste("ma",1:14,sep=""))
coef.adlasso.bic=c(coef.adlasso.bic,fulleval.func(truecoef=coef.true,estcoef=coef.adlasso.bic))
####################################################################################################################################
save.image("ADLASSOSIM2AICBIC.Rdata")
####################################################################################################################################
#Save Results into Data Frame
init.RESULTS=as.data.frame(rbind(
coef.adlasso.aic,
coef.adlasso.aicbic,
coef.adlasso.bic
))
row.names(init.RESULTS)=NULL
init.RESULTS2=cbind(methods,N[j],noise[k],l,init.RESULTS)
names(init.RESULTS2)=names(RESULTS)
RESULTS=rbind(RESULTS,init.RESULTS2)
####################################################################################################################################
save.image("ADLASSOSIM2AICBIC.Rdata")
}
}
}
save.image("ADLASSOSIM2AICBIC.Rdata")
|
\name{hessian}
\title{hessian}
\alias{hessian}
\alias{hessian<-}
\description{Read/write to 2nd order derivative/hessian}
\usage{
hessian(x)
hessian(x) <- value
}
\arguments{
\item{x}{Autograd object with \code{ddim} derivative arguments.
For the read method, all 2nd derivatives must exist}
\item{value}{Array of dimension \code{c(dim(x), ddim, ddim)}}
}
\value{
\code{hessian}:
Hessian of \code{x} wrapped up in an array of dim \code{c(dim(x), ddim, ddim)} with dimensions possibly dropped.
\code{hessian<-}: Autograd object with all 2nd derivatives defined.
}
\details{
Note: there are no symmetry checks on \code{value} when assigning a hessian to \code{x}.
}
\seealso{
\link{deriv}
}
|
/man/hessian.Rd
|
no_license
|
naolsen/Autograd
|
R
| false | false | 690 |
rd
|
\name{hessian}
\title{hessian}
\alias{hessian}
\alias{hessian<-}
\description{Read/write to 2nd order derivative/hessian}
\usage{
hessian(x)
hessian(x) <- value
}
\arguments{
\item{x}{Autograd object with \code{ddim} derivative arguments.
For the read method, all 2nd derivatives must exist}
\item{value}{Array of dimension \code{c(dim(x), ddim, ddim)}}
}
\value{
\code{hessian}:
Hessian of \code{x} wrapped up in an array of dim \code{c(dim(x), ddim, ddim)} with dimensions possibly dropped.
\code{hessian<-}: Autograd object with all 2nd derivatives defined.
}
\details{
Note: there are no symmetry checks on \code{value} when assigning a hessian to \code{x}.
}
\seealso{
\link{deriv}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{C2}
\alias{C2}
\title{Title C2}
\usage{
C2(x, y, feature_selection_method, num_clusters_method, k = NULL,
clustering_method, ...)
}
\arguments{
\item{x}{data matrix}
\item{y}{Dependent variable}
\item{feature_selection_method}{method for the feature selection of the clinical measurements stage. Default RF.}
\item{num_clusters_method}{method for the choosing number of clusters by using the clinical measurements. Default Euclidean.}
\item{k}{number of clusters to use. If missing, we use a detection method. Defaukt as NULL}
\item{clustering_method}{method for clustering using the reduced clinical measures. Default is Hmanhattan,}
}
\value{
a list of three variables:
1) vector with the names of the important variables chosen.
2) number of classes that will be used for clustering
3) vector of the new assigned clusters
}
\description{
Title C2
}
\examples{
resultC2 <- C2(x, y, feature_selection_method='RF', num_clusters_method='Manhattan', clustering_method='Manhattan', plot.num.clus=TRUE, plot.clustering=TRUE)
C2(x, y, feature_selection_method='BIC', num_clusters_method='Manhattan', clustering_method='Hmanhattan', plot.num.clus=TRUE, plot.clustering=FALSE, nbest=1, nvmax=8, B=50)
}
|
/man/C2.Rd
|
no_license
|
HBPMedical/CCC
|
R
| false | true | 1,337 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{C2}
\alias{C2}
\title{Title C2}
\usage{
C2(x, y, feature_selection_method, num_clusters_method, k = NULL,
clustering_method, ...)
}
\arguments{
\item{x}{data matrix}
\item{y}{Dependent variable}
\item{feature_selection_method}{method for the feature selection of the clinical measurements stage. Default RF.}
\item{num_clusters_method}{method for the choosing number of clusters by using the clinical measurements. Default Euclidean.}
\item{k}{number of clusters to use. If missing, we use a detection method. Defaukt as NULL}
\item{clustering_method}{method for clustering using the reduced clinical measures. Default is Hmanhattan,}
}
\value{
a list of three variables:
1) vector with the names of the important variables chosen.
2) number of classes that will be used for clustering
3) vector of the new assigned clusters
}
\description{
Title C2
}
\examples{
resultC2 <- C2(x, y, feature_selection_method='RF', num_clusters_method='Manhattan', clustering_method='Manhattan', plot.num.clus=TRUE, plot.clustering=TRUE)
C2(x, y, feature_selection_method='BIC', num_clusters_method='Manhattan', clustering_method='Hmanhattan', plot.num.clus=TRUE, plot.clustering=FALSE, nbest=1, nvmax=8, B=50)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateParams.R
\name{random_covmat}
\alias{random_covmat}
\title{Create random VAR model error term covariance matrix}
\usage{
random_covmat(d, M, omega_scale, W_scale, lambda_scale, structural_pars = NULL)
}
\arguments{
\item{d}{the number of time series in the system.}
\item{M}{a positive integer specifying the number of mixture components.}
\item{omega_scale}{a size \eqn{(dx1)} strictly positive vector specifying the scale and variability of the
random covariance matrices in random mutations. The covariance matrices are drawn from (scaled) Wishart
distribution. Expected values of the random covariance matrices are \code{diag(omega_scale)}. Standard
deviations of the diagonal elements are \code{sqrt(2/d)*omega_scale[i]}
and for non-diagonal elements they are \code{sqrt(1/d*omega_scale[i]*omega_scale[j])}.
Note that for \code{d>4} this scale may need to be chosen carefully. Default in \code{GAfit} is
\code{var(stats::ar(data[,i], order.max=10)$resid, na.rm=TRUE), i=1,...,d}. This argument is ignored if
structural model is considered.}
\item{W_scale}{a size \eqn{(dx1)} strictly positive vector partly specifying the scale and variability of the
random covariance matrices in random mutations. The elements of the matrix \eqn{W} are drawn independently
from such normal distributions that the expectation of the main \strong{diagonal} elements of the first
regime's error term covariance matrix \eqn{\Omega_1 = WW'} is \code{W_scale}. The distribution of \eqn{\Omega_1}
will be in some sense like a Wishart distribution but with the columns (elements) of \eqn{W} obeying the given
constraints. The constraints are accounted for by setting the element to be always zero if it is subject to a zero
constraint; for sign constraints the absolute value or the negative of the absolute value is taken, and then the
variances of the elements of \eqn{W} are adjusted accordingly. This argument is ignored if reduced form model
is considered.}
\item{lambda_scale}{a length \eqn{M - 1} vector specifying the \strong{standard deviation} of the mean zero normal
distribution from which the eigenvalue \eqn{\lambda_{mi}} parameters are drawn from in random mutations.
As the eigenvalues should always be positive, the absolute value is taken. The elements of \code{lambda_scale}
should be strictly positive real numbers with the \eqn{m-1}th element giving the degrees of freedom for the \eqn{m}th
regime. The expected value of the main \strong{diagonal} elements \eqn{ij} of the \eqn{m}th \eqn{(m>1)} error term covariance
matrix will be \code{W_scale[i]*(d - n_i)^(-1)*sum(lambdas*ind_fun)} where the \eqn{(d x 1)} vector \code{lambdas} is
drawn from the absolute value of the t-distribution, \code{n_i} is the number of zero constraints in the \eqn{i}th
row of \eqn{W} and \code{ind_fun} is an indicator function that takes the value one iff the \eqn{ij}th element of
\eqn{W} is not constrained to zero. Basically, larger lambdas (or smaller degrees of freedom) imply larger variance.
If the lambda parameters are \strong{constrained} with the \eqn{(d(M - 1) x r)} constraint matrix \eqn{C_lambda},
then provide a length \eqn{r} vector specifying the standard deviation of the (absolute value of the) mean zero
normal distribution each of the \eqn{\gamma} parameters are drawn from (the \eqn{\gamma} is a \eqn{(r x 1)} vector).
The expected value of the main diagonal elements of the covariance matrices then depend on the constraints.
This argument is ignored if \eqn{M==1} or a reduced form model is considered. Default is \code{rep(3, times=M-1)}
if lambdas are not constrained and \code{rep(3, times=r)} if lambdas are constrained.
As with omega_scale and W_scale, this argument should be adjusted carefully if specified by hand. \strong{NOTE}
that if lambdas are constrained in some other way than restricting some of them to be identical, this parameter
should be adjusted accordingly in order for the estimation to succeed!}
\item{structural_pars}{If \code{NULL} a reduced form model is considered. For structural model, should be a list containing
the following elements:
\itemize{
\item \code{W} - a \eqn{(dxd)} matrix with its entries imposing constraints on \eqn{W}: \code{NA} indicating that the element is
unconstrained, a positive value indicating strict positive sign constraint, a negative value indicating strict
negative sign constraint, and zero indicating that the element is constrained to zero.
\item \code{C_lambda} - a \eqn{(d(M-1) x r)} constraint matrix that satisfies (\strong{\eqn{\lambda}}\eqn{_{2}}\eqn{,...,}
\strong{\eqn{\lambda}}\eqn{_{M}) =} \strong{\eqn{C_{\lambda} \gamma}} where \strong{\eqn{\gamma}} is the new \eqn{(r x 1)}
parameter subject to which the model is estimated (similarly to AR parameter constraints). The entries of \code{C_lambda}
must be either \strong{positive} or \strong{zero}. Ignore (or set to \code{NULL}) if the eigenvalues \eqn{\lambda_{mi}}
should not be constrained.
}
See Virolainen (2020) for the conditions required to identify the shocks and for the B-matrix as well (it is \eqn{W} times
a time-varying diagonal matrix with positive diagonal entries).}
}
\value{
\describe{
\item{For \strong{reduced form models}:}{Returns a \eqn{(d(d+1)/2x1)} vector containing vech-vectorized covariance matrix
\eqn{\Omega}.}
\item{For \strong{structural models}:}{Returns a length \eqn{d^2 - n_zeros - d*(M - 1)} vector of the form
\eqn{(Wvec(W),\lambda_2,...,\lambda_M)} where \eqn{\lambda_m=(\lambda_{m1},...,\lambda_{md})}
contains the eigenvalue parameters of the \eqn{m}th regime \eqn{(m>1)} and \eqn{n_zeros} is the number of zero constraints
in \eqn{W}. If lambdas are constrained, replacce \eqn{d*(M - 1)} in the length with \eqn{r} and
\eqn{\lambda_2,...,\lambda_M)} with \strong{\eqn{\gamma}}. The operator \eqn{Wvec()} vectorizes a matrix and removes zeros.}
}
}
\description{
\code{random_covmat} generates random VAR model \eqn{(dxd)} error term covariance matrix \eqn{\Omega}
from (scaled) Wishart distribution for reduced form models and the parameters \eqn{W},\eqn{\lambda_1,...,\lambda_M}
for structural models (from normal distributions).
}
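% Added illustration: the snippet below is not a call to random_covmat() itself;
% it only demonstrates, with stats::rWishart, the scaled-Wishart moments quoted
% above for the reduced form case (expected value diag(omega_scale), diagonal
% standard deviations sqrt(2/d)*omega_scale[i]).
\examples{
d <- 3
omega_scale <- c(1, 2, 0.5)
# Scaled Wishart with df = d and scale matrix diag(omega_scale)/d
draws <- rWishart(5000, df = d, Sigma = diag(omega_scale)/d)
round(apply(draws, 1:2, mean), 2)                  # approximately diag(omega_scale)
round(apply(draws, 1:2, sd)[cbind(1:d, 1:d)], 2)   # approximately sqrt(2/d)*omega_scale
}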
|
/man/random_covmat.Rd
|
no_license
|
yangkedc1984/gmvarkit
|
R
| false | true | 6,306 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateParams.R
\name{random_covmat}
\alias{random_covmat}
\title{Create random VAR model error term covariance matrix}
\usage{
random_covmat(d, M, omega_scale, W_scale, lambda_scale, structural_pars = NULL)
}
\arguments{
\item{d}{the number of time series in the system.}
\item{M}{a positive integer specifying the number of mixture components.}
\item{omega_scale}{a size \eqn{(dx1)} strictly positive vector specifying the scale and variability of the
random covariance matrices in random mutations. The covariance matrices are drawn from (scaled) Wishart
distribution. Expected values of the random covariance matrices are \code{diag(omega_scale)}. Standard
deviations of the diagonal elements are \code{sqrt(2/d)*omega_scale[i]}
and for non-diagonal elements they are \code{sqrt(1/d*omega_scale[i]*omega_scale[j])}.
Note that for \code{d>4} this scale may need to be chosen carefully. Default in \code{GAfit} is
\code{var(stats::ar(data[,i], order.max=10)$resid, na.rm=TRUE), i=1,...,d}. This argument is ignored if
structural model is considered.}
\item{W_scale}{a size \eqn{(dx1)} strictly positive vector partly specifying the scale and variability of the
random covariance matrices in random mutations. The elements of the matrix \eqn{W} are drawn independently
from such normal distributions that the expectation of the main \strong{diagonal} elements of the first
regime's error term covariance matrix \eqn{\Omega_1 = WW'} is \code{W_scale}. The distribution of \eqn{\Omega_1}
will be in some sense like a Wishart distribution but with the columns (elements) of \eqn{W} obeying the given
constraints. The constraints are accounted for by setting the element to be always zero if it is subject to a zero
constraint and for sign constraints the absolute value or negative the absolute value are taken, and then the
variances of the elements of \eqn{W} are adjusted accordingly. This argument is ignored if reduced form model
is considered.}
\item{lambda_scale}{a length \eqn{M - 1} vector specifying the \strong{standard deviation} of the mean zero normal
distribution from which the eigenvalue \eqn{\lambda_{mi}} parameters are drawn in random mutations.
As the eigenvalues should always be positive, the absolute value is taken. The elements of \code{lambda_scale}
should be strictly positive real numbers with the \eqn{m-1}th element giving the degrees of freedom for the \eqn{m}th
regime. The expected value of the main \strong{diagonal} elements \eqn{ij} of the \eqn{m}th \eqn{(m>1)} error term covariance
matrix will be \code{W_scale[i]*(d - n_i)^(-1)*sum(lambdas*ind_fun)} where the \eqn{(d x 1)} vector \code{lambdas} is
drawn from the absolute value of the t-distribution, \code{n_i} is the number of zero constraints in the \eqn{i}th
row of \eqn{W} and \code{ind_fun} is an indicator function that takes the value one iff the \eqn{ij}th element of
\eqn{W} is not constrained to zero. Basically, larger lambdas (or smaller degrees of freedom) imply larger variance.
If the lambda parameters are \strong{constrained} with the \eqn{(d(M - 1) x r)} constraint matrix \eqn{C_lambda},
then provide a length \eqn{r} vector specifying the standard deviation of the (absolute value of the) mean zero
normal distribution each of the \eqn{\gamma} parameters are drawn from (the \eqn{\gamma} is a \eqn{(r x 1)} vector).
The expected value of the main diagonal elements of the covariance matrices then depend on the constraints.
This argument is ignored if \eqn{M==1} or a reduced form model is considered. Default is \code{rep(3, times=M-1)}
if lambdas are not constrained and \code{rep(3, times=r)} if lambdas are constrained.
As with omega_scale and W_scale, this argument should be adjusted carefully if specified by hand. \strong{NOTE}
that if lambdas are constrained in some other way than restricting some of them to be identical, this parameter
should be adjusted accordingly in order to the estimation succeed!}
\item{structural_pars}{If \code{NULL} a reduced form model is considered. For structural model, should be a list containing
the following elements:
\itemize{
\item \code{W} - a \eqn{(dxd)} matrix with its entries imposing constraints on \eqn{W}: \code{NA} indicating that the element is
unconstrained, a positive value indicating strict positive sign constraint, a negative value indicating strict
negative sign constraint, and zero indicating that the element is constrained to zero.
\item \code{C_lambda} - a \eqn{(d(M-1) x r)} constraint matrix that satisfies (\strong{\eqn{\lambda}}\eqn{_{2}}\eqn{,...,}
\strong{\eqn{\lambda}}\eqn{_{M}) =} \strong{\eqn{C_{\lambda} \gamma}} where \strong{\eqn{\gamma}} is the new \eqn{(r x 1)}
parameter subject to which the model is estimated (similarly to AR parameter constraints). The entries of \code{C_lambda}
must be either \strong{positive} or \strong{zero}. Ignore (or set to \code{NULL}) if the eigenvalues \eqn{\lambda_{mi}}
should not be constrained.
}
See Virolainen (2020) for the conditions required to identify the shocks and for the definition of the B-matrix (which is \eqn{W} times
a time-varying diagonal matrix with positive diagonal entries).}
}
\value{
\describe{
\item{For \strong{reduced form models}:}{Returns a \eqn{(d(d+1)/2x1)} vector containing vech-vectorized covariance matrix
\eqn{\Omega}.}
\item{For \strong{structural models}:}{Returns a length \eqn{d^2 - n_zeros + d*(M - 1)} vector of the form
\eqn{(Wvec(W),\lambda_2,...,\lambda_M)} where \eqn{\lambda_m=(\lambda_{m1},...,\lambda_{md})}
contains the eigenvalue parameters of the \eqn{m}th regime \eqn{(m>1)} and \eqn{n_zeros} is the number of zero constraints
in \eqn{W}. If lambdas are constrained, replace \eqn{d*(M - 1)} in the length with \eqn{r} and
\eqn{(\lambda_2,...,\lambda_M)} with \strong{\eqn{\gamma}}. The operator \eqn{Wvec()} vectorizes a matrix and removes zeros.}
}
}
\description{
\code{random_covmat} generates a random VAR model \eqn{(dxd)} error term covariance matrix \eqn{\Omega}
from a (scaled) Wishart distribution for reduced form models, and the parameters \eqn{W},\eqn{\lambda_2,...,\lambda_M}
for structural models (from normal distributions).
}
|
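A minimal usage sketch for the documentation above follows. It is illustrative only: random_covmat is accessed with ::: on the assumption that it is an internal gmvarkit helper, the scale vectors are made-up values, and the constraint matrix W is a hypothetical instance of the NA/sign/zero convention described under structural_pars.
# Hypothetical call sketch for random_covmat() with d = 3 series and M = 2 regimes.
# W imposes one positive sign, one negative sign and one zero constraint in the
# first column; NA entries are left unconstrained. Lambdas are unconstrained.
d <- 3
M <- 2
W_constraints <- matrix(c( 1, NA, NA,
                          -1, NA, NA,
                           0, NA, NA), nrow = d, byrow = TRUE)
struct_pars <- list(W = W_constraints, C_lambda = NULL)
set.seed(1)
pars <- gmvarkit:::random_covmat(d = d, M = M,
                                 omega_scale  = rep(1, d),     # ignored for structural models
                                 W_scale      = rep(1, d),     # illustrative scale only
                                 lambda_scale = rep(3, M - 1), # documented default
                                 structural_pars = struct_pars)
# Per the Value section: length d^2 - n_zeros + d*(M - 1) = 9 - 1 + 3 = 11
length(pars)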
# Wendy Drake
# 20 January 2015
# Boulder, Colorado
#run_analysis.r prepares a tidy data set from Source Data.
#It is 1 of 3 files required to complete the final project for Coursera class "Getting and Cleaning Data."
#See the README.md for an explanation of the script and CodeBook.md for a description of the variables, the data, and all
#transformations and work performed to clean up the data.
#SETUP ENVIRONMENT
install.packages("RCurl")
install.packages("reshape2")
install.packages("plyr")
install.packages("data.table")
library(RCurl)
library(reshape2)
library(plyr)
library(data.table)
setwd("/Users/wendy/Coursera/GetCleanData/Final Project")
#CLEAN DATA
#1a - Download and unzip zip file
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "./HARData.zip", method = "curl")
unzip("HARData.zip", overwrite = TRUE, exdir = "Data")
#1b - Import Data
#Activity Labels
activity.labels <- read.table("./Data/UCI HAR Dataset/activity_labels.txt")[,2]
#Features
features <- read.table("./Data/UCI HAR Dataset/features.txt")[,2]
#Training and Test Data
print("Reading data...")
subject.train <- read.table("./Data/UCI HAR Dataset/train/subject_train.txt")
x.train <- read.table("./Data/UCI HAR Dataset/train/X_train.txt")
y.train <- read.table("./Data/UCI HAR Dataset/train/y_train.txt")
subject.test <- read.table("./Data/UCI HAR Dataset/test/subject_test.txt")
x.test <- read.table("./Data/UCI HAR Dataset/test/X_test.txt")
y.test <- read.table("./Data/UCI HAR Dataset/test/y_test.txt")
print("Done reading data")
#1c - Append the descriptive Activity to the y_* data.
y.train[,2] <- activity.labels[y.train[,1]]
y.test[,2] <- activity.labels[y.test[,1]]
#Delete the no longer needed activity id.
y.train$V1 = NULL
y.test$V1 = NULL
#1d - Clean up and label columns descriptively with CamelCase
features <- gsub("(", "", features, fixed = TRUE) #Illegal
features <- gsub(")", "", features, fixed = TRUE) #Illegal
features <- gsub(",", "", features, fixed = TRUE) #Illegal
features <- gsub("-", "", features, fixed = TRUE) #Unnecessary
features <- gsub("mean", "Mean", features, fixed = TRUE) #CamelCase
features <- gsub("std", "Std", features, fixed = TRUE) #CamelCase
features <- gsub("^f", "Frequency", features) #CamelCase
features <- gsub("^t", "Time", features) #CamelCase
features <- gsub("Acc", "Accelerometer", features) #CamelCase
features <- gsub("Gyro", "Gyroscope", features) #CamelCase
features <- gsub("BodyBody", "Body", features) #Fixes Typo
#Label the columns.
names(subject.train) = "Subject"
names(x.train) = features
names(y.train) = "Activity"
names(subject.test) = "Subject"
names(x.test) = features
names(y.test) = "Activity"
print("Done Labeling Columns")
#1e - Extract only the Features (columns) from x_train and x_test containing mean or standard deviation
print("Extracting only means and stdevs")
#Features whose measures are mean or standard deviation.
Features2Extract <- grepl("Mean|Std", features)
#Extract means and stdev features.
x.train <- x.train[, Features2Extract]
x.test <- x.test[, Features2Extract]
print("Done Extracting only means and stdevs")
#1f - Merge Training and Test Data sets
print("Merging Test Data")
test.data <- cbind(y.test, x.test, subject.test)
print("Merging Training Data")
training.data <- cbind(y.train, x.train, subject.train)
print("Merging Test and Training Data")
all.data <- rbind(test.data, training.data)
#Remove all the columns referring to 'angle' measurements.
all.data <- all.data[-grep('^angle',colnames(all.data))]
#Remove the duplicated columns ending in 'MeanFreq'
all.data <- all.data[-grep('MeanFreq$',colnames(all.data))]
print("Done Merging all Data")
#Housecleaning
rm(x.test,x.train,y.test,y.train,subject.test,subject.train,features,activity.labels, Features2Extract, test.data, training.data)
#TIDY THE DATASET & EXPORT.
x <- melt(all.data, c("Subject","Activity"))
TidyData <- dcast(x, Subject + Activity ~ variable, mean)
print("Tidied AllData")
write.table(TidyData, file ="TidyData.txt", sep = ",", qmethod = "double", row.name=FALSE)
|
/run_analysis.r
|
no_license
|
DestinationEpic/TidyData
|
R
| false | false | 5,285 |
r
|
# Wendy Drake
# 20 January 2015
# Boulder, Colorado
#run_analysis.r prepares a tidy data set from Source Data.
#It is 1 of 3 files required to complete the final project for Coursera class "Getting and Cleaning Data."
#See the README.md for an explanation of the script and CodeBook.md for a description of the variables, the data, and all
#transformations and work performed to clean up the data.
#SETUP ENVIRONMENT
install.packages("RCurl")
install.packages("reshape2")
install.packages("plyr")
install.packages("data.table")
library(RCurl)
library(reshape2)
library(plyr)
library(data.table)
setwd("/Users/wendy/Coursera/GetCleanData/Final Project")
#CLEAN DATA
#1a - Download and unzip zip file
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "./HARData.zip", method = "curl")
unzip("HARData.zip", overwrite = TRUE, exdir = "Data")
#1b - Import Data
#Activity Labels
activity.labels <- read.table("./Data/UCI HAR Dataset/activity_labels.txt")[,2]
#Features
features <- read.table("./Data/UCI HAR Dataset/features.txt")[,2]
#Training and Test Data
print("Reading data...")
subject.train <- read.table("./Data/UCI HAR Dataset/train/subject_train.txt")
x.train <- read.table("./Data/UCI HAR Dataset/train/X_train.txt")
y.train <- read.table("./Data/UCI HAR Dataset/train/y_train.txt")
subject.test <- read.table("./Data/UCI HAR Dataset/test/subject_test.txt")
x.test <- read.table("./Data/UCI HAR Dataset/test/X_test.txt")
y.test <- read.table("./Data/UCI HAR Dataset/test/y_test.txt")
print("Done reading data")
#1c - Append the descriptive Activity to the y_* data.
y.train[,2] <- activity.labels[y.train[,1]]
y.test[,2] <- activity.labels[y.test[,1]]
#Delete the no longer needed activity id.
y.train$V1 = NULL
y.test$V1 = NULL
#1d - Clean up and label columns descriptively with CamelCase
features <- gsub("(", "", features, fixed = TRUE) #Illegal
features <- gsub(")", "", features, fixed = TRUE) #Illegal
features <- gsub(",", "", features, fixed = TRUE) #Illegal
features <- gsub("-", "", features, fixed = TRUE) #Unnecessary
features <- gsub("mean", "Mean", features, fixed = TRUE) #CamelCase
features <- gsub("std", "Std", features, fixed = TRUE) #CamelCase
features <- gsub("^f", "Frequency", features) #CamelCase
features <- gsub("^t", "Time", features) #CamelCase
features <- gsub("Acc", "Accelerometer", features) #CamelCase
features <- gsub("Gyro", "Gyroscope", features) #CamelCase
features <- gsub("BodyBody", "Body", features) #Fixes Typo
#Label the columns.
names(subject.train) = "Subject"
names(x.train) = features
names(y.train) = "Activity"
names(subject.test) = "Subject"
names(x.test) = features
names(y.test) = "Activity"
print("Done Labeling Columns")
#1e - Extract only the Features (columns) from x_train and x_test containing mean or standard deviation
print("Extracting only means and stdevs")
#Features whose measures are mean or standard deviation.
Features2Extract <- grepl("Mean|Std", features)
#Extract means and stdev features.
x.train <- x.train[, Features2Extract]
x.test <- x.test[, Features2Extract]
print("Done Extracting only means and stdevs")
#1f - Merge Training and Test Data sets
print("Merging Test Data")
test.data <- cbind(y.test, x.test, subject.test)
print("Merging Training Data")
training.data <- cbind(y.train, x.train, subject.train)
print("Merging Test and Training Data")
all.data <- rbind(test.data, training.data)
#Remove all the columns referring to 'angle' measurements.
all.data <- all.data[-grep('^angle',colnames(all.data))]
#Remove the duplicated columns ending in 'MeanFreq'
all.data <- all.data[-grep('MeanFreq$',colnames(all.data))]
print("Done Merging all Data")
#Housecleaning
rm(x.test,x.train,y.test,y.train,subject.test,subject.train,features,activity.labels, Features2Extract, test.data, training.data)
#TIDY THE DATASET & EXPORT.
x <- melt(all.data, c("Subject","Activity"))
TidyData <- dcast(x, Subject + Activity ~ variable, mean)
print("Tidied AllData")
write.table(TidyData, file ="TidyData.txt", sep = ",", qmethod = "double", row.name=FALSE)
|
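The decisive step in run_analysis.r is the final melt/dcast pair, which averages every retained feature per Subject/Activity combination (after the gsub chain, a raw feature such as tBodyAcc-mean()-X has become TimeBodyAccelerometerMeanX). The toy sketch below uses fabricated values, not the real UCI HAR data, to show the shape of the result.
# Toy data: two subjects, one activity each, two feature columns with made-up values.
library(reshape2)
toy <- data.frame(Subject  = c(1, 1, 2, 2),
                  Activity = c("WALKING", "WALKING", "SITTING", "SITTING"),
                  TimeBodyAccelerometerMeanX = c(0.20, 0.30, 0.10, 0.20),
                  TimeBodyAccelerometerStdX  = c(-0.95, -0.90, -0.99, -0.97))
long <- melt(toy, id.vars = c("Subject", "Activity"))    # one row per (Subject, Activity, variable)
tidy <- dcast(long, Subject + Activity ~ variable, mean) # mean of each variable per group
tidy
#   Subject Activity TimeBodyAccelerometerMeanX TimeBodyAccelerometerStdX
# 1       1  WALKING                       0.25                    -0.925
# 2       2  SITTING                       0.15                    -0.980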
plot1 <- function(){
## Read data
data <- read.csv2("household_power_consumption.txt",colClasses="character")
data1 <- data[data$Date=="1/2/2007" | data$Date=="2/2/2007",]
## histogram graph for "Global_active_power"
gap <- as.numeric(data1[,3])
xname <- "Global Active Power"
hist(gap,col="red",main=xname,xlab=paste(xname,"(kilowatts)"))
dev.copy(png,'plot1.png')
dev.off()
}
|
/plot1.R
|
no_license
|
wllnju/ExData_Plotting1
|
R
| false | false | 387 |
r
|
plot1 <- function(){
## Read data
data <- read.csv2("household_power_consumption.txt",colClasses="character")
data1 <- data[data$Date=="1/2/2007" | data$Date=="2/2/2007",]
## histogram graph for "Global_active_power"
gap <- as.numeric(data1[,3])
xname <- "Global Active Power"
hist(gap,col="red",main=xname,xlab=paste(xname,"(kilowatts)"))
dev.copy(png,'plot1.png')
dev.off()
}
|
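A common alternative to dev.copy() in plot1.R is to write the histogram straight to a png device, which makes the output size explicit. This is a sketch under the same assumptions as the original (a semicolon-separated household_power_consumption.txt in the working directory), not a required change.
plot1_direct <- function(datafile = "household_power_consumption.txt") {
  ## Same read/subset logic as plot1(), but the histogram is written
  ## directly to a png device instead of copying the screen device.
  data  <- read.csv2(datafile, colClasses = "character")
  data1 <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]
  gap   <- as.numeric(data1$Global_active_power)   # "?" entries become NA
  png("plot1.png", width = 480, height = 480)
  hist(gap, col = "red", main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)")
  dev.off()
}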