content<br>large_string, lengths 0–6.46M | path<br>large_string, lengths 3–331 | license_type<br>large_string, 2 classes | repo_name<br>large_string, lengths 5–125 | language<br>large_string, 1 class | is_vendor<br>bool, 2 classes | is_generated<br>bool, 2 classes | length_bytes<br>int64, 4–6.46M | extension<br>large_string, 75 classes | text<br>string, lengths 0–6.46M |
---|---|---|---|---|---|---|---|---|---|
VIP_data <- read.csv("VIP_161102.csv", header = TRUE, sep = ",", row.names = NULL, fill=TRUE)
VIP_enummer_missing_besok<-VIP_data[is.na(VIP_data$besok),2]
missing_besoks_ids<-VIP_data$Subject_id[is.na(VIP_data$besok)][!duplicated(VIP_data$Subject_id[is.na(VIP_data$besok)])]
for (subject_id in 1:length(missing_besoks_ids)){
besoks<-VIP_data[VIP_data$Subject_id==missing_besoks_ids[subject_id],228]
datums<-as.numeric(substr(VIP_data[VIP_data$Subject_id==missing_besoks_ids[subject_id],3],7,10))
write(paste0(missing_besoks_ids[subject_id],":",paste0(besoks[sort(datums, index.return=TRUE)$ix],collapse="")), file="temporal_checking_missing_besok", append = TRUE, sep = ",")
}
# With the temporary file, run:
# [jerneja_m@purple Private]$ cat temporal_checking_missing_besok | cut -d":" -f2 | sort | uniq -c
# Results (NA stands for a missing besok):
# 1 123NA
# 3813 12NA
# 3722 1NA
# 1 1NANA
# 6 2NA
# 10415 NA
# 3359 NA1
# 2268 NA12
# 3 NA123
# 27 NA12NA
# 58 NA1NA
# 18 NA2
# 16 NA23
# 1 NA2NANA
# 26 NANA
# 2 NANA1
# 2 NANA12
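# In-R alternative (a sketch, not part of the original script): the same tabulation
# can be reproduced without the shell, assuming the temporary file has been written.
pattern_counts <- table(sub("^[^:]*:", "", readLines("temporal_checking_missing_besok")))
print(sort(pattern_counts, decreasing = TRUE))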
|
/code_leanPhty/missing_besok/missing_besok.R
|
no_license
|
jernejaMislej/all_code
|
R
| false | false | 1,106 |
r
|
VIP_data <- read.csv("VIP_161102.csv", header = TRUE, sep = ",", row.names = NULL, fill=TRUE)
VIP_enummer_missing_besok<-VIP_data[is.na(VIP_data$besok),2]
missing_besoks_ids<-VIP_data$Subject_id[is.na(VIP_data$besok)][!duplicated(VIP_data$Subject_id[is.na(VIP_data$besok)])]
for (subject_id in 1:length(missing_besoks_ids)){
besoks<-VIP_data[VIP_data$Subject_id==missing_besoks_ids[subject_id],228]
datums<-as.numeric(substr(VIP_data[VIP_data$Subject_id==missing_besoks_ids[subject_id],3],7,10))
write(paste0(missing_besoks_ids[subject_id],":",paste0(besoks[sort(datums, index.return=TRUE)$ix],collapse="")), file="temporal_checking_missing_besok", append = TRUE, sep = ",")
}
# With the temporary file, run:
# [jerneja_m@purple Private]$ cat temporal_checking_missing_besok | cut -d":" -f2 | sort | uniq -c
# Results (NA stands for a missing besok):
# 1 123NA
# 3813 12NA
# 3722 1NA
# 1 1NANA
# 6 2NA
# 10415 NA
# 3359 NA1
# 2268 NA12
# 3 NA123
# 27 NA12NA
# 58 NA1NA
# 18 NA2
# 16 NA23
# 1 NA2NANA
# 26 NANA
# 2 NANA1
# 2 NANA12
|
## Do not edit this file manually.
## It has been automatically generated from *.org sources.
## taken from "pcts"
##
## This is called by show() methods, so it is safe to use 'class(object) == class'.
## Nevertheless (2020-02-29), change the test to 'class(object)[1] == class' to make it
## suitable for S3 methods, as well.
##
## Note: inherits(object, class) is not suitable here.
.reportClassName <- function(object, class, trailer = "\n"){
if(class(object)[1] == class)
cat('An object of class "', class, '"', trailer, sep = "")
NULL
}
## s - character string.
## v - numeric vector.
## none - character string.
.formatNameNumeric <- function(s, v, none = " <None>"){
if(length(v) == 0)
paste0(s, none)
else{
wrk <- paste0(v, collapse=" ")
paste0(s, wrk)
}
}
.capturePrint <- function(x){ # used by show() methods to print polynomials
capture.output(print(x))
}
# stats:::format.perc() is not exported, copy it here.
.stats.format.perc <- function (probs, digits)
paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits), "%")
diagOfVcov <- function(object, ...){
diag(vcov(object, ...))
}
setGeneric("diagOfVcov")
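## Usage sketch (illustrative calls, not part of the package):
## .reportClassName(fit, "SomeClass")          # prints 'An object of class "SomeClass"' when class(fit)[1] == "SomeClass"
## .formatNameNumeric("order: ", c(1, 0, 2))   # "order: 1 0 2"
## .formatNameNumeric("order: ", numeric(0))   # "order:  <None>"
## .stats.format.perc(c(0.025, 0.975), 3)      # "2.5 %"  "97.5 %"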
|
/R/utils.R
|
no_license
|
GeoBosh/sarima
|
R
| false | false | 1,210 |
r
|
## Do not edit this file manually.
## It has been automatically generated from *.org sources.
## taken from "pcts"
##
## This is called by show() methods, so it is safe to use 'class(object) == class'.
## Nevertheless (2020-02-29), change the test to 'class(object)[1] == class' to make it
## suitable for S3 methods, as well.
##
## Note: inherits(object, class) is not suitable here.
.reportClassName <- function(object, class, trailer = "\n"){
if(class(object)[1] == class)
cat('An object of class "', class, '"', trailer, sep = "")
NULL
}
## s - character string.
## v - numeric vector.
## none - character string.
.formatNameNumeric <- function(s, v, none = " <None>"){
if(length(v) == 0)
paste0(s, none)
else{
wrk <- paste0(v, collapse=" ")
paste0(s, wrk)
}
}
.capturePrint <- function(x){ # used by show() methods to print polynomials
capture.output(print(x))
}
# stats:::format.perc() is not exported, copy it here.
.stats.format.perc <- function (probs, digits)
paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits), "%")
diagOfVcov <- function(object, ...){
diag(vcov(object, ...))
}
setGeneric("diagOfVcov")
|
library('ProjectTemplate')
load.project()
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Day of Week")
ggsave(filename = "graphs/casual-vs-day-boxplot.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Day of Week") + coord_cartesian(ylim = c(0, 100))
ggsave(filename = "graphs/casual-vs-day-boxplot-zoom.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Day of Week")
ggsave(filename = "graphs/registered-vs-day-boxplot.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Day of Week") + coord_cartesian(ylim = c(0, 250))
ggsave(filename = "graphs/registered-vs-day-boxplot-zoom.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Day of Week")
ggsave(filename = "graphs/count-vs-day-boxplot.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Day of Week") + coord_cartesian(ylim = c(0, 325))
ggsave(filename = "graphs/count-vs-day-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Month")
ggsave(filename = "graphs/casual-vs-month-boxplot.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Month") + coord_cartesian(ylim = c(0,90))
ggsave(filename = "graphs/casual-vs-month-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Month")
ggsave(filename = "graphs/registered-vs-month-boxplot.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Month") + coord_cartesian(ylim = c(0,300))
ggsave(filename = "graphs/registered-vs-month-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Month")
ggsave(filename = "graphs/count-vs-month-boxplot.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Month") + coord_cartesian(ylim = c(0, 375))
ggsave(filename = "graphs/count-vs-month-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE), day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = weather, y = count)) + geom_boxplot(alpha = 0.5) + facet_grid(month ~ day, scales = "free") + ggtitle("Total Users vs Day, Month & Weather Scatterplot")
ggsave(filename = "graphs/count-vs-day-month-weather.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = casual)) + geom_point(alpha = 0.25) + ggtitle("Casual Users vs Hour")
ggsave(filename = "graphs/casual-hour-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = count)) + geom_point(alpha = 0.25) + ggtitle("Total Users Vs Hour")
ggsave(filename = "graphs/count-hour-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = registered)) + geom_point(alpha = 0.25) + ggtitle("Registered Users Vs Hour")
ggsave(filename = "graphs/registered-hour-scatterplot.png")
#http://stackoverflow.com/questions/5178813/ggplot2-legend-for-stat-summary
ggplot(data = train, aes(x = temp, y = casual)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Casual Users vs Temperature")
ggsave("graphs/casual-temp-summary.png")
ggplot(data = train, aes(x = temp, y = registered)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Registered Users vs Temperature")
ggsave("graphs/registered-temp-summary.png")
ggplot(data = train, aes(x = temp, y = count)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Total Users vs Temperature")
ggsave("graphs/count-temp-summary.png")
ggplot(data = train, aes(x = humidity, y = casual)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Casual Users vs Humidity")
ggsave("graphs/casual-humidity-summary.png")
ggplot(data = train, aes(x = humidity, y = registered)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Registered Users vs Humidity")
ggsave("graphs/registered-humidity-summary.png")
ggplot(data = train, aes(x = humidity, y = count)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Total Users vs Humidity")
ggsave("graphs/count-humidity-summary.png")
ggplot(data = train, aes(x = windspeed, y = casual)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Casual Users vs Windspeed")
ggsave("graphs/casual-windspeed-summary.png")
ggplot(data = train, aes(x = windspeed, y = registered)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Registered Users vs Windspeed")
ggsave("graphs/registered-windspeed-summary.png")
ggplot(data = train, aes(x = windspeed, y = count)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Total Users vs Windspeed")
ggsave("graphs/count-windspeed-summary.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = count)) + geom_point(alpha = 0.25, color = "orange") + ggtitle("Total Users Vs Hour & Working Day") + facet_wrap(~ workingday) + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue"))
ggsave(filename = "graphs/count-hour-workingday-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = casual)) + geom_point(alpha = 0.25, color = "orange") + ggtitle("Casual Users vs Hour & Working Day") + facet_wrap(~ workingday) + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue"))
ggsave(filename = "graphs/casual-hour-workingday-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = registered)) + geom_point(alpha = 0.25, color = "orange") + ggtitle("Registered Users Vs Hour & Working Day") + facet_wrap(~ workingday) + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue"))
ggsave(filename = "graphs/registered-hour-workingday-scatterplot.png")
|
/src/03-complex-eda-plots.R
|
no_license
|
jarhin/Kaggle-Competition-Bike-Sharing-Demand
|
R
| false | false | 14,288 |
r
|
library('ProjectTemplate')
load.project()
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Day of Week")
ggsave(filename = "graphs/casual-vs-day-boxplot.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Day of Week") + coord_cartesian(ylim = c(0, 100))
ggsave(filename = "graphs/casual-vs-day-boxplot-zoom.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Day of Week")
ggsave(filename = "graphs/registered-vs-day-boxplot.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Day of Week") + coord_cartesian(ylim = c(0, 250))
ggsave(filename = "graphs/registered-vs-day-boxplot-zoom.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Day of Week")
ggsave(filename = "graphs/count-vs-day-boxplot.png")
ggplot(data = transform(train, day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = day, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Day of Week") + coord_cartesian(ylim = c(0, 325))
ggsave(filename = "graphs/count-vs-day-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Month")
ggsave(filename = "graphs/casual-vs-month-boxplot.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = casual)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Casual Users Vs Month") + coord_cartesian(ylim = c(0,90))
ggsave(filename = "graphs/casual-vs-month-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Month")
ggsave(filename = "graphs/registered-vs-month-boxplot.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = registered)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Registered Users Vs Month") + coord_cartesian(ylim = c(0,300))
ggsave(filename = "graphs/registered-vs-month-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Month")
ggsave(filename = "graphs/count-vs-month-boxplot.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE)), aes(x = month, y = count)) + geom_boxplot() + stat_summary(geom = "point", fun.y = mean, colour = "blue") + ggtitle("Total Users Vs Month") + coord_cartesian(ylim = c(0, 375))
ggsave(filename = "graphs/count-vs-month-boxplot-zoom.png")
ggplot(data = transform(train, month = factor(strftime(train[["datetime"]], format = "%b"), levels = c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered = TRUE), day = factor(strftime(train[["datetime"]], format = "%a"), levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"), ordered = TRUE)), aes(x = weather, y = count)) + geom_boxplot(alpha = 0.5) + facet_grid(month ~ day, scales = "free") + ggtitle("Total Users vs Day, Month & Weather Scatterplot")
ggsave(filename = "graphs/count-vs-day-month-weather.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = casual)) + geom_point(alpha = 0.25) + ggtitle("Casual Users vs Hour")
ggsave(filename = "graphs/casual-hour-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = count)) + geom_point(alpha = 0.25) + ggtitle("Total Users Vs Hour")
ggsave(filename = "graphs/count-hour-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = registered)) + geom_point(alpha = 0.25) + ggtitle("Registered Users Vs Hour")
ggsave(filename = "graphs/registered-hour-scatterplot.png")
#http://stackoverflow.com/questions/5178813/ggplot2-legend-for-stat-summary
ggplot(data = train, aes(x = temp, y = casual)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Casual Users vs Temperature")
ggsave("graphs/casual-temp-summary.png")
ggplot(data = train, aes(x = temp, y = registered)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Registered Users vs Temperature")
ggsave("graphs/registered-temp-summary.png")
ggplot(data = train, aes(x = temp, y = count)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Total Users vs Temperature")
ggsave("graphs/count-temp-summary.png")
ggplot(data = train, aes(x = humidity, y = casual)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Casual Users vs Humidity")
ggsave("graphs/casual-humidity-summary.png")
ggplot(data = train, aes(x = humidity, y = registered)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Registered Users vs Humidity")
ggsave("graphs/registered-humidity-summary.png")
ggplot(data = train, aes(x = humidity, y = count)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Total Users vs Humidity")
ggsave("graphs/count-humidity-summary.png")
ggplot(data = train, aes(x = windspeed, y = casual)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Casual Users vs Windspeed")
ggsave("graphs/casual-windspeed-summary.png")
ggplot(data = train, aes(x = windspeed, y = registered)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Registered Users vs Windspeed")
ggsave("graphs/registered-windspeed-summary.png")
ggplot(data = train, aes(x = windspeed, y = count)) + geom_point(alpha = 0.25, colour = "orange") + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue")) + ggtitle("Total Users vs Windspeed")
ggsave("graphs/count-windspeed-summary.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = count)) + geom_point(alpha = 0.25, color = "orange") + ggtitle("Total Users Vs Hour & Working Day") + facet_wrap(~ workingday) + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue"))
ggsave(filename = "graphs/count-hour-workingday-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = casual)) + geom_point(alpha = 0.25, color = "orange") + ggtitle("Casual Users vs Hour & Working Day") + facet_wrap(~ workingday) + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue"))
ggsave(filename = "graphs/casual-hour-workingday-scatterplot.png")
ggplot(data = transform(train, hour = as.numeric(strftime(train[["datetime"]], format = "%H"))), aes(x = hour, y = registered)) + geom_point(alpha = 0.25, color = "orange") + ggtitle("Registered Users Vs Hour & Working Day") + facet_wrap(~ workingday) + geom_line(stat = "summary", fun.y = mean, aes(colour = "mean")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.10, aes(colour = "10%_quartile")) + stat_summary(geom = "line", fun.y = quantile, probs = 0.90, aes(colour = "90%_quartile")) + stat_summary(geom = "line", fun.y = median, aes(colour = "median")) + scale_color_manual("", values = c("mean" = "black", "10%_quartile" = "red", "90%_quartile" = "green", "median" = "blue"))
ggsave(filename = "graphs/registered-hour-workingday-scatterplot.png")
|
#' @title fun_name
#'
#' @description another replaced function: about half of the time it returns a random rap line instead of the result of base::missing()
#'
#' @param param fun_name
#'
#'
#'
#' @export
missing<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
if(runif(1,0,1) < 0.5){
rapek
}else{base::missing(params)
}
}
|
/R/missing.R
|
no_license
|
granatb/RapeR
|
R
| false | false | 669 |
r
|
#' @title fun_name
#'
#' @description another replaced function: about half of the time it returns a random rap line instead of the result of base::missing()
#'
#' @param param fun_name
#'
#'
#'
#' @export
missing<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
if(runif(1,0,1) < 0.5){
rapek
}else{base::missing(params)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils2.R
\name{ltbl}
\alias{ltbl}
\title{Helper function to extract the last part of question headings}
\usage{
ltbl(x, y, z)
}
\arguments{
\item{x}{= database name}
\item{y}{= column index group}
\item{z}{= column index}
}
\value{
last part of question headings
}
\description{
Helper function to extract the last part of question headings
}
\examples{
\dontrun{
ltbl(x,y,z)
}
}
\author{
Someone
}
|
/man/ltbl.Rd
|
no_license
|
unhcr/koboloadeR
|
R
| false | true | 480 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils2.R
\name{ltbl}
\alias{ltbl}
\title{Helper function to extract the last part of question headings}
\usage{
ltbl(x, y, z)
}
\arguments{
\item{x}{= database name}
\item{y}{= column index group}
\item{z}{= column index}
}
\value{
last part of question headings
}
\description{
Helper function to extract the last part of question headings
}
\examples{
\dontrun{
ltbl(x,y,z)
}
}
\author{
Someone
}
|
\name{slowprofile, velprofile}
\alias{velprofile}
\alias{slowprofile}
\title{Display the slowness or velocity profile}
\description{
This function plots the layered velocity or slowness model versus
depth.
}
\usage{
velprofile(mod, add = FALSE, col = "blue", lty = 1, lwd = 1, new = TRUE,
nticks = NULL, grid = FALSE, axes = TRUE, depth = "v",
bounds = TRUE)
slowprofile(mod, add = FALSE, col = "blue", lty = 1, lwd = 1,
new = TRUE, nticks = NULL, grid = FALSE, axes = TRUE,
depth = "v", bounds = TRUE)
}
\arguments{
\item{mod}{a velocity model returned from \code{Rvelslant}.}
\item{add}{logical value for if the model will be added to a previous
profile for comparison.}
\item{col}{line color for the profile.}
\item{lty}{line type for the profile.}
\item{lwd}{line width for the profile.}
\item{new}{logical value for if a new window will be created for the
profile.}
\item{nticks}{approximate number of tick marks desired for depth-axis
on travel-time plots. See \code{pretty} function in R package
\sQuote{base}.}
\item{grid}{logical value for plotting grid lines.}
\item{axes}{see \code{plot.default}.}
\item{depth}{layout of plots. Value can be \sQuote{v} for vertical or
\sQuote{h} for horizontal orientation of depth on the travel-time
plots.}
\item{bounds}{logical value for if the upper and lower bounds should
be plotted.}
}
\value{none}
\seealso{\code{\link{Rvelslant}}}
\author{Eric M. Thompson <eric.thompson@tufts.edu>}
\examples{
# See ?Rvelslant for example.
}
\keyword{}
|
/man/velprofile.Rd
|
permissive
|
emthompson-usgs/rvelslant
|
R
| false | false | 1,592 |
rd
|
\name{slowprofile, velprofile}
\alias{velprofile}
\alias{slowprofile}
\title{Display the slowness or velocity profile}
\description{
This function plots the layered velocity or slowness model versus
depth.
}
\usage{
velprofile(mod, add = FALSE, col = "blue", lty = 1, lwd = 1, new = TRUE,
nticks = NULL, grid = FALSE, axes = TRUE, depth = "v",
bounds = TRUE)
slowprofile(mod, add = FALSE, col = "blue", lty = 1, lwd = 1,
new = TRUE, nticks = NULL, grid = FALSE, axes = TRUE,
depth = "v", bounds = TRUE)
}
\arguments{
\item{mod}{a velocity model returned from \code{Rvelslant}.}
\item{add}{logical value for if the model will be added to a previous
profile for comparison.}
\item{col}{line color for the profile.}
\item{lty}{line type for the profile.}
\item{lwd}{line width for the profile.}
\item{new}{logical value for if a new window will be created for the
profile.}
\item{nticks}{approximate number of tick marks desired for depth-axis
on travel-time plots. See \code{pretty} function in R package
\sQuote{base}.}
\item{grid}{logical value for plotting grid lines.}
\item{axes}{see \code{plot.default}.}
\item{depth}{layout of plots. Value can be \sQuote{v} for vertical or
\sQuote{h} for horizontal orientation of depth on the travel-time
plots.}
\item{bounds}{logical value for if the upper and lower bounds should
be plotted.}
}
\value{none}
\seealso{\code{\link{Rvelslant}}}
\author{Eric M. Thompson <eric.thompson@tufts.edu>}
\examples{
# See ?Rvelslant for example.
}
\keyword{}
|
#' @name sweetpt
#' @title Sample data of sweet potato
#' @docType data
#' @aliases spg
#' @description This dataset contains sweet potato data
#' @references This data is related to HiDAP crop template
#' @usage spg
#' @format data frame
#' @source International Potato Center, sweet potato experimental data.
NULL
#save(spg,file = "data/spg.rda")
#' @name subsample
#' @title trial data using sub sampling for each observation
#' @docType data
#' @aliases subsample
#' @description This dataset contains data that comes from an experiment using sub sampling for each genotype.
#' @references This data is related to HiDAP fbcheck module.
#' @usage subsample
#' @format data frame
#' @source International Potato Center, potato experimental data.
NULL
# datos <- datos %>% dplyr::mutate(yield= rnorm(n= 500,mean = 12, sd = 1))
# subsample <- datos
# save(subsample, file = "data/subsample.rda")
#' @name fctsubsample
#' @title trial data using a factor together with sub sampling
#' @docType data
#' @aliases fctsubsample
#' @description This dataset contains data which comes from an experiment using factors and sub sampling.
#' @references This data is related to HiDAP fbcheck module.
#' @usage fctsubsample
#' @format data frame
#' @source International Potato Center, potato and sweet potato experimental data.
NULL
#' @name augbd
#' @title trial data from an augmented block design experiment using a factor with three levels; it also has sub samples per genotype
#' @docType data
#' @aliases augbd
#' @description This dataset contains data which comes from experiment using factors and sub sampling.
#' @references This data is related to HiDAP fbcheck module.
#' @usage augbd
#' @format data frame
#' @source International Potato Center, potato and sweet potato experimental data.
NULL
# datos <- read.delim2("clipboard")
# datos <- datos %>% dplyr::mutate(yield= rnorm(n= nrow(datos),mean = 12, sd = 1))
# fctsubsample <- datos
# save(fctsubsample, file = "data/fctsubsample.rda")
|
/R/data.R
|
permissive
|
CIP-RIU/traittools
|
R
| false | false | 2,071 |
r
|
#' @name sweetpt
#' @title Sample data of sweet potato
#' @docType data
#' @aliases spg
#' @description This dataset contains sweet potato data
#' @references This data is related to HiDAP crop template
#' @usage spg
#' @format data frame
#' @source International Potato Center, sweet potato experimental data.
NULL
#save(spg,file = "data/spg.rda")
#' @name subsample
#' @title trial data using sub sampling for each observation
#' @docType data
#' @aliases subsample
#' @description This dataset contains data that comes from an experiment using sub sampling for each genotype.
#' @references This data is related to HiDAP fbcheck module.
#' @usage subsample
#' @format data frame
#' @source International Potato Center, potato experimental data.
NULL
# datos <- datos %>% dplyr::mutate(yield= rnorm(n= 500,mean = 12, sd = 1))
# subsample <- datos
# save(subsample, file = "data/subsample.rda")
#' @name fctsubsample
#' @title trial data using a factor together with sub sampling
#' @docType data
#' @aliases fctsubsample
#' @description This dataset contains data which comes from an experiment using factors and sub sampling.
#' @references This data is related to HiDAP fbcheck module.
#' @usage fctsubsample
#' @format data frame
#' @source International Potato Center, potato and sweet potato experimental data.
NULL
#' @name augbd
#' @title trial data from an augmented block design experiment using a factor with three levels; it also has sub samples per genotype
#' @docType data
#' @aliases augbd
#' @description This dataset contains data which comes from experiment using factors and sub sampling.
#' @references This data is related to HiDAP fbcheck module.
#' @usage augbd
#' @format data frame
#' @source International Potato Center, potato and sweet potato experimental data.
NULL
# datos <- read.delim2("clipboard")
# datos <- datos %>% dplyr::mutate(yield= rnorm(n= nrow(datos),mean = 12, sd = 1))
# fctsubsample <- datos
# save(fctsubsample, file = "data/fctsubsample.rda")
|
# description -------------------------------------------------------------
# this script
# 1. plots built up area and population data saved at 03_1 and 03_2
# setup -------------------------------------------------------------------
source('R/setup.R')
# directory ---------------------------------------------------------------
ghsl_results_dir <- "//storage6/usuarios/Proj_acess_oport/data/urbanformbr/ghsl/results/"
# read data ---------------------------------------------------------------
# uca results based on ghsl
uca_all_final <- readr::read_rds('//storage6/usuarios/Proj_acess_oport/data/urbanformbr/ghsl/results/uca_pop_100000_built_up_area_population_results.rds')
# region dataset
regiao <- geobr::read_region()
# clean and manipulate data -----------------------------------------------
setDT(uca_all_final)
# add regiao
uca_all_final[
,
name_region := data.table::fcase(
grepl("^1", uca_all_final$code_urban_concentration), 'Norte',
grepl("^2", uca_all_final$code_urban_concentration), 'Nordeste',
grepl("^3", uca_all_final$code_urban_concentration), 'Sudeste',
grepl("^4", uca_all_final$code_urban_concentration), 'Sul',
grepl("^5", uca_all_final$code_urban_concentration), 'Centro Oeste'
)
]
# convert sf
uca_all_final <- sf::st_as_sf(uca_all_final)
# estimate polygon area
uca_all_final <- uca_all_final %>%
dplyr::mutate(area_polygon = units::set_units(sf::st_area(uca_all_final), value = km^2))
# estimate built up area
uca_all_final <- uca_all_final %>%
dplyr::mutate(
built_up_area1975 = bua_mean1975 * 0.01 * area_polygon,
built_up_area1990 = bua_mean1990 * 0.01 * area_polygon,
built_up_area2000 = bua_mean2000 * 0.01 * area_polygon,
built_up_area2014 = bua_mean2014 * 0.01 * area_polygon,
)
# * double variables (bua_mean and pop) -----------------------------------
uca_pivot_double <- uca_all_final %>%
setDT() %>%
select(name_uca_case,bua_mean1975:pop2015) %>%
pivot_longer(
cols = bua_mean1975:pop2015,
names_to = 'variavel'
) %>%
separate(variavel,c('variavel','ano'), -4) %>%
setDT()
# change year 2014 and 2015 into 2015
uca_pivot_double[ano %in% c(2014,2015), ano := 2015]
# tidy dataset
uca_pivot_double <- uca_pivot_double %>%
pivot_wider(id_cols = c(name_uca_case,ano),names_from = variavel,values_from = value)
# setdt
setDT(uca_pivot_double)
# * units variable (built_up_area) -----------------------------------------
uca_pivot_units <- uca_all_final %>%
setDT() %>%
select(name_uca_case,built_up_area1975:built_up_area2014) %>%
pivot_longer(
cols = built_up_area1975:built_up_area2014,
names_to = 'variavel'
) %>%
separate(variavel,c('variavel','ano'), -4) %>%
setDT()
# change year 2014 into 2015
uca_pivot_units[ano %in% c(2014), ano := 2015]
# tidy dataset
uca_pivot_units <- uca_pivot_units %>%
pivot_wider(id_cols = c(name_uca_case,ano),names_from = variavel,values_from = value)
setDT(uca_pivot_units)
uca_pivot <- uca_pivot_double
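# data.table update join: copy built_up_area from uca_pivot_units into uca_pivot,
# matching rows on name_uca_case and ano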
uca_pivot[
uca_pivot_units,
`:=` (
built_up_area = i.built_up_area
),
on = c('name_uca_case', 'ano')
]
rm(uca_pivot_double,uca_pivot_units)
# add region
uca_pivot <- uca_pivot %>%
dplyr::left_join(
uca_all_final %>% dplyr::select(name_uca_case,name_region),
by = c('name_uca_case' = 'name_uca_case')
) %>%
dplyr::relocate(name_region, .after = name_uca_case)
# plot data ----------------------------------------------------------------
uca_pivot %>%
#st_transform(4326) %>%
ggplot() +
geom_point(aes(x = ano, y = bua_mean, size = pop))
uca_pivot %>%
#st_transform(4326) %>%
ggplot() +
geom_point(aes(x = bua_mean, y = pop)) +
geom_smooth(aes(x = bua_mean, y = pop)
, method = 'lm'
) +
facet_wrap(~ano,nrow = 1) +
#scale_x_continuous(trans = 'log10') +
scale_y_continuous(trans = 'log10')
uca_pivot %>%
#dplyr::filter(!name_uca_case %in% c('rio_de_janeiro_rj', 'sao_paulo_sp')) %>%
dplyr::mutate(built_up_area = as.double(built_up_area)) %>%
ggplot() +
geom_point(aes(x = built_up_area, y = pop, colour = name_region)) +
geom_smooth(aes(x = built_up_area, y = pop)
, method = 'lm'
) +
facet_wrap(~ano,nrow = 1) +
ggforce::scale_x_unit(unit = 'km^2', trans = 'log10') +
#scale_x_continuous(trans = 'log10') +
scale_y_continuous(trans = 'log10')
|
/R/GHSL/05_1_plot_bua_pop.R
|
no_license
|
luizpedrocouto/urbanformbr
|
R
| false | false | 4,351 |
r
|
# description -------------------------------------------------------------
# this script
# 1. plots built up area and population data saved at 03_1 and 03_2
# setup -------------------------------------------------------------------
source('R/setup.R')
# directory ---------------------------------------------------------------
ghsl_results_dir <- "//storage6/usuarios/Proj_acess_oport/data/urbanformbr/ghsl/results/"
# read data ---------------------------------------------------------------
# uca results based on ghsl
uca_all_final <- readr::read_rds('//storage6/usuarios/Proj_acess_oport/data/urbanformbr/ghsl/results/uca_pop_100000_built_up_area_population_results.rds')
# region dataset
regiao <- geobr::read_region()
# clean and manipulate data -----------------------------------------------
setDT(uca_all_final)
# add regiao
uca_all_final[
,
name_region := data.table::fcase(
grepl("^1", uca_all_final$code_urban_concentration), 'Norte',
grepl("^2", uca_all_final$code_urban_concentration), 'Nordeste',
grepl("^3", uca_all_final$code_urban_concentration), 'Sudeste',
grepl("^4", uca_all_final$code_urban_concentration), 'Sul',
grepl("^5", uca_all_final$code_urban_concentration), 'Centro Oeste'
)
]
# convert sf
uca_all_final <- sf::st_as_sf(uca_all_final)
# estimate polygon area
uca_all_final <- uca_all_final %>%
dplyr::mutate(area_polygon = units::set_units(sf::st_area(uca_all_final), value = km^2))
# estimate built up area
uca_all_final <- uca_all_final %>%
dplyr::mutate(
built_up_area1975 = bua_mean1975 * 0.01 * area_polygon,
built_up_area1990 = bua_mean1990 * 0.01 * area_polygon,
built_up_area2000 = bua_mean2000 * 0.01 * area_polygon,
built_up_area2014 = bua_mean2014 * 0.01 * area_polygon,
)
# * double variables (bua_mean and pop) -----------------------------------
uca_pivot_double <- uca_all_final %>%
setDT() %>%
select(name_uca_case,bua_mean1975:pop2015) %>%
pivot_longer(
cols = bua_mean1975:pop2015,
names_to = 'variavel'
) %>%
separate(variavel,c('variavel','ano'), -4) %>%
setDT()
# change year 2014 and 2015 into 2015
uca_pivot_double[ano %in% c(2014,2015), ano := 2015]
# tidy dataset
uca_pivot_double <- uca_pivot_double %>%
pivot_wider(id_cols = c(name_uca_case,ano),names_from = variavel,values_from = value)
# setdt
setDT(uca_pivot_double)
# * units variable (built_up_area) -----------------------------------------
uca_pivot_units <- uca_all_final %>%
setDT() %>%
select(name_uca_case,built_up_area1975:built_up_area2014) %>%
pivot_longer(
cols = built_up_area1975:built_up_area2014,
names_to = 'variavel'
) %>%
separate(variavel,c('variavel','ano'), -4) %>%
setDT()
# change year 2014 into 2015
uca_pivot_units[ano %in% c(2014), ano := 2015]
# tidy dataset
uca_pivot_units <- uca_pivot_units %>%
pivot_wider(id_cols = c(name_uca_case,ano),names_from = variavel,values_from = value)
setDT(uca_pivot_units)
uca_pivot <- uca_pivot_double
uca_pivot[
uca_pivot_units,
`:=` (
built_up_area = i.built_up_area
),
on = c('name_uca_case', 'ano')
]
rm(uca_pivot_double,uca_pivot_units)
# add region
uca_pivot <- uca_pivot %>%
dplyr::left_join(
uca_all_final %>% dplyr::select(name_uca_case,name_region),
by = c('name_uca_case' = 'name_uca_case')
) %>%
dplyr::relocate(name_region, .after = name_uca_case)
# plot data ----------------------------------------------------------------
uca_pivot %>%
#st_transform(4326) %>%
ggplot() +
geom_point(aes(x = ano, y = bua_mean, size = pop))
uca_pivot %>%
#st_transform(4326) %>%
ggplot() +
geom_point(aes(x = bua_mean, y = pop)) +
geom_smooth(aes(x = bua_mean, y = pop)
, method = 'lm'
) +
facet_wrap(~ano,nrow = 1) +
#scale_x_continuous(trans = 'log10') +
scale_y_continuous(trans = 'log10')
uca_pivot %>%
#dplyr::filter(!name_uca_case %in% c('rio_de_janeiro_rj', 'sao_paulo_sp')) %>%
dplyr::mutate(built_up_area = as.double(built_up_area)) %>%
ggplot() +
geom_point(aes(x = built_up_area, y = pop, colour = name_region)) +
geom_smooth(aes(x = built_up_area, y = pop)
, method = 'lm'
) +
facet_wrap(~ano,nrow = 1) +
ggforce::scale_x_unit(unit = 'km^2', trans = 'log10') +
#scale_x_continuous(trans = 'log10') +
scale_y_continuous(trans = 'log10')
|
################################################################################
##
## Implements the IWL-SSE model. This work was published in the paper:
## [1] "The days before zero day: Investment models for secure software
## engineering" by C. Heitzenrater, R. Böhme, and A. C. Simpson.
## In Proceedings of the 15th Workshop on the Economics of Information Security
## (WEIS 2016) (June 2016).
##
## This code leverages code provided by Rainer Böhme, Feb 2016. (IWL-alt.R)
## See file "iwl-Alt.R" for information on that code
##
################################################################################
## Dependent source files
source("./iwl-Alt.R",local=TRUE)
################################################################
# IWL-SSE MODEL
################################################################
####
## Calculates the phase cost of review
## iter Number of review iterations (default=0)
## iterCost Cost per review iteration (default=1)
## probSucc Probability of success of each review (effectiveness) (default=1)
## fixCost Cost incurred when a review is successful
##
phaseCostRev <- function( iter=0, iterCost=1, probSucc=1, fixCost=1 ){
### DEBUG
#message("review=",(iter * iterCost) + iter * (probSucc * fixCost))
return( (iter * iterCost) + iter * (probSucc * fixCost) )
}
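## Illustrative example (values assumed, not from [1]): 3 reviews at unit cost 2,
## each 50% effective, with a fix cost of 4 per successful review:
## phaseCostRev(iter=3, iterCost=2, probSucc=0.5, fixCost=4)  # 3*2 + 3*(0.5*4) = 12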
####
## Calculates the phase cost of test
## iter Number of test iterations (default=0)
## iterCost Cost per test iteration (default=1)
## probSucc Probability of success of each test (test effectiveness) (default=1)
## prevSucc Probability of success of reviews (review effectiveness) (default=1)
## prevIter The number of review iterations undertaken previously (default=0)
## costBug Cost incurred when a bug is found
## costFlaw Cost incurred when a flaw is found
##
phaseCostTest <- function( iter=0, iterCost=1, probSucc=1, prevSucc=1, prevIter=0, costBug=1, costFlaw=10 ){
### DEBUG
#message("test=",(iter * iterCost) +
# iter * (probSucc * ((costBug+costFlaw)/2)))
return( (iter * iterCost) +
iter * (probSucc * ( (costFlaw) / (2^(prevIter*prevSucc)) + costBug ) ) )
}
####
## Calculates the overall uncertainty resulting after the execution of software
## process steps
## sigmaMax starting uncertainty (default=16)
## alpha effectiveness of reviews (default=1)
## beta effectiveness of tests (default=1)
## revIter number of iterations in the review phase (default=0)
## testIter number of iterations in the test phase (default=0)
##
setOverallUncertainty <- function( sigmaMax=16, alpha=1, beta=1, revIter=0,
testIter=0 ){
return( sigmaMax <- sigmaMax -
setPhaseUncertainty(sigmaPhase=(sigmaMax/2), eff=alpha, iter=revIter) -
setPhaseUncertainty(sigmaPhase=(sigmaMax/2), eff=beta, iter=testIter) );
}
####
## Calculates the uncertainty resulting after the execution of one phase of
## software process
## sigmaPhase starting uncertainty for that phase (default=8)
## eff effectiveness of the process in this phase (default=1)
## iter number of iterations in this phase (default=0)
##
setPhaseUncertainty <- function( sigmaPhase=8, eff=1, iter=0 ){
return( sigmaPhase * (eff) ^ (1/iter) )
}
####
## Calculates the current attack gradient
## revEff effectiveness of the process in the review phase (default=1)
## testEff effectiveness of the process in the test phase (default=1)
## revIter number of iterations in the review phase (default=0)
## testIter number of iterations in the test phase (default=0)
##
setGradientOfAttack <- function( revEff=1, testEff=1, revIter=0, testIter=0 ){
return( sqrt( 1 + revEff*revIter + testEff*testIter) )
}
####
## Calculates the costs of the software process
## revIter number of iterations in the review phase
## testIter number of iterations in the test phase
## revEff effectiveness of the process in the review phase
## testEff effectiveness of the process in the test phase
## revCost cost per iteration of the process in the review phase
## testCost cost per iteration of the process in the test phase
## sigmaMax starting uncertainty (default=16)
## x0 starting attack cost (default=15, based on IWL)
##
CalculateSWProcessCosts <- function( revIter=0, testIter=0, revEff=1, testEff=1, revCost=1, testCost=1, sigmaMax=16, x0=15 ){
cr=0; ## phase costs for review
ct=0; ## phase costs for test
uncert=0; ## overall uncertainty
deltaX=0; ## overall gradient of attack cost
## Review phase (AD), t = -2
cr <- phaseCostRev( iter=revIter, iterCost=revCost, probSucc=revEff,
fixCost=0.01 )
## Test phase (IT), t = -1
ct <- phaseCostTest( iter=testIter, iterCost=testCost, probSucc=testEff,
prevSucc=revEff, prevIter=revIter, costBug=0.01, costFlaw=0.1 )
## set overall costs, based on per-phase costs
costs <- (cr + ct);
### DEBUG
#message("overall costs=",costs)
## set overall Uncertainty, based on per-phase effectiveness &
## number of iterations
uncert <- setOverallUncertainty(sigmaMax=sigmaMax, alpha=revEff,
beta=testEff, revIter=revIter, testIter=testIter)
## calculate gradient of attack, based on per-phase effectiveness &
## number of iterations
deltaX <- setGradientOfAttack( revEff=revEff, testEff=testEff,
revIter=revIter, testIter=testIter )
## return the software process results as a list
## costs Costs incurred by the process
## sigma The residual uncertainty after the process
## dx The resulting attack gradient after the process
return(list(costs=costs,sigma=uncert,dx=deltaX))
}
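## Illustrative usage (parameter values are assumptions, not taken from [1]):
## res <- CalculateSWProcessCosts(revIter=3, testIter=2, revEff=0.5, testEff=0.5,
##                                revCost=1, testCost=1, sigmaMax=16, x0=15)
## res$costs; res$sigma; res$dx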
###
# Calculates the costs of the software process in conjunction with the IWL
# revIter number of iterations in the review phase
# testIter number of iterations in the test phase
# revEff effectiveness of the process in the review phase
# testEff effectiveness of the process in the test phase
# revCost cost per iteration of the process in the review phase
# testCost cost per iteration of the process in the test phase
# sigmaMax starting uncertainty (default=16)
# x0 starting attack cost (default=15, based on IWL)
#
CalculateOverallCosts <- function( revIter=0, testIter=0, revEff=1, testEff=1, revCost=1, testCost=1, sigmaMax=16, x0=15 ){
cr=0; ## phase costs for review
ct=0; ## phase costs for test
uncert=0; ## overall uncertainty
deltaX=0; ## overall gradient of attack cost
### Review phase (AD), t = -2
cr <- phaseCostRev( iter=revIter, iterCost=revCost, probSucc=revEff,
fixCost=0.01 )
### Test phase (IT), t = -1
ct <- phaseCostTest( iter=testIter, iterCost=testCost, probSucc=testEff,
prevSucc=revEff, prevIter=revIter, costBug=0.01, costFlaw=0.1 )
## set overall costs, based on per-phase costs
costs <- (cr + ct);
### DEBUG
#message("overall costs=",costs)
## set overall Uncertainty, based on per-phase effectiveness &
## number of iterations
uncert <- setOverallUncertainty(sigmaMax=sigmaMax, alpha=revEff,
beta=testEff, revIter=revIter, testIter=testIter)
    ## calculate gradient of attack, based on per-phase effectiveness &
## number of iterations
deltaX <- setGradientOfAttack( revEff=revEff, testEff=testEff,
revIter=revIter, testIter=testIter )
dynamicVals <- dynamic.revenue( n=25, a=1000, z=.025, r=.05,
rho=.1, dx=deltaX, x0, sigma=uncert, lambda=0, cp=costs )
## DEBUG --- testing code
#staticVals <- static.revenue(n=25,a=1000,z=.025,r=.05,rho=.1,dx=deltaX,x0=15,sigma=uncert,cp=costs)
#### Code commented out ######################################
#v <- as.numeric(dynamicVals$rev[dynamicVals$k+1])
#message(dynamicVals$rev[dynamicVals$k+1])
#produceDynamicRevenueGraphs( initialCTB=15, uncert=uncert,
# processCosts=costs, revIter=revIter, testIter=testIter, deltaX=deltaX )
#produceDynamicRevenueGraphs( initialCTB=15, uncert=uncert,
# processCosts=costs, revIter=revIter, testIter=testIter, deltaX=deltaX,
# dynamicVals=dynamicVals, staticVals=staticVals )
### this is the one to fix to make it work
## DEBUG
#return(list(vals=dynamicVals$rev[dynamicVals$k+1],k=k+1))
############################################################
return(dynamicVals)
}
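####
## Illustrative call (all parameter values below are hypothetical assumptions):
## two reviews and one test with assumed effectiveness and cost figures. The
## returned list holds the revenue curve over k defences and the optimal k.
#sse <- CalculateOverallCosts( revIter=2, testIter=1, revEff=0.8, testEff=0.6,
#    revCost=0.5, testCost=0.5 )
#sse$rev[sse$k+1] ## expected return at the IWL-optimal number of defences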
####
## Calculates the Return on Secure Software Process (ROSSP)
## ROSI_SSE The return on investment with SSE
## ROSI_NOSSE The return on investment with no SSE
##
ROSSP <- function( ROSI_SSE=0, ROSI_NOSSE=0 ){
return( ROSI_SSE - ROSI_NOSSE )
}
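####
## Illustrative use (hypothetical figures, continuing the sketch above):
## ROSSP is simply the difference between the SSE and no-SSE returns.
#baseline <- CalculateOverallCosts( revIter=0, testIter=0 )
#ROSSP( ROSI_SSE=sse$rev[sse$k+1], ROSI_NOSSE=baseline$rev[baseline$k+1] )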
################################################################
## OPTIMAL INVESTMENTS
################################################################
####
## Uses CalculateOverallCosts to compute the return for one candidate SSE
## investment (a fixed number of review and test iterations)
## maxRevIter number of iterations in the review phase (default = 0)
## maxTestIter number of iterations in the test phase (default = 0)
## revEff effectiveness of the process in the review phase (default=1)
## testEff effectiveness of the process in the test phase (default=1)
## revCost cost per iteration of the process in the review phase
## (default=1)
## testCost cost per iteration of the process in the test phase (default=1)
## sigmaMax starting uncertainty (default=16)
## x0 starting attack cost (default=15, based on IWL)
##
FindOptimalInvestmentState <- function( maxRevIter=0, maxTestIter=0, revEff=1, testEff=1, revCost=1, testCost=1, sigmaMax=16, x0=15 ){
dynamicVals <- CalculateOverallCosts( maxRevIter, maxTestIter, revEff,
testEff, revCost, testCost, sigmaMax, x0 )
return(dynamicVals$rev[dynamicVals$k+1])
}
####
## Loops over a number of review and test iterations to identify the optimal
## investment
## maxRevIter Maximum number of iterations in the review phase
## (outer loop: 0 to max; default = 0)
## maxTestIter Maximum number of iterations in the test phase
## (inner loop: 0 to max; default = 0)
## revEff effectiveness of the process in the review phase (default=1)
## testEff effectiveness of the process in the test phase (default=1)
## revCost cost per iteration of the process in the review phase
## (default=1)
## testCost cost per iteration of the process in the test phase (default=1)
## sigmaMax starting uncertainty (default=16)
## x0 starting attack cost (default=15, based on IWL)
##
FindOptimalSWInvestment <- function( maxRevIter=0, maxTestIter=0, revEff=1, testEff=1, revCost=1, testCost=1, sigmaMax=16, x0=15 ){
    ## matrix to hold results; row/column 1 correspond to 0 iterations,
    ## so the matrix needs maxRevIter+1 rows and maxTestIter+1 columns
    vals <- matrix(data=NA, nrow=maxRevIter+1, ncol=maxTestIter+1)
    ## number of initial defences
    k <- 0;
    ## loop over reviews, then tests....
    for( i in 0:maxRevIter ){
        for( j in 0:maxTestIter ){
            ## Set the (i+1),(j+1) value equal to the return of the integrated
            ## IWL-SSE process for i reviews and j tests
            ## (indices are offset by 1 because R matrices are 1-based)
            vals[i+1,j+1] <- FindOptimalInvestmentState( i, j, revEff=revEff,
                testEff=testEff, revCost=revCost, testCost=testCost,
                sigmaMax=sigmaMax, x0=x0 )
            ### DEBUG
            #message("---- iteration ", i, " ", j)
            #message(vals[i+1,j+1])
        }
    }
    ## Identify the index that points to the maximal value of return
    ind <- which(vals == max(vals), arr.ind = TRUE)
    ## Output the number of reviews and tests (indices minus 1) and the return
    paste("Best is ", ind[1]-1, " review(s) and ", ind[2]-1,
        " test(s), with return ", format(vals[ind],digits=8), sep="")
}
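####
## Example sweep (hypothetical parameter values, for illustration only):
## evaluate 0..3 reviews against 0..3 tests and report the best combination.
#FindOptimalSWInvestment( maxRevIter=3, maxTestIter=3, revEff=0.8, testEff=0.6,
#    revCost=0.5, testCost=0.5, sigmaMax=16, x0=15 )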
################################################################################
##
## LEGACY CODE
## NOTE: commented out --- produces previously used graph
##
################################################################################
################################################################
## UNUSED --- Used with old graph generation functions above
##
# produceDynamicRevenueGraphs <- function( initialCTB=15, uncert=1,
# processCosts=0, revIter, testIter, deltaX=1, dynamicVals, staticVals ){
# plot(NA,NA,las=1,
# xlab=c("number of defenses in place (k)",
# paste("probAttack=", format(prob.tatt(n=1:25, x0=initialCTB, dx=deltaX,
# sigma=uncert, a=1000,z=.025),digits=8),sep="")),
# ylab=c( paste("Exp Return: cp =",format(processCosts,digits=8),",
# delta x=",format(deltaX,digits=8),", sigma=", format(uncert,digits=8),
# sep=""), paste("rev=",revIter,", test=",testIter,", dynamic= ",
# format(max(dynamicVals$rev), digits=8),", static= ",
# format(max(staticVals$rev), digits=8), sep="") ),
# ylim=range(-10:50),xlim=c(0,25) )
# rect(-80,-80,30,0,col=gray(.85),lty=3)
# points(staticVals$k,staticVals$rev[staticVals$k+1],pch=16,cex=2,col="yellow")
# lines(0:25,staticVals$rev,type="b",col="black",pch=16)
# points(dynamicVals$k,dynamicVals$rev[dynamicVals$k+1],pch=15,cex=2,
# col="yellow")
# lines(0:25,dynamicVals$rev,type="b",col="black",pch=15)
# legend("right","top", c("Static","Dynamic"), pch=c(16,15))
# Sys.sleep(0.2)
# }
|
/models/iwl-SSE.R
|
no_license
|
CHeitzenrater/InfoSecEcon-InvestmentModels
|
R
| false | false | 12,922 |
r
|
#ratings <- read.delim('~/Downloads/ml-100k/u.data', sep='\t', header=FALSE)
#users <- read.delim('~/Downloads/ml-100k/u.user', sep='|', header=FALSE)
#movies <- read.delim('~/Downloads/ml-100k/u.item', sep='|', header=FALSE)
#install.packages("twitteR")
#install.packages("ROAuth")
#install.packages("plyr")
#install.packages("stringr")
#install.packages("ggplot2")
library(twitteR)
library(ROAuth)
library(plyr)
library(stringr)
library(ggplot2)
download.file(url="https://curl.haxx.se/ca/cacert.pem", destfile ="cacert.pem")
#reqURL <- 'https://api.twitter.com/oauth/request_token'
#accessURL <- 'https://api.twitter.com/oauth/access_token'
#authURL <- 'https://api.twitter.com/oauth/authorize'
#consumerKey <- 'DiEWrBB8uRYEcz6uhu3PjUYDT' #put the Consumer Key from Twitter Application
#consumerSecret <- 'IoPA9hDXSSD6xu2YKo0I4zYrz2frByMtcu7cFGG3iPkGeajTE2' #put the Consumer Secret from Twitter Application
#Cred <- OAuthFactory$new(consumerKey=consumerKey,
#consumerSecret=consumerSecret,
#requestURL=reqURL,
#accessURL=accessURL,
#authURL=authURL)
#Cred$handshake(cainfo = system.file('CurlSSL', 'cacert.pem', package = 'RCurl'))
#6561979
api_key = 'DiEWrBB8uRYEcz6uhu3PjUYDT'
api_secret = 'IoPA9hDXSSD6xu2YKo0I4zYrz2frByMtcu7cFGG3iPkGeajTE2'
access_token = '742900921508630528-jo9OgR1uql9KXuA0bQHR1PUEkNgTc7v'
access_token_secret = '36A6n7bG5Y18k6OPyQcfVOfgoTF4FIuJcre2zBrCrgdaJ'
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
some_tweets <- searchTwitter("#starbucks", n=100, lang="en")
tweetdf <- twListToDF(some_tweets)
write.csv(tweetdf, '~/Desktop/tweetdf.csv')
#sentiment analysis
#thailand <- searchTwitter("thailand", n=1000, lang="en")
#thailand2 <- twListToDF(thailand)
#sentiment function
library(plyr)
library(stringr)
score.sentiment = function(sentences, pos.words, neg.words, .progress='none'){
require(plyr)
require(stringr)
    scores = laply(sentences, function(sentence, pos.words, neg.words){
sentence = gsub('[[:punct:]]', '', sentence)
sentence = gsub('[[:cntrl:]]', '', sentence)
sentence = gsub('\\d+', '', sentence)
sentence = tolower(sentence)
word.list = str_split(sentence, '\\s+')
        words = unlist(word.list)
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress)
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
}
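#Illustrative usage (assumes positive/negative word lists are loaded first, e.g.
#from the Hu & Liu opinion lexicon; the file names below are assumptions):
#pos.words <- scan('positive-words.txt', what='character', comment.char=';')
#neg.words <- scan('negative-words.txt', what='character', comment.char=';')
#scores <- score.sentiment(tweetdf$text, pos.words, neg.words)
#hist(scores$score)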
|
/data-BigData/krid2.R
|
no_license
|
Kundjanasith/data-mini1
|
R
| false | false | 2,562 |
r
|
library(shinyWidgets)
library(shinyjs)
library(shiny)
library(shinythemes)
library(rdrop2)
library(dplyr) # needed below for bind_rows, bind_cols, mutate, tibble and the pipe
token <- readRDS("token.rds") # read the saved Dropbox auth token from disk
drop_acc(dtoken = token)
################ dropbox save and load data function ####################
saveData <- function(input) {
old_df = drop_read_csv("mentors.csv")
data <- data.frame(matrix(nrow=1,ncol=0))
for (x in fields) {
var <- input[[x]]
if (x == "photo_wig" & length(var)!=0){
img_file=var$datapath
if (grepl("\\.jpg|\\.JPG|\\.jpeg|\\.JPEG",img_file)){
img_format=".jpeg"
}
if (grepl("\\.png|\\.PNG",img_file)){
img_format=".png"
}
}else if (x == "photo_wig" & length(var)==0){
img_file="unknown.jpg"
}
else{
if (length(var)==0){
data[[x]] <- " "
}
else if (length(var) > 1 ) {
# handles lists from checkboxGroup and multiple Select
data[[x]] <- list(var)
} else {
# all other data types
data[[x]] <- var
}
}
}
data$submit_time <- date()
# Create a unique file name
name1=as.integer(Sys.time())
name2=digest::digest(data)
fileName <- sprintf(
"%s_%s.rds",
name1,
name2
)
# rename imagefilename
if (img_file!="unknown.jpg"){
img_newName <-sprintf(
paste0("%s_%s",img_format),
name1,
name2
)
file.rename(from=img_file, to=file.path(tempdir(),img_newName))
# upload the file to dropbox
drop_upload(file.path(tempdir(),img_newName))
}else{
img_newName = "unknown.jpg"
}
# save tmp file here
    # add photo file name to data column
data["photo_wig"]=img_newName
colnames(data) = c("name","pronoun","linkedin", "signUp.type","expertises","primary.employment","preferred.mentor.method","submit.timestamp","photo.link")
# write new data to csv and upload to dropbox
old_df = bind_rows(old_df, data)
write.csv(old_df, file=file.path(tempdir(),"mentors.csv"))
drop_upload(file.path(tempdir(),"mentors.csv"))
}
loadData <- function() {
# read csv
data <- drop_read_csv("mentors.csv")
if (nrow(data) == 0) {
# create empty data frame with correct columns
field_list <- c(fields, "submit_time")
data <- data.frame(matrix(ncol = length(field_list), nrow = 0))
names(data) <- field_list
}
drop_get("jigglypuff.jpeg")
# data
out = tibble(
photo=sapply(data$photo.link,function(pic){paste0('<img src=',pic,' height=52></img>')})
)
out = out %>%
mutate(name=mapply(function(url,text){paste0("<a href='",url,"'>",text,"</a>")}, data$linkedin, data$name))
out = bind_cols(
out %>% as.data.frame(),
data[,c("pronoun","signUp.type","expertises","primary.employment","preferred.mentor.method")]
)
out
}
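## Note (illustrative): saveData() and loadData() assume "mentors.csv" already
## exists in the Dropbox app folder with the columns written by saveData().
## A one-off bootstrap could look like this (hypothetical, run once manually):
#init <- data.frame(name=character(0), pronoun=character(0), linkedin=character(0),
#    signUp.type=character(0), expertises=character(0), primary.employment=character(0),
#    preferred.mentor.method=character(0), submit.timestamp=character(0),
#    photo.link=character(0))
#write.csv(init, file.path(tempdir(), "mentors.csv"), row.names=FALSE)
#drop_upload(file.path(tempdir(), "mentors.csv"))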
#################### user variables #########################
types=c("Speaker","Mentor")
expertises=c("Academia to industry transition","Transition to new field/industry","Project/team management","Making data science more accessible","Working with big datasets","Language research","Data cleaning","Capacity building","Global health","Data visualization","Package creation","Geospatial science","Ecological modeling","Mental health","Building scalable tools","Reproducible research","App development")
employment=c("Academic","Pharmaceutical","Financial","Business","Research","Quality assurance","Government/public sector")
meets=c("In-person","Remote (e.g. by phone or online)")
genders=c("She/her", "He/him", "They/them","Other")
#################### define widget (variable_name/field_name and inputId must be the same) #################
fields <- c("name_wig", "gender_wig", "linkedin_wig", "photo_wig",
"type_wig", "expertise_wig", "employment_wig", "meet_wig")
name_wig <- textInput("name_wig", "Name:", "")
gender_wig <- radioButtons(
"gender_wig",
"Pronouns:",
genders,
inline = TRUE,
selected = "none"
)
linkedin_wig <- textInput("linkedin_wig","LinkedIn Profile Link:","")
photo_wig <- fileInput("photo_wig", "Your photo (eg. .jpeg, .png)", accept = c("jpeg","png"))
type_wig <- checkboxGroupInput(
"type_wig",
"Available as mentor and/or speaker?",
types
)
expertise_wig <- selectizeInput(
inputId = "expertise_wig",
label = "Areas of expertise",
choices = expertises,
multiple = T,
options = list(create = TRUE)
)
employment_wig <- selectizeInput(
inputId = "employment_wig",
label = "Primary type of employment",
choices = employment,
multiple = F,
options = list(create = TRUE)
)
meet_wig <- checkboxGroupInput(
"meet_wig",
"If you are willing to serve as a mentor, \nwhat is your preferred method of communication with your mentees?",
meets
)
clear_wig <- actionButton("clear", "Clear Form")
submit_wig <- actionButton("submit", "Submit")
##################### resetForm function #######################
resetForm <- function(session) {
updateTextInput(session, "name_wig", value = "")
updateRadioButtons(session, "gender_wig", selected = "none")
updateTextInput(session, "linkedin_wig", value = "")
updateCheckboxGroupInput(session, "type_wig", selected=character(0))
updateSelectizeInput(session, "expertise_wig", selected=character(0))
updateSelectizeInput(session, "employment_wig", selected=character(0))
updateCheckboxGroupInput(session, "meet_wig", selected=character(0))
}
######################### ui ####################################
ui <- navbarPage(
title = "Mentor/Speaker", theme = shinytheme("flatly"),id="tab",
tabPanel(
title="Sign-Up",
tags$head(
tags$style(
HTML(".shiny-notification {position:fixed; top: calc(50%);left: calc(30%);}")
)
),
fluidRow(
column(width=12,
name_wig,
offset=3
)
),
fluidRow(
column(12,
gender_wig,
offset=3
)
),
fluidRow(
column(12,
linkedin_wig,
offset=3
)
),
fluidRow(
column(12,
photo_wig,
offset=3
)
),
fluidRow(
column(12,
type_wig,
offset=3
)
),
fluidRow(
column(12,
expertise_wig,
offset=3
)
),
fluidRow(
column(width=12,
employment_wig,
offset=3
)
),
fluidRow(
column(
12,
meet_wig,
offset=3
)
),
fluidRow(
column(
3,
clear_wig,
offset=2
),
column(
3,
submit_wig,
offset=1
)
)
),
tabPanel(
title="Search",
sidebarLayout(
sidebarPanel(
width = 3,
checkboxGroupInput(
inputId = "search_type",
label = "Mentor/Speaker",
choices = types,
selected = types
),
checkboxGroupInput(
inputId = "search_gender",
label = "Pronoun",
choices = genders,
selected = genders
),
checkboxGroupInput(
inputId = "search_meet",
label = "Preferred Mentorship Meeting Format",
choices = meets,
selected = meets
),
pickerInput(
inputId = "search_employment",
label = "Primary Employment",
choices = employment,
selected = employment,
multiple = T,
options = list(`actions-box` = TRUE)
),
pickerInput(
inputId = "search_expertise",
label = "Area(s) of Expertise",
choices = expertises,
selected = expertises,
multiple = T,
options = list(`actions-box` = TRUE)
),
actionButton("search_submit", "Search")
),
mainPanel(
width = 9,
dataTableOutput("responses")
)
)
)
)
########################### server ####################################
server <- function(input, output, session) {
### input tab
# When the Submit button is clicked, save the form data
observeEvent(input$submit, {
    # validate the form inputs
validate(
need(input$name_wig!="",
showNotification("Please signup with your name", duration = 0, type = "error", id="name_error")
)
)
removeNotification("name_error", session = getDefaultReactiveDomain())
validate(
need(grepl("linkedin\\.com",input$linkedin_wig),
showNotification("Please add a valid linkedin url", duration = 0, type = "error", id="linkedin_error")
)
)
removeNotification("linkedin_error", session = getDefaultReactiveDomain())
validate(
need(input$type_wig!="",
showNotification("Please select mentor or speaker", duration = 0, type = "error", id="type_error")
)
)
removeNotification("type_error", session = getDefaultReactiveDomain())
# thank the user
    response <- paste0("Thank you for signing up for the Rladies mentor/speaker program!")
showNotification(response, duration = 0, type = "message")
saveData(input)
resetForm(session)
})
# clear the fields
observeEvent(input$clear, {
resetForm(session)
})
# output$responses <- renderDataTable({
# # update with current response when Submit or Delete are clicked
# input$submit
# loadData()
# },escape = FALSE)
#
# #### search tab
v <- reactiveValues(data = loadData())
observeEvent(input$search_submit, {
search_type_bl = unlist(lapply(loadData()$signUp.type, function(x){any(x %in% input$search_type)}))
search_gender_bl = unlist(lapply(loadData()$pronoun, function(x){any(x %in% input$search_gender)}))
search_meet_bl = unlist(lapply(loadData()$preferred.mentor.method, function(x){any(x %in% input$search_meet)}))
search_employment_bl = unlist(lapply(loadData()$primary.employment, function(x){any(x %in% input$search_employment)}))
search_expertise_bl = unlist(lapply(loadData()$expertises, function(x){any(x %in% input$search_expertise)}))
bl = as.logical(search_type_bl*search_gender_bl*search_meet_bl*search_employment_bl*search_expertise_bl)
v$data = loadData()[bl,]
})
output$responses <- renderDataTable({
# update with current response when Submit or Delete are clicked
input$submit
v$data
},escape = FALSE)
}
####### run
shinyApp(ui = ui, server = server)
|
/shinyapp_examples/app_dropbox.R
|
no_license
|
sckinta/example_code
|
R
| false | false | 14,466 |
r
|
testlist <- list(A = structure(c(9.98004437694192e-316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), left = 0L, right = 0L, x = numeric(0))
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result)
|
/mgss/inst/testfiles/MVP_normalfactor_rcpp/AFL_MVP_normalfactor_rcpp/MVP_normalfactor_rcpp_valgrind_files/1615952101-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 292 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-lognorm.R
\docType{class}
\name{Zelig-lognorm-class}
\alias{Zelig-lognorm-class}
\alias{zlognorm}
\title{Log-Normal Regression for Duration Dependent Variables}
\description{
Vignette: \url{http://docs.zeligproject.org/articles/zelig_lognorm.html}
}
\section{Methods}{
\describe{
\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
bootstrap = FALSE)}}{The zelig function estimates a variety of statistical models}
}}
|
/man/Zelig-lognorm-class.Rd
|
no_license
|
mbsabath/Zelig
|
R
| false | true | 526 |
rd
|
#Use the library() function to load the dplyr package.
library(dplyr)
#Import and read in the MechaCar_mpg.csv file as a dataframe.
mtcars <- read.csv("../MechaCar_mpg.csv")
#Perform linear regression using the lm() function. In the lm() function, pass in all six variables (i.e., columns),
#and add the dataframe you created in Step 4 as the data parameter.
#lm(qsec ~ hp,mtcars)
reg <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data = mtcars)
#Using the summary() function, determine the p-value and the r-squared value for the linear regression model.
summary(reg)
#Save your MechaCarChallenge.RScript file to your GitHub repository.
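#Optional (illustrative): the same figures can also be pulled out programmatically.
#summary(reg)$r.squared
#summary(reg)$coefficients[, "Pr(>|t|)"]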
|
/mechacar/deliverable 1.R
|
no_license
|
melaniekwak/MechaCar_Statistical_Analysis
|
R
| false | false | 685 |
r
|
\name{base64encode & base64decode}
\alias{base64encode}
\alias{base64decode}
\title{Convert R vectors to/from the Base64 format }
\description{
Convert R vectors of any type to and from the Base64 format for encoding
 any binary data as strings using an alphanumeric subset of the ASCII character set.
}
\usage{
base64encode(x, size=NA, endian=.Platform$endian)
base64decode(z, what, size=NA, signed = TRUE, endian=.Platform$endian)
}
\arguments{
\item{x}{vector or any structure that can be converted to a vector by
\code{\link{as.vector}} function. Strings are also allowed.}
\item{z}{String with Base64 code, using [A-Z,a-z,0-9,+,/,=] subset of
characters}
\item{what}{Either an object whose mode will give the mode of the vector
to be created, or a character vector of length one describing
the mode: one of '"numeric", "double", "integer", "int",
"logical", "complex", "character", "raw".
Same as variable \code{what} in \code{\link{readBin}} functions. }
\item{size}{ integer. The number of bytes per element in the byte stream
stored in \code{r}. The default, '\code{NA}', uses the natural size.
Same as variable \code{size} in \code{\link{readBin}} functions. }
\item{signed}{logical. Only used for integers of sizes 1 and 2, when it
determines if the quantity stored as raw should be regarded as a
signed or unsigned integer.
Same as variable \code{signed} in \code{\link{readBin}} functions. }
\item{endian}{If provided, can be used to swap endian-ness. Using '"swap"'
will force swapping of byte order. Use '"big"' (big-endian, aka IEEE,
aka "network") or '"little"' (little-endian, format used on PC/Intel
machines) to indicate type of data encoded in "raw" format.
Same as variable \code{endian} in \code{\link{readBin}} functions.}
}
\details{
The Base64 encoding is designed to encode arbitrary binary information for
transmission by electronic mail. It is defined by MIME (Multipurpose Internet
Mail Extensions) specification RFC 1341, RFC 1421, RFC 2045 and others.
Triplets of 8-bit octets are encoded as groups of four characters, each
representing 6 bits of the source 24 bits. Only a 65-character subset
([A-Z,a-z,0-9,+,/,=]) present in all variants of ASCII and EBCDIC is used,
enabling 6 bits to be represented per printable character.
Default \code{size}s for different types of \code{what}: \code{logical} - 4,
\code{integer} - 4, \code{double} - 8 , \code{complex} - 16,
\code{character} - 2, \code{raw} - 1.
}
\value{
Function \code{\link{base64encode}} returns a string with Base64 code.
Function \code{\link{base64decode}} returns vector of appropriate mode
and length (see \code{x} above).
}
\references{
\itemize{
\item Base64 description in \emph{Connected: An Internet Encyclopedia}
\url{http://www.freesoft.org/CIE/RFC/1521/7.htm}
\item MIME RFC 1341 \url{http://www.faqs.org/rfcs/rfc1341.html}
\item MIME RFC 1421 \url{http://www.faqs.org/rfcs/rfc1421.html}
\item MIME RFC 2045 \url{http://www.faqs.org/rfcs/rfc2045.html}
\item Portions of the code are based on Matlab code by Peter Acklam
\url{http://home.online.no/~pjacklam/matlab/software/util/datautil/}
}
}
\author{Jarek Tuszynski (SAIC) \email{jaroslaw.w.tuszynski@saic.com}}
\seealso{
\code{\link[XML]{xmlValue}} from \pkg{XML} package reads XML code
which sometimes is encoded in Base64 format.
\code{\link{readBin}}, \code{\link{writeBin}}
}
\examples{
x = (10*runif(10)>5) # logical
for (i in c(NA, 1, 2, 4)) {
y = base64encode(x, size=i)
z = base64decode(y, typeof(x), size=i)
stopifnot(x==z)
}
print("Checked base64 for encode/decode logical type")
x = as.integer(1:10) # integer
for (i in c(NA, 1, 2, 4)) {
y = base64encode(x, size=i)
z = base64decode(y, typeof(x), size=i)
stopifnot(x==z)
}
print("Checked base64 encode/decode for integer type")
x = (1:10)*pi # double
for (i in c(NA, 4, 8)) {
y = base64encode(x, size=i)
z = base64decode(y, typeof(x), size=i)
stopifnot(mean(abs(x-z))<1e-5)
}
print("Checked base64 for encode/decode double type")
x = log(as.complex(-(1:10)*pi)) # complex
y = base64encode(x)
z = base64decode(y, typeof(x))
stopifnot(x==z)
print("Checked base64 for encode/decode complex type")
x = "Chance favors the prepared mind" # character
y = base64encode(x)
z = base64decode(y, typeof(x))
stopifnot(x==z)
print("Checked base64 for encode/decode character type")
}
\keyword{file}
\concept{XML}
|
/man/base64.Rd
|
no_license
|
spluque/caTools
|
R
| false | false | 4,646 |
rd
|
#load the library
library(ggplot2)
# loading the emissions and source file
emissions_data <- readRDS("summarySCC_PM25.rds")
source_file <- readRDS("Source_Classification_Code.rds")
vehicles <- grepl("vehicle", source_file$SCC.Level.Two, ignore.case=TRUE)
vehiclesSCC <- source_file[vehicles,]$SCC
vehiclesNEI <- emissions_data[emissions_data$SCC %in% vehiclesSCC,]
# Subset the vehicles NEI data by each city's fip and add city name.
vehiclesBaltimore <- vehiclesNEI[vehiclesNEI$fips=="24510",]
vehiclesBaltimore$city <- "Baltimore City"
vehiclesLA <- vehiclesNEI[vehiclesNEI$fips=="06037",]
vehiclesLA$city <- "Los Angeles County"
# Combine the two subsets with city name into one data frame
bothCities <- rbind(vehiclesBaltimore,vehiclesLA)
png("plot6.png")
plot6_viz <- ggplot(bothCities, aes(x=factor(year), y=Emissions, fill=city)) +
geom_bar(aes(fill=year),stat="identity") +
facet_grid(scales="free", space="free", .~city) +
guides(fill=FALSE) + theme_bw() +
labs(x="Year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
print(plot6_viz)
dev.off()
|
/Plot6.R
|
no_license
|
Peter1966-r/Air-Emission-Analysis-NEI-United-States
|
R
| false | false | 1,192 |
r
|
#!/usr/bin/env Rscript
args=commandArgs(trailingOnly=T)
pheno=as.character(args[[1]])
env=as.character(args[[2]])
library('data.table')
library('dplyr')
library('ggplot2')
options(scipen=20)
all_reps=c()
for(c in 1:10){
haps=seq(2,16)
for(h in haps){
r=readRDS(sprintf('test_models/chr%.0f_haplogroup%.0f_%s_x_%s_1000rep_max_pvalues.rds',c,h,pheno,env))
df=c()
df=sapply(seq(1,1000),function(x) rbind(df,unlist(r[[x]])))
df=t(df)
df=as.data.frame(df)
names(df)=c('chr','hapgrp','replicate','pval')
#df$chr=c
#df$replicate=as.numeric(df$replicate)
df=df[!is.na(df$pval),]
#tmp=data.frame(chr=c,replicate=df$replicate,hapgrp=h,pval=df$pval,stringsAsFactors=F)
all_reps=rbind(all_reps,df)
}
}
fwrite(all_reps,sprintf('max_reps/%s_x_%s_rep1000_max_pvalues.txt',pheno,env),quote=F,row.names=F,sep='\t')
minp = all_reps %>% group_by(replicate) %>% summarize(pval=min(pval))
minp=as.data.frame(minp)
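# The 5th percentile of the per-replicate minimum p-values gives the empirical family-wise 0.05 threshold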
threshold=quantile(minp$pval,0.05) # quantile() has no lower.tail argument; 0.05 is already the lower 5% quantile
print(threshold)
print(-log10(threshold))
png(sprintf('%s_x_%s_perm_1000_pval_dist.png',pheno,env))
print(ggplot(minp,aes(x=pval)) + geom_histogram() + geom_vline(xintercept=threshold))
dev.off()
png(sprintf('%s_x_%s_perm_1000_log10pval_dist.png',pheno,env))
print(ggplot(minp,aes(x=-log10(pval))) + geom_histogram() + geom_vline(xintercept=-log10(threshold)))
dev.off()
|
/permutation/find_threshold.R
|
no_license
|
sarahodell/impute
|
R
| false | false | 1,378 |
r
|
setwd('C:/Users/Jain/Desktop/R')
library(e1071)
dataset<-read.csv("list.csv")
train<-dataset[c(2:1500),c(1,5,7)]
train$Grade<-paste(train$Grade,train$Range,sep="")
train$Range<-NULL
test<-dataset[c(1501:3249),c(1,5,7)]
test$Grade<-paste(test$Grade,test$Range,sep="")
test$Range<-NULL
new2<-dataset[c(2:1500),1]
starttime2<-Sys.time()
sv1<-svm(new2 ~.,data=train,cost=100)
pred<-predict(sv1,test)
pred
en2<-Sys.time()
x2<-en2-starttime2
x2
sizesv<-object.size(pred)
sizesv
table(pred,test[,2])
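# The blocks below refit the SVM on subsets of roughly 200, 400, 800 and 1500 rows to record training time and object size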
train1<-train[c(2:200),]
test1<-test[c(2:200),]
new2<-dataset[c(2:200),1]
starttimesv1<-Sys.time()
sv1<-svm(new2 ~.,data=train1,cost=100)
pred<-predict(sv1,test1)
ensv1<-Sys.time()
xsv1<-ensv1-starttimesv1
sizesv1<-object.size(train1)
sizesv1
xsv1
train2<-train[c(2:400),]
test2<-test[c(2:400),]
new2<-dataset[c(2:400),1]
starttimesv2<-Sys.time()
sv1<-svm(new2 ~.,data=train2,cost=100)
pred<-predict(sv1,test2)
ensv2<-Sys.time()
xsv2<-ensv2-starttimesv2
sizesv2<-object.size(train2)
sizesv2
xsv2
train3<-train[c(2:800),]
test3<-test[c(2:800),]
new2<-dataset[c(2:800),1]
starttimesv3<-Sys.time()
sv1<-svm(new2 ~.,data=train3,cost=200)
pred<-predict(sv1,test3)
ensv3<-Sys.time()
xsv3<-ensv3-starttimesv3
sizesv3<-object.size(train3)
sizesv3
xsv3
train4<-train[c(2:1500),]
test4<-test[c(2:1500),]
new2<-dataset[c(2:1500),1]
starttimesv4<-Sys.time()
sv1<-svm(new2 ~.,data=train4,cost=100)
pred<-predict(sv1,test4)
ensv4<-Sys.time()
xsv4<-ensv4-starttimesv4
sizesv4<-object.size(train4)
sizesv4
xsv4
trialsv1<-matrix(c(xsv1,xsv2,xsv3,xsv4),ncol=4)
colnames(trialsv1)<-c(200,400,800,1500)
rownames(trialsv1)<-'TIME'
trialsv1.table<-as.table(trialsv1)
trialsvmem1<-matrix(c(sizesv1,sizesv2,sizesv3,sizesv4),ncol=4)
colnames(trialsvmem1)<-c(200,400,800,1500)
rownames(trialsvmem1)<-'MEMORY'
trialsvmem1.table<-as.table(trialsvmem1)
barplot(trialsv1.table, main="Comparison For SVM Algorithm", ylab="TIME in seconds",xlab="Number of Sequence")
barplot(trialsvmem1.table, main="Comparison For SVM Algorithm", ylab="MEMORY in Bytes",xlab="Number of Sequence")
|
/svmmarks.R
|
no_license
|
gargarchit10/knitadmission
|
R
| false | false | 2,058 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting.R
\name{plot.date.distribution}
\alias{plot.date.distribution}
\title{Plotting Date Distributions}
\usage{
plot.date.distribution(points.benchmarked)
}
\arguments{
\item{points.benchmarked}{Data frame. The output from \code{benchmark()}.}
}
\value{
A \code{ggplot} plot of the date distribution as a histogram.
}
\description{
Plotting Date Distributions
}
|
/man/plot.date.distribution.Rd
|
permissive
|
acelt/aim.analysis
|
R
| false | true | 445 |
rd
|
# read all the data
tlabels <- read.table("test/y_test.txt", col.names="label")
tsubjects <- read.table("test/subject_test.txt", col.names="subject")
tdata <- read.table("test/X_test.txt")
trlabels <- read.table("train/y_train.txt", col.names="label")
trsubjects <- read.table("train/subject_train.txt", col.names="subject")
trdata <- read.table("train/X_train.txt")
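# Column-bind subjects and activity labels to the measurements, then stack the test and train sets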
datatable <- rbind(cbind(tsubjects, tlabels, tdata), cbind(trsubjects, trlabels, trdata))
# read the features
features <- read.table("features.txt", strip.white=TRUE, stringsAsFactors=FALSE)
features.mean.std <- features[grep("mean\\(\\)|std\\(\\)", features$V2), ]
# increment by 2 because data has subjects and labels in the beginning
datatable.mean.std <- datatable[, c(1, 2, features.mean.std$V1+2)]
# read the labels (activities)
labels <- read.table("activity_labels.txt", stringsAsFactors=FALSE)
# replace labels in data with label names
datatable.mean.std$label <- labels[datatable.mean.std$label, 2]
# first make a list of the current column names and feature names
columnam <- c("subject", "label", features.mean.std$V2)
# removing every non-alphabetic character and converting to lowercase
columnam <- tolower(gsub("[^[:alpha:]]", "", columnam))
# then use the list as column names for data
colnames(datatable.mean.std) <- columnam
# find the mean for each combination of subject and label
avgdata <- aggregate(datatable.mean.std[, 3:ncol(datatable.mean.std)],
by=list(subject = datatable.mean.std$subject,
label = datatable.mean.std$label),
mean)
write.table(format(avgdata, scientific=T), "tidydata.txt",
row.names=F, col.names=F, quote=2)
|
/run_analysis.R
|
no_license
|
Hemant2022/getting-and-cleaning-data-
|
R
| false | false | 1,711 |
r
|
setwd(dir = "~/PATH/TO/BMD_TURKEY")
library(randomForest)
library(dplyr)
library(openxlsx)
library(ggplot2)  # needed for the ggplot()/ggsave() calls below
library(tidyr)    # needed for complete() in the final plot
#load data and normalize it
data <- read.csv("data/metabolome_cc.csv");
data2 <- data[,c(-1,-3:-9)]
metabolome_key <- data[,c(2,3,1,4,6,8)]
data2_t <- t(data2[,-1])
column_names <- (data2[,1])
colnames(data2_t) <- column_names
turkey_info <- read.table("data/BMD_design_cc.txt", header = TRUE)
turkey_info <- filter(turkey_info, location =="cc")
str(turkey_info)
turkey_info <- droplevels(turkey_info)
turkey_info <- turkey_info[match(row.names(data2_t), turkey_info$turkey),]
data2_t <- merge(turkey_info, data2_t, by.x = "turkey", by.y = 0)
data <- data2_t
str(data)
days <- unique(turkey_info$day)
trts <- c("sub", "ther")
#d<-7
#t<-"sub"
if (exists("rf_summary")) rm(rf_summary)
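# For each day, fit a control-vs-treatment random forest, save importance plots/tables, and keep metabolites with MeanDecreaseAccuracy > 6 for the summary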
for(d in days){
print(d)
day_ctrl <- filter(data, day==d) %>%
filter(trt=="ctrl")
for(t in trts){
day_trt <- filter(data, day==d) %>%
filter(trt==t)
data_comp <- rbind(day_ctrl, day_trt)
data_comp1 <- data_comp[,c(-1:-3, -5:-9)]
data_comp1$group <- factor(data_comp1$group)
row.names(data_comp1) <- data_comp$turkey
rf_temp <- randomForest(x = data_comp1[, 2:ncol(data_comp1)], y = data_comp1[, 1], importance=TRUE, proximity=TRUE, ntree=5000)
png(paste0("output/d", d, t, "_metabolome_rf.png"), height=1800, width=1800, res = 300)
par(mfrow=c(2,1))
par(pty="s")
varImpPlot(rf_temp, type=1, pch=19, col=1, cex=.5, main="")
varImpPlot(rf_temp, type=2, pch=19, col=1, cex=.5, main="")
dev.off()
fo <- file(paste0("output/d", d, t, "_metabolome_rf.txt"), "w")
imp <- as.data.frame(importance(rf_temp))
write.table(imp, fo, sep="\t")
flush(fo)
close(fo)
colnames(imp)[1:2] <- c("ctrl", "trt")
imp$biochemical <- row.names(imp)
imp$comparison <- t
imp$day <- d
imp <- arrange(imp, desc(MeanDecreaseAccuracy))
imp_filtered <- filter(imp, MeanDecreaseAccuracy > 6)
if(!exists("rf_summary")){
rf_summary <- imp_filtered
} else {
rf_summary <- rbind(rf_summary, imp_filtered)
}
}
}
str(rf_summary$biochemical)
molecules <- read.xlsx("data/molecules_key.xlsx")
str(molecules)
rf_summary_key <- merge(molecules, rf_summary, by.x = "BIOCHEMICAL", by.y = "biochemical")
rf_summary_key$SUPER.PATHWAY <- factor(rf_summary_key$SUPER.PATHWAY)
rf_summary_key$BIOCHEMICAL <- factor(rf_summary_key$BIOCHEMICAL, levels = rf_summary_key$BIOCHEMICAL[order((rf_summary_key$day))], ordered = TRUE)
str(rf_summary_key)
#ggplot(complete(rf_summary_key, BIOCHEMICAL, comparison, day), aes(x = reorder(BIOCHEMICAL, -(day)), y = MeanDecreaseAccuracy, fill = comparison)) +
my_colors <- c(
'#1f78b4', '#33a02c','#fb9a99','#e31a1c',
'#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928',
"#CBD588", "#5F7FC7", "orange","#DA5724", "#508578", "#CD9BCD",
"#AD6F3B", "#673770","#D14285", "#652926", "#C84248",
"#8569D5", "#5E738F","#D1A33D", "#8A7C64", "#599861", "black"
)
ggplot(complete(rf_summary_key, BIOCHEMICAL, comparison, day), aes(x = BIOCHEMICAL, y = MeanDecreaseAccuracy, fill = comparison)) +
facet_grid(.~day) +
geom_bar(stat = "identity", position = "dodge") +
scale_fill_manual(values = my_colors) +
# Remove x axis title
#theme(axis.title.x = element_blank()) +
theme(axis.title.y = element_blank()) +
ylim(c(0,11)) +
guides(fill = guide_legend(reverse = F, keywidth = .5, keyheight = .5, ncol = 1)) +
theme(legend.text=element_text(size=8)) +
#theme(legend.position="bottom") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 10)) +
coord_flip()
ggsave("output/rf_summary.png", height = 7, width = 6)
|
/scripts/randomForest_metabolome.R
|
no_license
|
john2929/bmd_turkey
|
R
| false | false | 3,687 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date_sequence.R
\name{seq_date_by_year}
\alias{seq_date_by_year}
\title{Make a sequence of dates by year}
\usage{
seq_date_by_year(min, max)
}
\arguments{
\item{min}{Date}
\item{max}{Date}
}
\value{
vector of Date
}
\description{
Make a sequence of dates by year, from \code{min} to \code{max}.
}
\examples{
seq_date_by_year(as.Date("2000-01-01"),as.Date("2010-01-01"))
}
|
/man/seq_date_by_year.Rd
|
permissive
|
rea-osaka/retiex
|
R
| false | true | 418 |
rd
|
landscape_OCN <- function(OCN,
slope0=1,
zMin=0,
optimizeDZ=FALSE,
optimMethod="BFGS",
optimControl=list(maxit=100*length(OCN$FD$outlet), trace=1),
displayUpdates=0) {
if (!(displayUpdates %in% c(0,1,2))) {stop("Invalid displayUpdates")}
if (displayUpdates>0){message("Calculating lengths and slopes... \r", appendLF = FALSE)}
AvailableNodes <- setdiff(1:OCN$FD$nNodes,OCN$FD$outlet)
# calculate elevation gain through each pixel
Slope <- slope0*(OCN$FD$A/(OCN$FD$nNodes*OCN$cellsize^2))^(OCN$expEnergy-1)
Length <- rep(0,OCN$FD$nNodes)
kount <- 0
for (i in AvailableNodes){
Length[i] <- sqrt((abs(OCN$FD$X[OCN$FD$downNode[i]]-OCN$FD$X[i]) %% ((OCN$dimX-1)*OCN$cellsize-2*min(OCN$FD$X)))^2 +
(abs(OCN$FD$Y[OCN$FD$downNode[i]]-OCN$FD$Y[i]) %% ((OCN$dimY-1)*OCN$cellsize-2*min(OCN$FD$Y)))^2)
kount <- kount + 1
if (displayUpdates==2){message(sprintf("Calculating lengths and slopes... %.1f%%\r",kount/length(AvailableNodes)*100), appendLF = FALSE)}
}
DeltaZ <- Slope*Length
# build neighbouring nodes at FD level
# find list of possible neighbouring pixels
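  # movement: row 1 holds the row offsets and row 2 the column offsets of the 8 neighbouring cells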
movement <- matrix(c(0,-1,-1,-1,0,1,1,1,1,1,0,-1,-1,-1,0,1),nrow=2,byrow=TRUE)
NeighbouringNodes <- vector("list", OCN$FD$nNodes)
cont_node <- 0
for (cc in 1:OCN$dimX) {
for (rr in 1:OCN$dimY) {
cont_node <- cont_node + 1
neigh_r <- rep(rr,8)+movement[1,]
neigh_c <- rep(cc,8)+movement[2,]
if (OCN$periodicBoundaries == TRUE){
neigh_r[neigh_r==0] <- OCN$dimY
neigh_c[neigh_c==0] <- OCN$dimX
neigh_r[neigh_r>OCN$dimY] <- 1
neigh_c[neigh_c>OCN$dimX] <- 1
}
NotAboundary <- neigh_r>0 & neigh_r<=OCN$dimY & neigh_c>0 & neigh_c<=OCN$dimX # only effective when periodicBoundaries=FALSE
NeighbouringNodes[[cont_node]] <- neigh_r[NotAboundary] + (neigh_c[NotAboundary]-1)*OCN$dimY
}
}
if (displayUpdates>0){message("Calculating lengths and slopes... 100%\n", appendLF = FALSE)}
# find elevation pattern with respect to main outlet
if (displayUpdates>0){message("Determining elevation... \r", appendLF = FALSE)}
kount <- 0
Z <- numeric(OCN$FD$nNodes)
FD_to_CM <- numeric(OCN$FD$nNodes)
CM_to_FD <- vector("list",OCN$nOutlet)
for (outlet in 1:length(OCN$FD$outlet)){
next_nodes <- OCN$FD$outlet[outlet]
FD_to_CM[OCN$FD$outlet[outlet]] <- outlet
CM_to_FD[[outlet]] <- OCN$FD$outlet[outlet]
while (length(next_nodes)>0) {
current_nodes <- next_nodes
kount <- kount + length(current_nodes)
next_nodes <- integer(0) # empty next_nodes
for (i in 1:length(current_nodes)){
node <- current_nodes[i]
neighbours <- which(OCN$FD$downNode==node)
Z[neighbours] <- Z[node] + DeltaZ[neighbours]
FD_to_CM[neighbours] <- outlet
CM_to_FD[[outlet]] <- c(CM_to_FD[[outlet]],neighbours)
next_nodes <- c(next_nodes,neighbours)
}
if (displayUpdates==2){message(sprintf("Determining elevation... %.1f%%\r",kount/OCN$FD$nNodes*100), appendLF = FALSE)}
}
}
# determine catchment area
A <- numeric(OCN$nOutlet)
for (i in 1:OCN$nOutlet){
A[i] <- sum(FD_to_CM==i)*OCN$cellsize^2
}
sortA <- sort(A,decreasing=TRUE,index.return=TRUE)
# adjacency matrix at catchment level
if (OCN$nOutlet>1){
#W_CM <- sparseMatrix(i=1,j=1,x=0,dims=c(OCN$nOutlet,OCN$nOutlet))
W_CM <- spam(0,OCN$nOutlet,OCN$nOutlet)
for (i in 1:OCN$nOutlet){
for (k in 1:length(CM_to_FD[[i]])){
ind <- CM_to_FD[[i]][k]
set <- NeighbouringNodes[[ind]]
NeighSubcatch <- FD_to_CM[set]
NeighSubcatch <- NeighSubcatch[!is.nan(NeighSubcatch)]
Border <- which(NeighSubcatch!=i)
if (length(Border)>0) {W_CM[i,unique(NeighSubcatch[Border])] <- 1}
}
}
}else {W_CM <- 0}
if (displayUpdates>0){message("Determining elevation... 100%\n", appendLF = FALSE)}
# find altitude of secondary outlets with respect to altitude of the main outlet
if (optimizeDZ==TRUE){
if (displayUpdates==1){message("Optimizing outlet elevations... \r", appendLF = FALSE)}
if (length(OCN$FD$outlet)>1){
if (optimControl$trace>0) {message("Optimizing outlet elevations...\n", appendLF = FALSE)}
CatchmentMat <- matrix(data=FD_to_CM,nrow=OCN$dimY,ncol=OCN$dimX)
# find border pixels between catchments
# BorderMat <- sparseMatrix(i=1,j=1,x=0,dims=c(OCN$FD$nNodes,OCN$FD$nNodes))
BorderMat <- spam(0,OCN$FD$nNodes,OCN$FD$nNodes)
ind <- matrix(0,1000*OCN$FD$nNodes,2)
k <- 1
for (i in 1:OCN$FD$nNodes){
NeighCatch <- FD_to_CM[NeighbouringNodes[[i]]]
isBorder <- (NeighCatch!=FD_to_CM[i])
len <- length(NeighCatch[isBorder])
if (len>0){
# BorderMat[i,NeighbouringNodes[[i]][isBorder]] <- 1
if ((k+len-1) <= dim(ind)[1]){
ind[k:(k+len-1),] <- matrix(c(rep(i,len),NeighbouringNodes[[i]][isBorder]),nrow=len,ncol=2)
} else {ind <- rbind(ind,matrix(c(rep(i,len),NeighbouringNodes[[i]][isBorder]),nrow=len,ncol=2))}
k <- k + len
}
}
ind <- ind[-which(ind[,1]==0),]
BorderMat[ind] <- 1
# function for minimization of delta Z at the catchment borders
OptimizeDeltaZ <- function(x) {
UpdateZ <- rep(0,OCN$FD$nNodes)
# the elevation of the biggest catchment is not changed
for (i in 1:(length(OCN$FD$outlet)-1)){
UpdateZ <- UpdateZ + (FD_to_CM==(sortA$ix[i+1]))*x[i]}
Znew <- Z+UpdateZ
# Znew <- Znew %*% t(1+numeric(length(Z)))
mat <- BorderMat
mat@entries <- mat@entries*rep(Znew, diff(mat@rowpointers))
sum(abs(mat - t(mat) )) # functional to be minimized
}
      # minimize OptimizeDeltaZ via optim (method given by optimMethod; default "BFGS")
OptList <- optim(rep(0,length(OCN$FD$outlet)-1),OptimizeDeltaZ,method=optimMethod,
control=optimControl)
Z_lifts <- OptList$par
# apply lifting to catchments
UpdateZ <- rep(0,OCN$FD$nNodes)
for (i in 1:(length(OCN$FD$outlet)-1)){
UpdateZ <- UpdateZ + (FD_to_CM==(sortA$ix[i+1]))*Z_lifts[i]}
Z <- Z+UpdateZ
if (min(Z_lifts)<0) {
Z <- Z - min(Z_lifts)
#print(sprintf("Outlet of main catchment has been lifted by %.2f elevation units",- min(Z_lifts)))
}
}
if (displayUpdates>0){message("Optimizing outlet elevations... 100%\n", appendLF = FALSE)}
}
Z <- Z + zMin
  # exact drawing for periodic-boundary networks: unwrap X, Y coordinates across the domain edges
X_draw <- OCN$FD$X # new vector of X coordinates
Y_draw <- OCN$FD$Y # new vector of Y coordinates
if(OCN$periodicBoundaries==TRUE){
if (displayUpdates>0){message("Calculating real X, Y coordinates... \r", appendLF = FALSE)}
kount <- 0
CurrentPath <- OCN$FD$outlet # start reattributing coordinates from outlet(s)
while (length(CurrentPath)>0){ # iterate until all pixels have been explored
ContinuePath <- vector(mode="numeric", length=0) # create empty set of pixels upstream of CurrentPath
for (k in 1:length(CurrentPath)){
UpOneLevel <- which(OCN$FD$downNode==CurrentPath[k]) # find pixels upstream of CurrentPath
if (length(UpOneLevel)>0){ # if CurrentPath[k] is not a headwater, continue
for (i in 1:length(UpOneLevel)){
# reattribute X coordinate of UpOneLevel[i]
if (X_draw[UpOneLevel[i]]-X_draw[CurrentPath[k]] > OCN$cellsize){
X_draw[UpOneLevel[i]] <- X_draw[UpOneLevel[i]] -
OCN$cellsize*OCN$dimX*(1 + floor((X_draw[UpOneLevel[i]] - X_draw[CurrentPath[k]] - 2*OCN$cellsize)/(OCN$dimX*OCN$cellsize)))
# the factor floor(...) is added to adjust for pixels that are flipped several times
} else if (X_draw[UpOneLevel[i]]-X_draw[CurrentPath[k]] < -OCN$cellsize) {
X_draw[UpOneLevel[i]] <- X_draw[UpOneLevel[i]] +
OCN$cellsize*OCN$dimX*(1+floor((X_draw[CurrentPath[k]] - X_draw[UpOneLevel[i]] - 2*OCN$cellsize)/(OCN$dimX*OCN$cellsize)))
}
# reattribute Y coordinate of UpOneLevel[i]
if (Y_draw[UpOneLevel[i]]-Y_draw[CurrentPath[k]] > OCN$cellsize){
Y_draw[UpOneLevel[i]] <- Y_draw[UpOneLevel[i]] -
OCN$cellsize*OCN$dimY*(1+floor((Y_draw[UpOneLevel[i]] - Y_draw[CurrentPath[k]] - 2*OCN$cellsize)/(OCN$dimY*OCN$cellsize)))
} else if (Y_draw[UpOneLevel[i]]-Y_draw[CurrentPath[k]] < -OCN$cellsize) {
Y_draw[UpOneLevel[i]] <- Y_draw[UpOneLevel[i]] +
OCN$cellsize*OCN$dimY*(1+floor((Y_draw[CurrentPath[k]] - Y_draw[UpOneLevel[i]] - 2*OCN$cellsize)/(OCN$dimY*OCN$cellsize)))
}
}
}
ContinuePath <- c(ContinuePath,UpOneLevel) # add UpOneLevel to set of pixels that will be explored on the next iteration
}
CurrentPath <- ContinuePath # move to next iteration
kount <- kount + length(CurrentPath)
if (displayUpdates==2){message(sprintf("Calculating real X, Y coordinates... %.1f%%\r",kount/OCN$FD$nNodes*100), appendLF = FALSE)}
}
}
if (displayUpdates>0){message("Calculating real X, Y coordinates... 100%\n", appendLF = FALSE)}
if (displayUpdates>0){message("Calculating catchment contour(s)... ", appendLF = FALSE)}
# determine contour of catchments (with original coordinates)
X_contour <- vector("list", length = OCN$nOutlet)
Y_contour <- vector("list", length = OCN$nOutlet)
kount <- 0
for (j in 1:OCN$nOutlet){
subset_X <- OCN$FD$X[FD_to_CM==j]
subset_Y <- OCN$FD$Y[FD_to_CM==j]
X_mesh <- seq(min(subset_X)-OCN$cellsize,max(subset_X)+OCN$cellsize,OCN$cellsize)
Y_mesh <- seq(min(subset_Y)-OCN$cellsize,max(subset_Y)+OCN$cellsize,OCN$cellsize)
mesh <- matrix(data=0,nrow=length(Y_mesh),ncol=length(X_mesh))
for (i in 1:OCN$FD$nNodes){
ind_x <- which(X_mesh==subset_X[i])
ind_y <- which(Y_mesh==subset_Y[i])
mesh[ind_y,ind_x] <- 1
}
count <- contourLines(X_mesh,Y_mesh,t(mesh),levels=1)
X_contour[[j]] <- vector("list", length = length(count))
Y_contour[[j]] <- vector("list", length = length(count))
for (k in 1:length(count)){
X_contour[[j]][[k]] <- count[[k]]$x
Y_contour[[j]][[k]] <- count[[k]]$y
}
}
# determine contour of catchments (for real-shaped OCN)
X_contour_draw <- vector("list", length = OCN$nOutlet)
Y_contour_draw <- vector("list", length = OCN$nOutlet)
for (j in 1:OCN$nOutlet){
subset_X <- X_draw[FD_to_CM==j]
subset_Y <- Y_draw[FD_to_CM==j]
X_mesh <- seq(min(subset_X)-OCN$cellsize,max(subset_X)+OCN$cellsize,OCN$cellsize)
Y_mesh <- seq(min(subset_Y)-OCN$cellsize,max(subset_Y)+OCN$cellsize,OCN$cellsize)
mesh <- matrix(data=0,nrow=length(Y_mesh),ncol=length(X_mesh))
for (i in 1:OCN$FD$nNodes){
ind_x <- which(X_mesh==subset_X[i])
ind_y <- which(Y_mesh==subset_Y[i])
mesh[ind_y,ind_x] <- 1
}
count <- contourLines(X_mesh,Y_mesh,t(mesh),levels=1)
X_contour_draw[[j]] <- vector("list", length = length(count))
Y_contour_draw[[j]] <- vector("list", length = length(count))
for (k in 1:length(count)){
X_contour_draw[[j]][[k]] <- count[[k]]$x
Y_contour_draw[[j]][[k]] <- count[[k]]$y
}
}
if (displayUpdates>0){message(" 100%\n", appendLF = FALSE)}
# add results to OCN list
OCN$FD[["slope"]] <- Slope
OCN$FD[["leng"]] <- Length
OCN$FD[["toCM"]] <- FD_to_CM
OCN$FD[["XDraw"]] <- X_draw
OCN$FD[["YDraw"]] <- Y_draw
OCN$FD[["Z"]] <- Z
OCN$CM[["A"]] <- A
OCN$CM[["W"]] <- W_CM
OCN$CM[["XContour"]] <- X_contour
OCN$CM[["YContour"]] <- Y_contour
OCN$CM[["XContourDraw"]] <- X_contour_draw
OCN$CM[["YContourDraw"]] <- Y_contour_draw
OCN$slope0 <- slope0
OCN$zMin <- zMin
if (optimizeDZ==TRUE) {OCN$optList <- OptList}
invisible(OCN)
}
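# (Added sketch, not part of the original source) Typical call sequence, assuming
# the usual OCNet workflow in which create_OCN() builds the optimized network first:
# OCN <- create_OCN(20, 20)
# OCN <- landscape_OCN(OCN, slope0 = 0.01, optimizeDZ = FALSE)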
|
/R/landscape_OCN.R
|
no_license
|
jsta/OCNet
|
R
| false | false | 12,387 |
r
|
#2.1.determine cells of dataframe within growing season Tmin
setwd(species.dir)
files=list.files()
files.of.interest=grep('tmin', files, value=TRUE)
load(file=files.of.interest)
tout=NULL
tout = tmindb; tout[,] = NA
for (ii in 1:12) {
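    # window of months ii..ii+gmin, wrapping around the year ((ii:(ii+gmin)) %% 12 + 1 indexes month columns 1..12)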
cols.of.interest = 1+(ii:(ii+gmin)%%12)
tout[,ii] = rowSums(tmindb[,cols.of.interest],na.rm=TRUE)
}
save(tout, file=paste(species.dir, 'gmin.', files.of.interest, sep=''))
#2.2.determine cells of dataframe within growing season Tmax
files=list.files()
files.of.interest=grep('tmax', files, value=TRUE)
load(file=files.of.interest)
tout=NULL
tout = tmaxdb; tout[,] = NA
for (ii in 1:12) {
cols.of.interest = 1+(ii:(ii+gmin)%%12)
tout[,ii] = rowSums(tmaxdb[,cols.of.interest],na.rm=TRUE)
}
save(tout, file=paste(species.dir, 'gmin.', files.of.interest, sep=''))
#3.cellsuitability by growing season
files=list.files()
files.of.interest=grep('gmin',files, value=TRUE)
tout=NULL
for (tfile in files.of.interest) { cat(tfile,'\n')
load(tfile)
for (ii in 1:12) {
tout[which(tout[,ii]<gmin),ii] = 0
tout[which(tout[,ii]>=gmin),ii] = 1
}
save(tout, file=paste(species.dir, 'true.', tfile, sep=''))
}
#4.for one row/col, find max (has this pos ever been suitable for gmin?)
pos.tmin = pos
pos.tmax = pos
files=list.files()
files.of.interest=grep('true',files, value=TRUE)
tmin=grep('tmin',files.of.interest,value=TRUE)
tmax=grep('tmax',files.of.interest,value=TRUE)
#4.1 tmin
load(tmin)
pos.tmin$current=apply(tout,1,max)
save(pos.tmin, file=paste(species.dir,species,'.tmin.rData', sep=''))
#4.2.tmax
load(tmax)
tt=NULL
pos.tmax$current=apply(tout,1,max)
save(pos.tmax, file=paste(species.dir,species,'.tmax.rData', sep=''))
#5.positions suitable both for min and max temp
copy.tmin=pos.tmin
copy.tmax=pos.tmax
pos.temp = pos
pos.temp$total.temp= copy.tmin$current+ copy.tmax$current
pos.temp$total.temp[which(pos.temp$total.temp<2)]=0
pos.temp$total.temp[which(pos.temp$total.temp==2)]=1
|
/ecocrop/refining/current.lessthan_twelve.r
|
no_license
|
LaurenHodgson/Projects
|
R
| false | false | 1,944 |
r
|
context('GetDeepComps')
set_zillow_web_service_id('X1-ZWz181enkd4cgb_82rpe')
zapi_key = getOption('ZillowR-zws_id')
test_that(" provide the correct zip code", {
# expect an error due to incorrect zip code
expect_error(GetDeepComps(abc, count=10, rentzestimate=FALSE, api_key=zapi_key,raw=FALSE))
# expect an error due to incorrect zip code
expect_error(GetDeepComps(a1b2c3, count=10, rentzestimate=FALSE, api_key=zapi_key,raw=FALSE))
})
test_that(" provide the correct count code", {
# expect an error due to incorrect count code
expect_error(GetDeepComps('1341571', count=abc, rentzestimate=TRUE, api_key=zapi_key,raw=FALSE))
# expect an error due to incorrect count code
expect_error(GetDeepComps('1341571', count=a1b2c3, rentzestimate=TRUE, api_key=zapi_key,raw=FALSE))
})
test_that(" do not include a rentzestimate", {
# expect an error due to incorrect rentzestimate
expect_error(GetDeepComps('1341571', count=10, rentzestimate=abs, api_key=zapi_key,raw=FALSE))
  # expect an error due to incorrect rentzestimate
expect_error(GetDeepComps('1341571', count=10, rentzestimate=a1b2c3, api_key=zapi_key,raw=FALSE))
})
test_that(" provide the correct api key", {
# expect an error due to incorrect api key
expect_error(GetDeepComps('1341571', count=10, rentzestimate=FALSE, api_key=abc,raw=FALSE))
# expect an error due to incorrect api key
expect_error(GetDeepComps('1341571', count=10, rentzestimate=FALSE, api_key=a1b2c3,raw=FALSE))
})
test_that(" output is a dataframe", {
# expect data frame
expect_s3_class(GetDeepComps('1341571', count=10, rentzestimate=FALSE, api_key=zapi_key,raw=FALSE), "data.frame")
})
|
/R/tests/testthat/test-GetDeepComps.R
|
no_license
|
stharms/realEstAnalytics.r
|
R
| false | false | 1,664 |
r
|
# Ji and Jiang "Enlightened One-Party Rule? Ideological Differences between Chinese Communist Party Members and the Mass Public"
# Replication File -- Figures 2019.4.19
# Required packages
# install.packages("openxlsx")
# install.packages("ggplot2")
# install.packages("tidyr")
# install.packages("readstata13")
# install.packages("ggridges")
# install.packages("gridExtra")
######################################################################################
# Figure 1 Sample Deviation from Population Statistics: Before and After Reweighting #
######################################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
data<-read.xlsx("ji_and_jiang_2019/Fig.xlsx",sheet=1) %>% View()
datatidyr <- data %>% gather(attribute, value, CCP:age)
datatidyr$att <- factor(datatidyr$attribute, levels = c("CCP", "female", "edu", "age"),labels=c("% CCP membership","% of female CCP member","% of college educated CCP member","% of CCP member at age<=35"))
datatidyr$survey1 <- factor(datatidyr$survey, levels = c("ABS4(2015)", "ABS3(2011)", "CGSS2015", "CGSS2013","CGSS2012","CGSS2010","CFPS2014"))
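# Figure 1: one panel per survey; points show sample-minus-population percentages before and after weight calibration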
pdf("Figure1.pdf",width=10, height=4)
ggplot(datatidyr,aes(x=value, y=att,group=as.factor(group),
shape=as.factor(group),color=as.factor(group)))+
geom_point(size = 2)+
facet_grid(. ~ survey1)+
scale_y_discrete(limits=c("% of CCP member at age<=35","% of college educated CCP member",
"% of female CCP member","% CCP membership"))+
geom_vline(aes(xintercept=0), colour="#62BCE9", linetype="dashed")+
scale_color_brewer(palette="Set1",
name = "",
breaks=c("Origin", "Raked"),
labels=c("Before weight calibration","After weight calibration"))+
scale_shape_manual(name = "",
breaks=c("Origin", "Raked"),values=c(19,17),
labels=c("Before weight calibration","After weight calibration"))+
xlab("% in Survey - % in Population")+ylab("")+
scale_x_continuous(limits=c(-0.22,0.1),labels = scales::percent_format(accuracy = 1))+
theme_bw() +
theme(axis.text.x = element_text(size=8),
axis.text.y = element_text(size=11),
axis.title.x=element_text(face="bold",vjust=-.5,size=11),
axis.title.y=element_text(face="bold",vjust=1),
legend.text = element_text(size=11),
strip.text=element_text(size=13),
text=element_text(family="Times"),
legend.position="bottom",
legend.direction="horizontal")
dev.off()
#######################################################
# Figure 2: Main Result: Weighted Difference in Means #
#######################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
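# dg_* data frames hold the text annotations (weighted group means with confidence intervals) printed inside each panel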
dg_cfps <-data.frame(values=unique(datatidyr$values),labels1=c("Public:0.093\n[0.082,0.104]","","",""),labels2=c("CCP:0.410\n[0.373,0.447]","","",""))
dg_cgss2010<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.090\n[-0.109,-0.071]","","",""),labels2=c("CCP:0.337\n[0.287,0.386]","","",""))
dg_cgss2012<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.050\n[-0.069,-0.031]","","",""),labels2=c("CCP:0.412\n[0.360,0.463]","","",""))
dg_cgss2013<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.031\n[-0.050,-0.012]","","",""),labels2=c("CCP:0.301\n[0.243,0.360]","","",""))
dg_cgss2015<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.054\n[-0.074,-0.035]","","",""),labels2=c("CCP:0.287\n[0.229,0.345]","","",""))
dg_abs3<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.012\n[-0.025,0.048]",
"Public:0.012\n[-0.025,0.048]",
"Public:0.005\n[-0.031,0.041]",
"Public:0.012\n[-0.024,0.048]"),
labels2=c("CCP:0.230\n[0.150,0.310]",
"CCP:0.326\n[0.244,0.407]",
"CCP:0.169\n[0.084,0.254]",
"CCP:0.347\n[0.265,0.429]"))
dg_abs4<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.084\n[0.052,0.115]",
"Public:0.122\n[0.091,0.153]",
"Public:0.041\n[0.009,0.074]",
"Public:0.125\n[0.094,0.156]"),
labels2=c("CCP:0.421\n[0.327,0.516]",
"CCP:0.351\n[0.259,0.444]",
"CCP:0.165\n[0.056,0.275]",
"CCP:0.458\n[0.368,0.549]"))
pdf("Figure2.pdf",width=10, height=8)
ggplot(weight = datatidyr$wcn2)+
geom_density_ridges(data=datatidyr,aes(y = str_survey,
x = value,color = paste(str_survey, party),
fill = paste(str_survey, party),
linetype = paste(str_survey, party)),
alpha = .001,na.rm = FALSE,scale = 0.95)+
facet_grid(. ~ values)+
scale_fill_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_color_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c(1,5),
name = "") +
ylab("")+xlab("")+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
theme_bw() + scale_x_continuous(expand=c(0.01,0))+
scale_y_discrete(expand=c(0.01,0),
limits=c("CFPS2014","CGSS2010","CGSS2012",
"CGSS2013","CGSS2015",
"ABS3(2011)","ABS4(2015)"))+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
###################################################
# Figure 3: Citizen-Party Member-Cadre Comparison #
###################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
data=data[complete.cases(data$cate),]
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
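# Figure 3 compares the weighted value distributions of the public, CCP members, and cadres (cate = 0/1/2)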
pdf("Figure3.pdf",width=10, height=8)
ggplot(weight = datatidyr$wcn2)+
geom_density_ridges(data=datatidyr,aes(y = values,
x = value,
color = paste(values, cate),
linetype=paste(values, cate),
fill = paste(values, cate)),
alpha = .001,na.rm = FALSE,scale=0.95)+
scale_fill_cyclical(breaks = c("0", "1","2"),
labels = c("Public","CCP Member","Cadre"),
values = c("#377eb8","#e41a1c","#4daf4a"),
name = "") +
scale_color_cyclical(breaks = c("0", "1","2"),
labels = c("Public","CCP Member","Cadre"),
values = c("#377eb8","#e41a1c","#4daf4a"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1","2"),
labels = c("Public","CCP Member","Cadre"),
values = c(1,2,4),
name = "") +
ylab("")+xlab("")+
scale_y_discrete(expand=c(0.01,0),
limits=c("Overall modern value",
"International",
"Political",
"Social"))+
annotate(geom="text", x=3, y=4.4,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.011\n[0.004,0.018]")+
annotate(geom="text", x=3, y=4.25,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.328\n[0.305,0.351]")+
annotate(geom="text", x=3, y=4.1,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.563\n[0.508,0.618]")+
annotate(geom="text", x=3, y=3.5,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.122\n[0.091,0.153]")+
annotate(geom="text", x=3, y=3.35,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.356\n[0.255,0.457]")+
annotate(geom="text", x=3, y=3.2,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.317\n[0.116,0.518]")+
annotate(geom="text", x=3, y=2.5,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.041\n[0.009,0.074]")+
annotate(geom="text", x=3, y=2.35,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.174\n[0.057,0.291]")+
annotate(geom="text", x=3, y=2.2,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.100\n[-0.220,0.420]")+
annotate(geom="text", x=3, y=1.5,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.125\n[0.094,0.156]")+
annotate(geom="text", x=3, y=1.35,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.465\n[0.366,0.564]")+
annotate(geom="text", x=3, y=1.2,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.409\n[0.206,0.613]")+
theme_bw()+theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",size=16,vjust=-.5),
axis.title.y=element_text(face="bold",size=16,vjust=1),
axis.text.x=element_text(size=12),
axis.text.y=element_text(size=14),
strip.text=element_text(size=13))
dev.off()
#####################################################################################
# Figure 4: Party-Public Comparison in Subsamples Less Affected by Social Desirability #
#####################################################################################
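# Point estimates with 95% confidence intervals of public vs. CCP-member weighted
# means, faceted by value dimension, for the five subsample definitions listed
# below (read from Fig.xlsx, sheet 4).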
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=4)
data$Value <- factor(data$Value, levels = c("social","political","intl","modern"),labels=c("Social","Political","International","Overall modern value"))
data$question <- factor(data$question, levels = c('Answer reliable.',
'No doubts about the survey.',
'"Government officials often violate law and abuse power"',
'"Officials often conceal information from the public"',
'"Criminal officials often escape punishment"'),
labels=c('Answer reliable.',
'No doubts about the survey.',
'"Government officials often \n violate law and abuse power"',
'"Officials often conceal \n information from the public"',
'"Criminal officials often \n escape punishment"'))
pd <- position_dodge(0.6)
pdf("Figure4.pdf",width=9, height=5)
ggplot(data,aes(x=question, y=mean,
group=as.factor(party),
shape=as.factor(party),
color=as.factor(party),
label = round(mean,3)))+
geom_point(size = 1.5,position=pd)+
geom_hline(aes(yintercept=0), colour="#62BCE9", linetype="dashed")+
geom_text(size=2.8,hjust = 0.5,vjust = -0.6,position=pd,show.legend=FALSE)+
geom_errorbar(aes(ymin=lb95,
ymax=ub95),width = 0,position=pd)+
facet_grid(. ~ Value)+
scale_color_manual(values=c("#377eb8","#e41a1c"),
name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"))+
scale_shape_manual(name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"),
values=c(19,17))+
xlab("")+ylab("Weighted Mean Estimator")+
scale_y_continuous(limits=c(-0.1,0.8),breaks=seq(-0,0.8,0.2))+
theme_bw()+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",size=16,vjust=-.5),
axis.title.y=element_text(face="bold",size=16,vjust=1),
axis.text.x = element_text(size=12),
axis.text.y = element_text(hjust=0),
axis.text.y.left = element_text(size=10),
strip.text=element_text(size=13),
legend.text = element_text(size=10))+
coord_flip()
dev.off()
########################################
# Figure 5: Heterogeneous Party Effects #
########################################
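# Predicted ideology for the public vs. CCP members across education, gender,
# residency, age, and income subgroups (Fig.xlsx, sheet 5), shown as point
# estimates with 95% confidence intervals and faceted by value dimension.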
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=5)
data$Value <- factor(data$Value, levels = c("social", "political", "intl", "modern"),labels=c("Social","Political","International","Overall modern value"))
pd <- position_dodge(0.8)
pdf("Figure5.pdf", width=9, height=8)
ggplot(data,aes(x=cate,y=mean,label=round(mean,3),
color=as.factor(party),shape=as.factor(party)))+
geom_hline(aes(yintercept=0), colour="#62BCE9", linetype="dashed")+
geom_point(size = 1.8,position=pd)+
geom_errorbar(aes(ymin=lb95,ymax=ub95),width = 0,position=pd)+
geom_text(size=2.8,hjust = 0.5,vjust = -0.6,
position=pd,show.legend=FALSE)+
facet_grid(~Value)+
scale_y_continuous(limits=c(-0.6,1.02),breaks=seq(-0.4,1.0,0.4))+
scale_x_discrete(limits=c(" 75%-100% (highest)",
" 25%-75% (middle)",
" 0%-25% (lowest)",
"Income level",
" Old (60)",
" Middle age (45)",
" Young (30)","Age",
" Urban"," Rural","Residency",
" Female"," Male","Gender",
" College",
" No college",
"Education"))+
scale_color_manual(values=c("#377eb8","#e41a1c"),
name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"))+
scale_shape_manual(name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"),
values=c(19,17))+
xlab("")+ylab("Predicted Ideology")+theme_bw() +
theme(text=element_text(family="Times"),
legend.position="bottom",
legend.direction="horizontal",
axis.title.x=element_text(face="bold",size=16,vjust=-.5),
axis.title.y=element_text(face="bold",size=16,vjust=1),
axis.text.x=element_text(size=12),
axis.text.y = element_text(size=c(10,10,10,11,10,10,10,
11,10,10,11,10,10,11,10,10,11),
face = c('plain','plain','plain','bold',
'plain','plain','plain','bold',
'plain','plain','bold','plain',
'plain','bold','plain','plain',
'bold'),hjust = 0),
strip.text.x = element_text(size=13),
legend.text = element_text(size=11))+
coord_flip()
dev.off()
####################################
# Figure A.1: PCA and Cronbach's α #
####################################
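# Scree plots: share of variance explained by each principal component for every
# survey and value dimension, with Cronbach's alpha printed inside each panel
# (Fig.xlsx, sheet 6).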
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=6)
data<-subset(data,data$comp<11)
data$Value <- factor(data$Value, levels = c("social", "political",
"intl", "modern"),
labels=c("Social","Political","International","Overall modern value"))
data$survey <- factor(data$survey, levels = c("ABS4(2015)", "ABS3(2011)", "CGSS2015", "CGSS2013","CGSS2012","CGSS2010","CFPS2014"))
pdf("Figurea1.pdf",width=10, height=8)
ggplot(data,aes(x=comp,y=pro,label=propt))+
geom_bar(stat="identity")+geom_line()+geom_point()+
geom_text(vjust=-0.1,hjust=-0.1)+
geom_text(data=data,mapping=aes(x=9,y=0.5,label=alpha),
size=4,lineheight=0.8,hjust = 1,parse = TRUE)+
facet_grid(survey ~ Value)+
scale_x_continuous(limits=c(0.5,10.5),breaks=seq(1,10,1))+
scale_y_continuous(limits=c(0,0.64))+
ylab("Explained variances share")+xlab("Component Number")+
theme_bw()+theme(text=element_text(family="Times"),
legend.position="bottom",
legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
title=element_text(face="bold",size=16,vjust=1.5),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
############################################################################
# Figure A.3: Comparing Demographics of CCP Members and Population Census #
############################################################################
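# Grouped bar chart comparing gender, age-group, education, and minority shares
# in the CCP membership census with the population census (Fig.xlsx, sheet 7).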
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=7)
datatidyr <- data %>% gather(census, value, Population.Census:CCP.Census)
theme_set(theme_bw() + theme(text=element_text(family="Times"),legend.position="bottom",legend.direction="horizontal",axis.title.x=element_text(face="bold",vjust=-.5),axis.title.y=element_text(face="bold",vjust=1),title=element_text(face="bold",size=16,vjust=1.5),axis.text.x=element_text(size=12),strip.text=element_text(size=13)))
pdf("Figurea3.pdf",width=6, height=8)
ggplot(datatidyr,aes(x=att, y=value,fill=census))+
geom_bar(stat = "identity",position="dodge",
colour="black",alpha=0.5)+
scale_fill_brewer(palette="Set1",name = "",
breaks=c("CCP.Census", "Population.Census"),
labels=c("CCP Census", "Population Census"))+
scale_x_discrete(limits=c("minority","some university and above",
"71 and above","66-70","61-65",
"56-60","51-55","46-50","41-45",
"36-40","31-35","18-30","female"))+
xlab("Attribute")+ylab("Proportion")+
coord_flip()
dev.off()
###########################################################
# Figure A.4: Party-Public Value Differences (No Weights) #
###########################################################
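# Party vs. public ridgeline densities by survey, estimated without weights; the
# data frames below store the unweighted group means and 95% confidence
# intervals that are drawn as text annotations.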
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
dg_cfps <-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.021\n[-0.032,-0.010]","","",""),labels2=c("CCP:0.247\n[0.210,0.284]","","",""))
dg_cgss2010<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.055\n[-0.074,-0.036]","","",""),labels2=c("CCP:0.391\n[0.341,0.440]","","",""))
dg_cgss2012<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.051\n[-0.070,-0.032]","","",""),labels2=c("CCP:0.376\n[0.325,0.427]","","",""))
dg_cgss2013<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.031\n[-0.050,-0.012]","","",""),labels2=c("CCP:0.275\n[0.216,0.334]","","",""))
dg_cgss2015<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.035\n[-0.055,-0.015]","","",""),labels2=c("CCP:0.302\n[0.244,0.360]","","",""))
dg_abs3<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:-0.020\n[-0.056,0.016]",
"Public:-0.035\n[-0.071,0.001]",
"Public:-0.020\n[-0.056,0.016]",
"Public:-0.037\n[-0.074,-0.001]"),
labels2=c("CCP:0.125\n[0.044,0.206]",
"CCP:0.197\n[0.118,0.275]",
"CCP:0.113\n[0.029,0.198]",
"CCP:0.211\n[0.132,0.290]"))
dg_abs4<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:-0.032\n[-0.065,0.000]",
"Public:-0.023\n[-0.055,0.010]",
"Public:-0.017\n[-0.050,0.015]",
"Public:-0.036\n[-0.069,-0.004]"),
labels2=c("CCP:0.292\n[0.198,0.387]",
"CCP:0.203\n[0.108,0.299]",
"CCP:0.096\n[-0.009,0.200]",
"CCP:0.296\n[0.204,0.388]"))
pdf("Figurea4.pdf",width=10, height=8)
ggplot()+
geom_density_ridges(data=datatidyr,aes(y = str_survey,
x = value,color = paste(str_survey, party),
fill = paste(str_survey, party),
linetype = paste(str_survey, party)),
alpha = .001,na.rm = FALSE,scale = 0.95)+
facet_grid(. ~ values)+
scale_fill_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_color_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c(1,5),
name = "") +
ylab("")+xlab("")+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
theme_bw() + scale_x_continuous(expand=c(0.01,0))+
scale_y_discrete(expand=c(0.01,0),
limits=c("CFPS2014","CGSS2010","CGSS2012",
"CGSS2013","CGSS2015",
"ABS3(2011)","ABS4(2015)"))+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
########################################################################
# Figure A.5: Party-Public Value Differences (Original Survey Weights) #
########################################################################
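# Same layout as Figure A.4 but using the surveys' original weights (column
# 'wcn'); the data frames below store the corresponding group means and 95%
# confidence intervals used as annotations.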
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
dg_cfps <-data.frame(values=unique(datatidyr$values),labels1=c("Public:0.093\n[0.082,0.104]","","",""),labels2=c("CCP:0.339\n[0.301,0.376]","","",""))
dg_cgss2010<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.090\n[-0.109,-0.071]","","",""),labels2=c("CCP:0.347\n[0.297,0.396]","","",""))
dg_cgss2012<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.050\n[-0.069,-0.031]","","",""),labels2=c("CCP:0.414\n[0.363,0.466]","","",""))
dg_cgss2013<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.031\n[-0.050,-0.012]","","",""),labels2=c("CCP:0.317\n[0.257,0.376]","","",""))
dg_cgss2015<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.054\n[-0.074,-0.035]","","",""),labels2=c("CCP:0.284\n[0.226,0.343]","","",""))
dg_abs3<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.012\n[-0.025,0.048]",
"Public:0.012\n[-0.024,0.048]",
"Public:0.005\n[-0.031,0.041]",
"Public:0.012\n[-0.024,0.048]"),
labels2=c("CCP:0.163\n[0.082,0.244]",
"CCP:0.245\n[0.167,0.324]",
"CCP:0.142\n[0.057,0.227]",
"CCP:0.264\n[0.185,0.343]"))
dg_abs4<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.084\n[0.052,0.115]",
"Public:0.122\n[0.091,0.153]",
"Public:0.037\n[0.004,0.069]",
"Public:0.122\n[0.091,0.154]"),
labels2=c("CCP:0.384\n[0.290,0.479]",
"CCP:0.307\n[0.214,0.400]",
"CCP:0.132\n[0.025,0.240]",
"CCP:0.407\n[0.316,0.497]"))
pdf("Figurea5.pdf",width=10, height=8)
ggplot(weight = datatidyr$wcn)+
geom_density_ridges(data=datatidyr,aes(y = str_survey,
x = value,color = paste(str_survey, party),
fill = paste(str_survey, party),
linetype = paste(str_survey, party)),
alpha = .001,na.rm = FALSE,scale = 0.95)+
facet_grid(. ~ values)+
scale_fill_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_color_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c(1,5),
name = "") +
ylab("")+xlab("")+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
theme_bw() + scale_x_continuous(expand=c(0.01,0))+
scale_y_discrete(expand=c(0.01,0),
limits=c("CFPS2014","CGSS2010","CGSS2012",
"CGSS2013","CGSS2015",
"ABS3(2011)","ABS4(2015)"))+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
##################################################################################################
# Figure A.6: Correlations between Social and Political Domains for Party Members and Non-Members #
##################################################################################################
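# Scatter plots of social vs. political values with linear fits, drawn
# separately for party members and non-members, each annotated with the
# correlation coefficient for that group.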
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(gridExtra)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
party<-subset(data,data$party==1)
public<-subset(data,data$party==0)
p1<-ggplot(party,aes(x=social_value,y=political_value))+
geom_point(alpha=0.7)+geom_smooth(method='lm')+
xlab("Social")+ylab("Political")+
annotate(geom="text", x=-4, y=4,hjust = 0,size=5,
label = "paste(Party: rho==0.467, \" *** \")",
parse = TRUE)+
scale_x_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))+
scale_y_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))
p2<-ggplot(public,aes(x=social_value,y=political_value))+
geom_point(alpha=0.7)+geom_smooth(method='lm')+
xlab("Social")+ylab("")+
annotate(geom="text", x=-4, y=4,hjust = 0,size=5,
label = "paste(Public: rho==0.492, \" *** \")",
parse = TRUE)+
scale_x_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))+
scale_y_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))
pdf("Figurea6.pdf", width=10, height=5)
grid.arrange(p1,p2, ncol=2, nrow=1)
dev.off()
###########################################
# Figure A.7: By Year of Party Enrollment #
###########################################
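# Value scores by year-of-party-enrollment cohort (Fig.xlsx, sheet 10), drawn as
# points and lines for each value dimension.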
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=10)
datatidyr <- data %>% gather(attribute, value, social:modern)
theme_set(theme_bw() + theme(text=element_text(family="Times"),legend.position="bottom",legend.direction="horizontal",axis.title.x=element_text(face="bold",vjust=-.5),axis.title.y=element_text(face="bold",vjust=1),title=element_text(face="bold",size=16,vjust=1.5),axis.text.x=element_text(size=12),axis.text.y=element_text(size=12),strip.text=element_text(size=13)))
pdf("Figurea7.pdf",width=10, height=6)
ggplot(datatidyr,aes(x=yrgroup, y=value,color=attribute,
group=attribute,shape=attribute))+
geom_point(size=2)+
geom_line(aes(linetype=attribute))+
scale_color_brewer(palette="Set1",name = "",
breaks=c("social", "political","intl","modern"),
labels=c("Social",
"Political",
"International",
"Overall modern value"))+
  scale_linetype_manual(name = "",
                        breaks=c("social", "political","intl","modern"),
                        labels=c("Social",
                                 "Political",
                                 "International",
                                 "Overall modern value"),
                        values=c("dashed","twodash","solid","dotted"))+
  scale_shape_manual(name = "",
                     breaks=c("social", "political","intl","modern"),
                     labels=c("Social",
                              "Political",
                              "International",
                              "Overall modern value"),
                     values=c(19,17,18,15))+
xlab("Year Group")+ylab("")+
theme(legend.text = element_text(size=11))+
scale_x_discrete(limits=c("<1950","1950-1965","1966-1976",
"1977-1989","1990-2002",
"2003-2012",">2012"))
dev.off()
##################################################################################
# Figure A.8: Comparing China with Other East Asian Countries/Regions (ABS 2015) #
##################################################################################
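# Dot plots ranking East Asian countries/regions by mean value score in ABS wave 4
# (2015), read from Fig.xlsx, sheet 11; one panel per value dimension, with two
# entries highlighted in the blue/red scheme used for the public and CCP members
# elsewhere in the script.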
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
library(gridExtra)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=11)
data1<-subset(data,data$Value=="social")
data2<-subset(data,data$Value=="political")
data3<-subset(data,data$Value=="intl")
data4<-subset(data,data$Value=="modern")
p1<-ggplot(data1, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Social")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','#377eb8','black','black','black','black','#e41a1c','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','bold','plain','plain','plain','plain','bold','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p2<-ggplot(data2, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Political")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','#377eb8','black','#e41a1c','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','plain','bold','plain','bold','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p3<-ggplot(data3, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("International")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','black','black','black','#377eb8','black','black','#e41a1c'),
face = c('plain','plain','plain','plain','plain','plain','plain','plain','plain','plain','bold','plain','plain','bold')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p4<-ggplot(data4, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Overall modern value")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','black','#377eb8','black','#e41a1c','black','black'),
face = c('plain','plain','plain','plain','plain','plain','plain','plain','bold','plain','bold','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
pdf("Figurea8.pdf",width=16, height=8)
grid.arrange(p1,p2,p3,p4, ncol=4, nrow=1)
dev.off()
##################################################################################
# Figure A.9: Comparing China with Other East Asian Countries/Regions (ABS 2011) #
##################################################################################
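# Same layout as Figure A.8, for ABS wave 3 (2011), read from Fig.xlsx, sheet 12.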
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
library(gridExtra)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("ji_and_jiang_2019/Fig.xlsx",sheet=12)
data1<-subset(data,data$Value=="social")
data2<-subset(data,data$Value=="political")
data3<-subset(data,data$Value=="intl")
data4<-subset(data,data$Value=="modern")
p1<-ggplot(data1, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Social")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','#377eb8','black','black','black','#e41a1c','black','black','black','black','black','black'),
face = c('plain','plain','plain','bold','plain','plain','plain','bold','plain','plain','plain','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p2<-ggplot(data2, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Political")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','#377eb8','black','#e41a1c','black','black','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','bold','plain','bold','plain','plain','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p3<-ggplot(data3, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("International")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','black','black','black','black','#377eb8','#e41a1c','black'),
face = c('plain','plain','plain','plain','plain','plain','plain','plain','plain','plain','plain','bold','bold','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p4<-ggplot(data4, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Overall modern value")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','#377eb8','black','black','#e41a1c','black','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','bold','plain','plain','bold','plain','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
pdf("Figurea9.pdf",width=16, height=8)
grid.arrange(p1,p2,p3,p4, ncol=4, nrow=1)
dev.off()
##################################################################
# Figure A.10: Compare Party with Intellectuals and Middle-Class #
##################################################################
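# Weighted mean value scores for the rest of the public, CCP members, the middle
# class, and intellectuals (Fig.xlsx, sheet 13), faceted by value dimension.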
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("ji_and_jiang_2019/Fig.xlsx",sheet=13)
theme_set(theme_bw() + theme(text=element_text(family="Times"),legend.position="bottom",legend.direction="horizontal",axis.title.x=element_text(face="bold",vjust=-.5),axis.title.y=element_text(face="bold",vjust=1),title=element_text(face="bold",size=16,vjust=1.5),axis.text.x=element_text(size=12),strip.text=element_text(size=13)))
data$Value <- factor(data$Value, levels = c("social", "political", "intl", "modern"),labels=c("Social","Political","International","Overall modern value"))
data$group <- factor(data$group, levels = c("Public", "CCP (6.34% of the sample)", "Middle class (14.15% of the sample)", "Intellectuals (11.72% of the sample)"),
labels=c("The rest (65.36% of the sample)", "CCP members (6.34% of the sample)", "Middle class (14.15% of the sample)", "Intellectuals (11.72% of the sample)"))
pdf("Figurea10.pdf",width=9, height=4)
ggplot(data,aes(x=mean,y=group,label=round(mean,3)))+
geom_point(size = 1.2)+
geom_text(size=4,hjust = 0.5,vjust = -.7, show.legend=FALSE)+
xlab("Weighted Mean Estimator")+ylab("")+
scale_x_continuous(limits=c(-0.28,1.1))+
facet_grid(. ~ Value)
dev.off()
# Ji and Jiang "Enlightened One-Party Rule? Ideological Differences between Chinese Communist Party Members and the Mass Public"
# Replication File -- Figures 2019.4.19
# Required packages
# install.packages("openxlsx")
# install.packages("ggplot2")
# install.packages("tidyr")
# install.packages("readstata13")
# install.packages("ggridges")
# install.packages("gridExtra")
######################################################################################
# Figure 1 Sample Deviation from Population Statistics: Before and After Reweighting #
######################################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
data<-read.xlsx("ji_and_jiang_2019/Fig.xlsx",sheet=1) %>% View()
datatidyr <- data %>% gather(attribute, value, CCP:age)
datatidyr$att <- factor(datatidyr$attribute, levels = c("CCP", "female", "edu", "age"),labels=c("% CCP membership","% of female CCP member","% of college educated CCP member","% of CCP member at age<=35"))
datatidyr$survey1 <- factor(datatidyr$survey, levels = c("ABS4(2015)", "ABS3(2011)", "CGSS2015", "CGSS2013","CGSS2012","CGSS2010","CFPS2014"))
pdf("Figure1.pdf",width=10, height=4)
ggplot(datatidyr,aes(x=value, y=att,group=as.factor(group),
shape=as.factor(group),color=as.factor(group)))+
geom_point(size = 2)+
facet_grid(. ~ survey1)+
scale_y_discrete(limits=c("% of CCP member at age<=35","% of college educated CCP member",
"% of female CCP member","% CCP membership"))+
geom_vline(aes(xintercept=0), colour="#62BCE9", linetype="dashed")+
scale_color_brewer(palette="Set1",
name = "",
breaks=c("Origin", "Raked"),
labels=c("Before weight calibration","After weight calibration"))+
scale_shape_manual(name = "",
breaks=c("Origin", "Raked"),values=c(19,17),
labels=c("Before weight calibration","After weight calibration"))+
xlab("% in Survey - % in Population")+ylab("")+
scale_x_continuous(limits=c(-0.22,0.1),labels = scales::percent_format(accuracy = 1))+
theme_bw() +
theme(axis.text.x = element_text(size=8),
axis.text.y = element_text(size=11),
axis.title.x=element_text(face="bold",vjust=-.5,size=11),
axis.title.y=element_text(face="bold",vjust=1),
legend.text = element_text(size=11),
strip.text=element_text(size=13),
text=element_text(family="Times"),
legend.position="bottom",
legend.direction="horizontal")
dev.off()
#######################################################
# Figure 2: Main Result: Weighted Difference in Means #
#######################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
dg_cfps <-data.frame(values=unique(datatidyr$values),labels1=c("Public:0.093\n[0.082,0.104]","","",""),labels2=c("CCP:0.410\n[0.373,0.447]","","",""))
dg_cgss2010<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.090\n[-0.109,-0.071]","","",""),labels2=c("CCP:0.337\n[0.287,0.386]","","",""))
dg_cgss2012<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.050\n[-0.069,-0.031]","","",""),labels2=c("CCP:0.412\n[0.360,0.463]","","",""))
dg_cgss2013<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.031\n[-0.050,-0.012]","","",""),labels2=c("CCP:0.301\n[0.243,0.360]","","",""))
dg_cgss2015<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.054\n[-0.074,-0.035]","","",""),labels2=c("CCP:0.287\n[0.229,0.345]","","",""))
dg_abs3<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.012\n[-0.025,0.048]",
"Public:0.012\n[-0.025,0.048]",
"Public:0.005\n[-0.031,0.041]",
"Public:0.012\n[-0.024,0.048]"),
labels2=c("CCP:0.230\n[0.150,0.310]",
"CCP:0.326\n[0.244,0.407]",
"CCP:0.169\n[0.084,0.254]",
"CCP:0.347\n[0.265,0.429]"))
dg_abs4<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.084\n[0.052,0.115]",
"Public:0.122\n[0.091,0.153]",
"Public:0.041\n[0.009,0.074]",
"Public:0.125\n[0.094,0.156]"),
labels2=c("CCP:0.421\n[0.327,0.516]",
"CCP:0.351\n[0.259,0.444]",
"CCP:0.165\n[0.056,0.275]",
"CCP:0.458\n[0.368,0.549]"))
pdf("Figure2.pdf",width=10, height=8)
ggplot(weight = datatidyr$wcn2)+
geom_density_ridges(data=datatidyr,aes(y = str_survey,
x = value,color = paste(str_survey, party),
fill = paste(str_survey, party),
linetype = paste(str_survey, party)),
alpha = .001,na.rm = FALSE,scale = 0.95)+
facet_grid(. ~ values)+
scale_fill_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_color_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c(1,5),
name = "") +
ylab("")+xlab("")+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
theme_bw() + scale_x_continuous(expand=c(0.01,0))+
scale_y_discrete(expand=c(0.01,0),
limits=c("CFPS2014","CGSS2010","CGSS2012",
"CGSS2013","CGSS2015",
"ABS3(2011)","ABS4(2015)"))+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
###################################################
# Figure 3: Citizen-Party Member-Cadre Comparison #
###################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
data=data[complete.cases(data$cate),]
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
pdf("Figure3.pdf",width=10, height=8)
ggplot(weight = datatidyr$wcn2)+
geom_density_ridges(data=datatidyr,aes(y = values,
x = value,
color = paste(values, cate),
linetype=paste(values, cate),
fill = paste(values, cate)),
alpha = .001,na.rm = FALSE,scale=0.95)+
scale_fill_cyclical(breaks = c("0", "1","2"),
labels = c("Public","CCP Member","Cadre"),
values = c("#377eb8","#e41a1c","#4daf4a"),
name = "") +
scale_color_cyclical(breaks = c("0", "1","2"),
labels = c("Public","CCP Member","Cadre"),
values = c("#377eb8","#e41a1c","#4daf4a"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1","2"),
labels = c("Public","CCP Member","Cadre"),
values = c(1,2,4),
name = "") +
ylab("")+xlab("")+
scale_y_discrete(expand=c(0.01,0),
limits=c("Overall modern value",
"International",
"Political",
"Social"))+
annotate(geom="text", x=3, y=4.4,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.011\n[0.004,0.018]")+
annotate(geom="text", x=3, y=4.25,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.328\n[0.305,0.351]")+
annotate(geom="text", x=3, y=4.1,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.563\n[0.508,0.618]")+
annotate(geom="text", x=3, y=3.5,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.122\n[0.091,0.153]")+
annotate(geom="text", x=3, y=3.35,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.356\n[0.255,0.457]")+
annotate(geom="text", x=3, y=3.2,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.317\n[0.116,0.518]")+
annotate(geom="text", x=3, y=2.5,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.041\n[0.009,0.074]")+
annotate(geom="text", x=3, y=2.35,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.174\n[0.057,0.291]")+
annotate(geom="text", x=3, y=2.2,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.100\n[-0.220,0.420]")+
annotate(geom="text", x=3, y=1.5,hjust = 0,size=4,
color="#377eb8",lineheight=0.8,
label="Public:0.125\n[0.094,0.156]")+
annotate(geom="text", x=3, y=1.35,hjust = 0,size=4,
color="#e41a1c",lineheight=0.8,
label="Party:0.465\n[0.366,0.564]")+
annotate(geom="text", x=3, y=1.2,hjust = 0,size=4,
color="#4daf4a",lineheight=0.8,
label="Cadre:0.409\n[0.206,0.613]")+
theme_bw()+theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",size=16,vjust=-.5),
axis.title.y=element_text(face="bold",size=16,vjust=1),
axis.text.x=element_text(size=12),
axis.text.y=element_text(size=14),
strip.text=element_text(size=13))
dev.off()
#####################################################################################
# Figure 4: Party-Public Comparison in Subsamples Less Affected Social Desirability #
#####################################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=4)
data$Value <- factor(data$Value, levels = c("social","political","intl","modern"),labels=c("Social","Political","International","Overall modern value"))
data$question <- factor(data$question, levels = c('Answer reliable.',
'No doubts about the survey.',
'"Government officials often violate law and abuse power"',
'"Officials often conceal information from the public"',
'"Criminal officials often escape punishment"'),
labels=c('Answer reliable.',
'No doubts about the survey.',
'"Government officials often \n violate law and abuse power"',
'"Officials often conceal \n information from the public"',
'"Criminal officials often \n escape punishment"'))
pd <- position_dodge(0.6)
pdf("Figure4.pdf",width=9, height=5)
ggplot(data,aes(x=question, y=mean,
group=as.factor(party),
shape=as.factor(party),
color=as.factor(party),
label = round(mean,3)))+
geom_point(size = 1.5,position=pd)+
geom_hline(aes(yintercept=0), colour="#62BCE9", linetype="dashed")+
geom_text(size=2.8,hjust = 0.5,vjust = -0.6,position=pd,show.legend=FALSE)+
geom_errorbar(aes(ymin=lb95,
ymax=ub95),width = 0,position=pd)+
facet_grid(. ~ Value)+
scale_color_manual(values=c("#377eb8","#e41a1c"),
name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"))+
scale_shape_manual(name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"),
values=c(19,17))+
xlab("")+ylab("Weighted Mean Estimator")+
scale_y_continuous(limits=c(-0.1,0.8),breaks=seq(-0,0.8,0.2))+
theme_bw()+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",size=16,vjust=-.5),
axis.title.y=element_text(face="bold",size=16,vjust=1),
axis.text.x = element_text(size=12),
axis.text.y = element_text(hjust=0),
axis.text.y.left = element_text(size=10),
strip.text=element_text(size=13),
legend.text = element_text(size=10))+
coord_flip()
dev.off()
########################################
# Figure 5: Heterogenous Party Effects #
########################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=5)
data$Value <- factor(data$Value, levels = c("social", "political", "intl", "modern"),labels=c("Social","Political","International","Overall modern value"))
pd <- position_dodge(0.8)
pdf("Figure5.pdf", width=9, height=8)
ggplot(data,aes(x=cate,y=mean,label=round(mean,3),
color=as.factor(party),shape=as.factor(party)))+
geom_hline(aes(yintercept=0), colour="#62BCE9", linetype="dashed")+
geom_point(size = 1.8,position=pd)+
geom_errorbar(aes(ymin=lb95,ymax=ub95),width = 0,position=pd)+
geom_text(size=2.8,hjust = 0.5,vjust = -0.6,
position=pd,show.legend=FALSE)+
facet_grid(~Value)+
scale_y_continuous(limits=c(-0.6,1.02),breaks=seq(-0.4,1.0,0.4))+
scale_x_discrete(limits=c(" 75%-100% (highest)",
" 25%-75% (middle)",
" 0%-25% (lowest)",
"Income level",
" Old (60)",
" Middle age (45)",
" Young (30)","Age",
" Urban"," Rural","Residency",
" Female"," Male","Gender",
" College",
" No college",
"Education"))+
scale_color_manual(values=c("#377eb8","#e41a1c"),
name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"))+
scale_shape_manual(name = "",
breaks=c("0","1"),
labels=c("Public","CCP member"),
values=c(19,17))+
xlab("")+ylab("Predicted Ideology")+theme_bw() +
theme(text=element_text(family="Times"),
legend.position="bottom",
legend.direction="horizontal",
axis.title.x=element_text(face="bold",size=16,vjust=-.5),
axis.title.y=element_text(face="bold",size=16,vjust=1),
axis.text.x=element_text(size=12),
axis.text.y = element_text(size=c(10,10,10,11,10,10,10,
11,10,10,11,10,10,11,10,10,11),
face = c('plain','plain','plain','bold',
'plain','plain','plain','bold',
'plain','plain','bold','plain',
'plain','bold','plain','plain',
'bold'),hjust = 0),
strip.text.x = element_text(size=13),
legend.text = element_text(size=11))+
coord_flip()
dev.off()
####################################
# Figure A.1: PCA and Cronbach??s ?? #
####################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=6)
data<-subset(data,data$comp<11)
data$Value <- factor(data$Value, levels = c("social", "political",
"intl", "modern"),
labels=c("Social","Political","International","Overall modern value"))
data$survey <- factor(data$survey, levels = c("ABS4(2015)", "ABS3(2011)", "CGSS2015", "CGSS2013","CGSS2012","CGSS2010","CFPS2014"))
pdf("Figurea1.pdf",width=10, height=8)
ggplot(data,aes(x=comp,y=pro,label=propt))+
geom_bar(stat="identity")+geom_line()+geom_point()+
geom_text(vjust=-0.1,hjust=-0.1)+
geom_text(data=data,mapping=aes(x=9,y=0.5,label=alpha),
size=4,lineheight=0.8,hjust = 1,parse = TRUE)+
facet_grid(survey ~ Value)+
scale_x_continuous(limits=c(0.5,10.5),breaks=seq(1,10,1))+
scale_y_continuous(limits=c(0,0.64))+
ylab("Explained variances share")+xlab("Component Number")+
theme_bw()+theme(text=element_text(family="Times"),
legend.position="bottom",
legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
title=element_text(face="bold",size=16,vjust=1.5),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
############################################################################
# FFigure A.3: Comparing Demographics of CCP Members and Population Census #
############################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=7)
datatidyr <- data %>% gather(census, value, Population.Census:CCP.Census)
theme_set(theme_bw() + theme(text=element_text(family="Times"),legend.position="bottom",legend.direction="horizontal",axis.title.x=element_text(face="bold",vjust=-.5),axis.title.y=element_text(face="bold",vjust=1),title=element_text(face="bold",size=16,vjust=1.5),axis.text.x=element_text(size=12),strip.text=element_text(size=13)))
pdf("Figurea3.pdf",width=6, height=8)
ggplot(datatidyr,aes(x=att, y=value,fill=census))+
geom_bar(stat = "identity",position="dodge",
colour="black",alpha=0.5)+
scale_fill_brewer(palette="Set1",name = "",
breaks=c("CCP.Census", "Population.Census"),
labels=c("CCP Census", "Population Census"))+
scale_x_discrete(limits=c("minority","some university and above",
"71 and above","66-70","61-65",
"56-60","51-55","46-50","41-45",
"36-40","31-35","18-30","female"))+
xlab("Attribute")+ylab("Proportion")+
coord_flip()
dev.off()
###########################################################
# Figure A.4: Party-Public Value Differences (No Weights) #
###########################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
dg_cfps <-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.021\n[-0.032,-0.010]","","",""),labels2=c("CCP:0.247\n[0.210,0.284]","","",""))
dg_cgss2010<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.055\n[-0.074,-0.036]","","",""),labels2=c("CCP:0.391\n[0.341,0.440]","","",""))
dg_cgss2012<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.051\n[-0.070,-0.032]","","",""),labels2=c("CCP:0.376\n[0.325,0.427]","","",""))
dg_cgss2013<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.031\n[-0.050,-0.012]","","",""),labels2=c("CCP:0.275\n[0.216,0.334]","","",""))
dg_cgss2015<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.035\n[-0.055,-0.015]","","",""),labels2=c("CCP:0.302\n[0.244,0.360]","","",""))
dg_abs3<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:-0.020\n[-0.056,0.016]",
"Public:-0.035\n[-0.071,0.001]",
"Public:-0.020\n[-0.056,0.016]",
"Public:-0.037\n[-0.074,-0.001]"),
labels2=c("CCP:0.125\n[0.044,0.206]",
"CCP:0.197\n[0.118,0.275]",
"CCP:0.113\n[0.029,0.198]",
"CCP:0.211\n[0.132,0.290]"))
dg_abs4<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:-0.032\n[-0.065,0.000]",
"Public:-0.023\n[-0.055,0.010]",
"Public:-0.017\n[-0.050,0.015]",
"Public:-0.036\n[-0.069,-0.004]"),
labels2=c("CCP:0.292\n[0.198,0.387]",
"CCP:0.203\n[0.108,0.299]",
"CCP:0.096\n[-0.009,0.200]",
"CCP:0.296\n[0.204,0.388]"))
pdf("Figurea4.pdf",width=10, height=8)
ggplot()+
geom_density_ridges(data=datatidyr,aes(y = str_survey,
x = value,color = paste(str_survey, party),
fill = paste(str_survey, party),
linetype = paste(str_survey, party)),
alpha = .001,na.rm = FALSE,scale = 0.95)+
facet_grid(. ~ values)+
scale_fill_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_color_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c(1,5),
name = "") +
ylab("")+xlab("")+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
theme_bw() + scale_x_continuous(expand=c(0.01,0))+
scale_y_discrete(expand=c(0.01,0),
limits=c("CFPS2014","CGSS2010","CGSS2012",
"CGSS2013","CGSS2015",
"ABS3(2011)","ABS4(2015)"))+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
########################################################################
# Figure A.5: Party-Public Value Differences (Original Survey Weights) #
########################################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(ggridges)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
datatidyr <- data %>% gather(attribute, value, social_value:modern_all_value)
datatidyr$values <- factor(datatidyr$attribute, levels = c("social_value", "political_value", "intl_value", "modern_all_value"),labels=c("Social","Political","International","Overall modern value"))
dg_cfps <-data.frame(values=unique(datatidyr$values),labels1=c("Public:0.093\n[0.082,0.104]","","",""),labels2=c("CCP:0.339\n[0.301,0.376]","","",""))
dg_cgss2010<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.090\n[-0.109,-0.071]","","",""),labels2=c("CCP:0.347\n[0.297,0.396]","","",""))
dg_cgss2012<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.050\n[-0.069,-0.031]","","",""),labels2=c("CCP:0.414\n[0.363,0.466]","","",""))
dg_cgss2013<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.031\n[-0.050,-0.012]","","",""),labels2=c("CCP:0.317\n[0.257,0.376]","","",""))
dg_cgss2015<-data.frame(values=unique(datatidyr$values),labels1=c("Public:-0.054\n[-0.074,-0.035]","","",""),labels2=c("CCP:0.284\n[0.226,0.343]","","",""))
dg_abs3<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.012\n[-0.025,0.048]",
"Public:0.012\n[-0.024,0.048]",
"Public:0.005\n[-0.031,0.041]",
"Public:0.012\n[-0.024,0.048]"),
labels2=c("CCP:0.163\n[0.082,0.244]",
"CCP:0.245\n[0.167,0.324]",
"CCP:0.142\n[0.057,0.227]",
"CCP:0.264\n[0.185,0.343]"))
dg_abs4<-data.frame(values=unique(datatidyr$values),
labels1=c("Public:0.084\n[0.052,0.115]",
"Public:0.122\n[0.091,0.153]",
"Public:0.037\n[0.004,0.069]",
"Public:0.122\n[0.091,0.154]"),
labels2=c("CCP:0.384\n[0.290,0.479]",
"CCP:0.307\n[0.214,0.400]",
"CCP:0.132\n[0.025,0.240]",
"CCP:0.407\n[0.316,0.497]"))
pdf("Figurea5.pdf",width=10, height=8)
ggplot(weight = datatidyr$wcn)+
geom_density_ridges(data=datatidyr,aes(y = str_survey,
x = value,color = paste(str_survey, party),
fill = paste(str_survey, party),
linetype = paste(str_survey, party)),
alpha = .001,na.rm = FALSE,scale = 0.95)+
facet_grid(. ~ values)+
scale_fill_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_color_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c("#377eb8","#e41a1c"),
name = "") +
scale_linetype_cyclical(breaks = c("0", "1"),
labels = c("Public","CCP Member"),
values = c(1,5),
name = "") +
ylab("")+xlab("")+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cfps, mapping=aes(x=6,y=1.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2010,mapping=aes(x=6,y=2.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2012,mapping=aes(x=6,y=3.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2013,mapping=aes(x=6,y=4.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_cgss2015,mapping=aes(x=6,y=5.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs3, mapping=aes(x=6,y=6.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.5,label=labels1,color="#377eb8"),size=2.8,lineheight=0.8,hjust = 1)+
geom_text(data=dg_abs4, mapping=aes(x=6,y=7.8,label=labels2,color="#e41a1c"),size=2.8,lineheight=0.8,hjust = 1)+
theme_bw() + scale_x_continuous(expand=c(0.01,0))+
scale_y_discrete(expand=c(0.01,0),
limits=c("CFPS2014","CGSS2010","CGSS2012",
"CGSS2013","CGSS2015",
"ABS3(2011)","ABS4(2015)"))+
theme(text=element_text(family="Times"),
legend.position="bottom",legend.direction="horizontal",
axis.title.x=element_text(face="bold",vjust=-.5),
axis.title.y=element_text(face="bold",vjust=1),
axis.text.x=element_text(size=12),
strip.text=element_text(size=13))
dev.off()
##################################################################################################
# Figure A.6: Correlations between Social and Political Domains for Party Members and NonMembers #
##################################################################################################
rm(list=ls(all=TRUE))
library(readstata13)
library(ggplot2)
library(gridExtra)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.dta13("data.dta")
party<-subset(data,data$party==1)
public<-subset(data,data$party==0)
p1<-ggplot(party,aes(x=social_value,y=political_value))+
geom_point(alpha=0.7)+geom_smooth(method='lm')+
xlab("Social")+ylab("Political")+
annotate(geom="text", x=-4, y=4,hjust = 0,size=5,
label = "paste(Party: rho==0.467, \" *** \")",
parse = TRUE)+
scale_x_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))+
scale_y_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))
p2<-ggplot(public,aes(x=social_value,y=political_value))+
geom_point(alpha=0.7)+geom_smooth(method='lm')+
xlab("Social")+ylab("")+
annotate(geom="text", x=-4, y=4,hjust = 0,size=5,
label = "paste(Public: rho==0.492, \" *** \")",
parse = TRUE)+
scale_x_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))+
scale_y_continuous(limits=c(-4.1,4.1),breaks=seq(-4,4,1))
pdf("Figurea6.pdf", width=10, height=5)
grid.arrange(p1,p2, ncol=2, nrow=1)
dev.off()
###########################################
# Figure A.7: By Year of Party Enrollment #
###########################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=10)
datatidyr <- data %>% gather(attribute, value, social:modern)
theme_set(theme_bw() + theme(text=element_text(family="Times"),
                             legend.position="bottom", legend.direction="horizontal",
                             axis.title.x=element_text(face="bold",vjust=-.5),
                             axis.title.y=element_text(face="bold",vjust=1),
                             title=element_text(face="bold",size=16,vjust=1.5),
                             axis.text.x=element_text(size=12),
                             axis.text.y=element_text(size=12),
                             strip.text=element_text(size=13)))
pdf("Figurea7.pdf",width=10, height=6)
ggplot(datatidyr,aes(x=yrgroup, y=value,color=attribute,
group=attribute,shape=attribute))+
geom_point(size=2)+
geom_line(aes(linetype=attribute))+
scale_color_brewer(palette="Set1",name = "",
breaks=c("social", "political","intl","modern"),
labels=c("Social",
"Political",
"International",
"Overall modern value"))+
scale_linetype_manual(name = "",
breaks=c("modern", "demo","intl","liberal"),
labels=c("Social",
"Political",
"International",
"Overall modern value"),
values=c("dashed","twodash","solid","dotted"))+
scale_shape_manual(name = "",
breaks=c("modern", "demo","intl","liberal"),
labels=c("Social",
"Political",
"International",
"Overall modern value"),
values=c(19,17,18,15))+
xlab("Year Group")+ylab("")+
theme(legend.text = element_text(size=11))+
scale_x_discrete(limits=c("<1950","1950-1965","1966-1976",
"1977-1989","1990-2002",
"2003-2012",">2012"))
dev.off()
##################################################################################
# Figure A.8: Comparing China with Other East Asian Countries/Regions (ABS 2015) #
##################################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
library(gridExtra)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("Fig.xlsx",sheet=11)
data1<-subset(data,data$Value=="social")
data2<-subset(data,data$Value=="political")
data3<-subset(data,data$Value=="intl")
data4<-subset(data,data$Value=="modern")
p1<-ggplot(data1, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Social")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','#377eb8','black','black','black','black','#e41a1c','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','bold','plain','plain','plain','plain','bold','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p2<-ggplot(data2, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Political")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','#377eb8','black','#e41a1c','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','plain','bold','plain','bold','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p3<-ggplot(data3, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("International")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','black','black','black','#377eb8','black','black','#e41a1c'),
face = c('plain','plain','plain','plain','plain','plain','plain','plain','plain','plain','bold','plain','plain','bold')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p4<-ggplot(data4, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Overall modern value")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','black','#377eb8','black','#e41a1c','black','black'),
face = c('plain','plain','plain','plain','plain','plain','plain','plain','bold','plain','bold','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
pdf("Figurea8.pdf",width=16, height=8)
grid.arrange(p1,p2,p3,p4, ncol=4, nrow=1)
dev.off()
##################################################################################
# Figure A.9: Comparing China with Other East Asian Countries/Regions (ABS 2011) #
##################################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
library(tidyr)
library(gridExtra)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("ji_and_jiang_2019/Fig.xlsx",sheet=12)
data1<-subset(data,data$Value=="social")
data2<-subset(data,data$Value=="political")
data3<-subset(data,data$Value=="intl")
data4<-subset(data,data$Value=="modern")
p1<-ggplot(data1, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Social")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','#377eb8','black','black','black','#e41a1c','black','black','black','black','black','black'),
face = c('plain','plain','plain','bold','plain','plain','plain','bold','plain','plain','plain','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p2<-ggplot(data2, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Political")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','#377eb8','black','#e41a1c','black','black','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','bold','plain','bold','plain','plain','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p3<-ggplot(data3, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("International")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','black','black','black','black','black','#377eb8','#e41a1c','black'),
face = c('plain','plain','plain','plain','plain','plain','plain','plain','plain','plain','plain','bold','bold','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
p4<-ggplot(data4, aes(x=mean, y=reorder(country, mean),
color=as.factor(color),
shape=as.factor(color))) +
geom_point(size=3,fill='white') +
theme_bw() +ylab("")+xlab("")+
ggtitle("Overall modern value")+
scale_color_manual(values=c("#000000","#377eb8","#e41a1c"))+
scale_shape_manual(values=c(21,16,16))+
theme(panel.grid.major.x = element_blank(),
axis.text.y = element_text(size=12,color = c('black','black','black','black','black','black','#377eb8','black','black','#e41a1c','black','black','black','black'),
face = c('plain','plain','plain','plain','plain','plain','bold','plain','plain','bold','plain','plain','plain','plain')),
legend.position="none",
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="grey60", linetype="dashed"))
pdf("Figurea9.pdf",width=16, height=8)
grid.arrange(p1,p2,p3,p4, ncol=4, nrow=1)
dev.off()
##################################################################
# Figure A.10: Compare Party with Intellectuals and Middle-Class #
##################################################################
rm(list=ls(all=TRUE))
library(openxlsx)
library(ggplot2)
setwd("C:/Users/JCY/Dropbox/Ideology/replicate")
data<-read.xlsx("ji_and_jiang_2019/Fig.xlsx",sheet=13)
theme_set(theme_bw() + theme(text=element_text(family="Times"),
                             legend.position="bottom", legend.direction="horizontal",
                             axis.title.x=element_text(face="bold",vjust=-.5),
                             axis.title.y=element_text(face="bold",vjust=1),
                             title=element_text(face="bold",size=16,vjust=1.5),
                             axis.text.x=element_text(size=12),
                             strip.text=element_text(size=13)))
data$Value <- factor(data$Value, levels = c("social", "political", "intl", "modern"),labels=c("Social","Political","International","Overall modern value"))
data$group <- factor(data$group, levels = c("Public", "CCP (6.34% of the sample)", "Middle class (14.15% of the sample)", "Intellectuals (11.72% of the sample)"),
labels=c("The rest (65.36% of the sample)", "CCP members (6.34% of the sample)", "Middle class (14.15% of the sample)", "Intellectuals (11.72% of the sample)"))
pdf("Figurea10.pdf",width=9, height=4)
ggplot(data,aes(x=mean,y=group,label=round(mean,3)))+
geom_point(size = 1.2)+
geom_text(size=4,hjust = 0.5,vjust = -.7, show.legend=FALSE)+
xlab("Weighted Mean Estimator")+ylab("")+
scale_x_continuous(limits=c(-0.28,1.1))+
facet_grid(. ~ Value)
dev.off()
|
library(BayesLCA)
### Name: blca.gibbs
### Title: Bayesian Latent Class Analysis via Gibbs Sampling
### Aliases: blca.gibbs
### Keywords: blca gibbs
### ** Examples
## Generate a 4-dim. sample of 1000 data points from 2 latent classes
## (mixing weights 0.6 and 0.4). The item probabilities for the 2 classes
## are given by type1 and type2.
type1 <- c(0.8, 0.8, 0.2, 0.2)
type2 <- c(0.2, 0.2, 0.8, 0.8)
x<- rlca(1000, rbind(type1,type2), c(0.6,0.4))
## Not run: fit.gibbs<-blca.gibbs(x,2, iter=1000, burn.in=10)
## Not run: summary(fit.gibbs)
## Not run: plot(fit.gibbs)
## Not run: raftery.diag(as.mcmc(fit.gibbs))
## Not run: fit.gibbs<-blca.gibbs(x,2, iter=10000, burn.in=100, thin=0.5)
## Not run: plot(fit.gibbs, which=4)
## Not run: raftery.diag(as.mcmc(fit.gibbs))
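## A hedged addition (not taken from the BayesLCA help page): blca.em() fits the
## same latent class model by EM and can serve as a quick point-estimate
## cross-check of the Gibbs run; shown in the same "Not run" style as above.
## Not run: fit.em <- blca.em(x, 2)
## Not run: print(fit.em)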
|
/data/genthat_extracted_code/BayesLCA/examples/blca.gibbs.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 753 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmv.R
\name{pmv}
\alias{pmv}
\title{Predicted Mean Vote}
\usage{
pmv(clo, met, air.temp, saturation)
}
\arguments{
\item{clo}{Thermal insulation of clothing in [clo] (underwear, blouse/shirt, slacks/trousers, jacket, socks and shoes are approximately 1 clo)}
\item{met}{Physical activity in [met] (one person seated at rest is approximately 1 met)}
\item{air.temp}{Indoor air temperature (assumed equal to mean radiant temperature) in [C]}
\item{saturation}{Ratio of moisture content to moisture content of saturated air at the same temperature, in [\%] (approximately the same thing as relative humidity)}
}
\value{
The predicted mean vote, a value between -3 (cold) and +3 (hot)
}
\description{
Computes Fanger's predicted mean vote
}
\details{
Compute the predicted mean vote for one or more combinations of
clo, met, air temperature and moisture saturation. The inputs arguments
can be scalars or vectors.
}
\examples{
# With scalars
pmv(clo=1.0,
met=1.2,
air.temp=19,
saturation=40)
# With vectors
pmv(clo=c(1.0, 1.5),
met=c(1.2, 0.6),
air.temp=c(19, 30),
    saturation=c(35, 40))
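# A hedged extra illustration (not from the original homeR documentation):
# sweeping air temperature with equal-length vectors, as the Details section allows.
temps <- seq(18, 26, by = 2)
pmv(clo=rep(1.0, length(temps)),
    met=rep(1.2, length(temps)),
    air.temp=temps,
    saturation=rep(40, length(temps)))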
}
\references{
CIBSE Guide A, section 1.4 and 1.A1.2 (from which this implementation is derived)
}
|
/man/pmv.Rd
|
no_license
|
neurobat/homeR
|
R
| false | true | 1,282 |
rd
|
#This code requires air_movements, clean_data, weather data, seasons_four [see import_supplemental_data.R].
#a1 - air_movements with Season
#-- Remember to download file "23100003.csv" and change location
library(tidyverse) # readr/dplyr/tidyr/stringr/ggplot2 functions are used below but were not loaded in the original script
air_movements <- read_csv("ext-data/aircraft-movements/23100003.csv")
#c1 - clean_data with Season
#w1 - weather data with Season [see import_weather_data.R]
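# Hedged setup sketch (not part of the original import scripts): the joins below
# only require seasons_four to carry Month and Season columns. The month-to-season
# mapping used here is an assumption (meteorological seasons).
if (!exists("seasons_four")) {
  seasons_four <- data.frame(
    Month  = 1:12,
    Season = c("Winter", "Winter", "Spring", "Spring", "Spring", "Summer",
               "Summer", "Summer", "Fall", "Fall", "Fall", "Winter")
  )
}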
#Civil and military movements
air_movements %>%
filter( Airports == "Oshawa, Ontario", str_detect(REF_DATE, "2015|2016|2017|2018|2019|2020") ) %>%
select( REF_DATE, `Civil and military movements`, VALUE) -> air_movements
air_movements %>%
separate(REF_DATE, c("Year", "Month"), sep = "-") %>%
mutate(Year = as.integer(Year), Month = as.integer(Month)) -> air_movements
clean_data %>%
inner_join(seasons_four, by = c("Month")) -> c1
#Comparing num of sessions and season for given data
c1 %>%
group_by(Season) %>%
summarise(s = n_distinct(Session_ID)) %>% # sum(Duration, na.rm = T)
ggplot(aes(x = Season, y = s)) + geom_bar(stat = "identity")
air_movements %>%
inner_join(seasons_four, by = c("Month")) -> a1
#Comparing air traffic for external data
a1 %>%
group_by(Season) %>%
summarise(s = mean(VALUE)) %>%
ggplot(aes(x = Season, y = s)) + geom_bar(stat = "identity")
weather %>%
inner_join(seasons_four, by = c("Month")) -> w1
#Comparing mean temp and season
w1 %>%
group_by(Season) %>%
  summarise(s = mean(`Mean Temp (°C)`, na.rm = T)) %>%
ggplot(aes(x = Season, y = s)) + geom_bar(stat = "identity")
#Comparing mean temp and air traffic
w1 %>% group_by(Year, Month) %>% summarise( avg_temp = mean(`Mean Temp (°C)`, na.rm = T) ) %>%
full_join(a1 %>% group_by(Year, Month) %>%
summarise( traffic = mean(VALUE, na.rm = T) ), by = c("Year", "Month")) %>%
ggplot(aes(x=avg_temp, y=traffic)) + geom_point() #+ facet_wrap(facets = ~Year)
#Comparing mean temp and num of sessions
w1 %>% group_by(Year, Month) %>% summarise( avg_temp = mean(`Mean Temp (°C)`, na.rm = T) ) %>%
inner_join(c1 %>% group_by(Year, Month) %>%
summarise( traffic = sum(Duration, na.rm = T) ), by = c("Year", "Month")) %>%
ggplot(aes(x=avg_temp, y=traffic)) + geom_point() #+ facet_wrap(facets = ~Year)
#Comparing spd of max gust and air traffic
w1 %>% group_by(Year, Month) %>% drop_na(`Spd of Max Gust (km/h)`) %>%
full_join(a1 %>% group_by(Year, Month) %>%
summarise( traffic = mean(VALUE, na.rm = T) ), by = c("Year", "Month")) %>%
ggplot(aes(x=`Spd of Max Gust (km/h)`, y=traffic)) + geom_bar(stat="identity")
#Comparing spd of max gust and num of sessions
w1 %>% group_by(Year, Month) %>% drop_na(`Spd of Max Gust (km/h)`) %>%
inner_join(c1 %>% group_by(Year, Month) %>%
summarise( traffic = sum(Duration, na.rm = T) ), by = c("Year", "Month")) %>%
ggplot(aes(x=`Spd of Max Gust (km/h)`, y=traffic)) + geom_bar(stat="identity")
|
/Feb15_Analysis/Analysis1.R
|
no_license
|
wasim985/Data-analysis-project
|
R
| false | false | 2,917 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Collate.r
\name{collator_freqs}
\alias{collator_freqs}
\title{Collate frequencies.}
\usage{
collator_freqs(runs, dict, row.dim.label = "Year", col.dim.label = "",
numbers = FALSE, CI = FALSE)
}
\arguments{
\item{runs}{a list of lists of matrices, one inner list per run.
Each inner list may have any number of matrices,
and each matrix may have a different set of rows or columns.
The matrices will be flattened into rows.}
\item{dict}{Dictionary object. Used to label columns.}
\item{row.dim.label}{name of the entire row dimension}
\item{col.dim.label}{name of the entire col dimension}
\item{numbers}{If TRUE, a frequency table is produced. Otherwise, a
percentage table is returned.}
\item{CI}{if TRUE and length(runs) > 1, lower and upper confidence intervals
are returned in additional columns}
}
\value{
a matrix of collated result for each iteration
}
\description{
Performs the following:
\itemize{
\item Takes mean without confidence intervals using \code{\link{collator_mutiple_lists_mx}}
\item Labels the result using the dictionary
\item Converts frequencies to percentages
\item Labels the output
}
}
\examples{
run1_mx1 = matrix(1:2, nrow=1, dimnames=list(1, c("F","M")))
run1_mx2 = matrix(1:4, nrow=2, dimnames=list(1:2, c("F","M")), byrow = TRUE)
run1 = structure(list(run1_mx1, run1_mx2), meta=c(varname="disability_state", grpby.tag="sex"))
run2_mx1 = matrix(11:12, nrow=1, dimnames=list(1, c("F","M")))
run2_mx2 = matrix(11:14, nrow=2, dimnames=list(3:4, c("F","M")), byrow = TRUE)
run2 = structure(list(run2_mx1, run2_mx2), meta=c(varname="disability_state", grpby.tag="sex"))
runs <- list(run1=run1,run2=run2)
dict <- dict_example
collator_freqs(runs, dict)
collator_freqs(runs, dict, numbers=TRUE)
}
\seealso{
\code{\link{collator_mutiple_lists_mx}}
}
|
/src/man/collator_freqs.Rd
|
no_license
|
compassresearchcentre/simario
|
R
| false | false | 1,939 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_times.R
\name{get_times}
\alias{get_times}
\title{Get times}
\usage{
get_times(graph, demand)
}
\arguments{
\item{graph}{Graph object created with configure_graph() function}
\item{demand}{Demand object created with configure_demand() function}
}
\value{
Square matrix of travel times between nodes
}
\description{
Wrapper function for dodgr function 'R/dodgr_times.R'
}
\examples{
flist <- configure_graph(sioux_network,sioux_zones,use_cost_col=TRUE)
graph <- flist[[1]]
zones <- flist[[2]]
demand <- configure_demand(sioux_demand,zones)
paths <- get_times(graph,demand)
}
|
/man/get_times.Rd
|
no_license
|
douglascm/trafficr
|
R
| false | true | 655 |
rd
|
library(ggplot2)
library(maps)
library(rgdal)# R wrapper around GDAL/OGR
library(sp)
library(plyr)
# library(dplyr)
library(viridis)
library(scales)
require(RColorBrewer)
library(glue)
# library(ggpubr)
library(cowplot)
library(RPostgreSQL)
library(postGIStools)
library(rasterVis)
library(grid)
library(scales)
library(viridis) # better colors for everyone
library(ggthemes) # theme_map()
user <- "mbougie"
host <- '144.92.235.105'
port <- '5432'
password <- 'Mend0ta!'
### Make the connection to database ######################################################################
con_synthesis <- dbConnect(PostgreSQL(), dbname = 'usxp_deliverables', user = user, host = host, port=port, password = password)
### Expansion:attach df to specific object in json #####################################################
# bb = get_postgis_query(con_synthesis, "SELECT ST_Intersection(states.geom, bb.geom) as geom
# FROM (SELECT st_transform(ST_MakeEnvelope(-111.144047, 36.585669, -79.748903, 48.760751, 4326),5070)as geom) as bb, spatial.states
# WHERE ST_Intersects(states.geom, bb.geom) ",
# geom_name = "geom")
#
# bb.df <- fortify(bb)
### Expansion:attach df to specific object in json #####################################################
states = get_postgis_query(con_synthesis, "SELECT geom FROM spatial.states WHERE st_abbrev
IN ('MT','MN','IA','ND','SD')",
geom_name = "geom")
summary(states)
states.df <- fortify(states)
summary(states.df)
## Expansion:attach df to specific object in json #####################################################
# region = get_postgis_query(con_synthesis, "SELECT wkb_geometry as geom FROM waterfowl.tstorm_dissolved_5070",
# geom_name = "geom")
#
# region.df <- fortify(region)
# ### Expansion:attach df to specific object in json #####################################################
region = get_postgis_query(con_synthesis, "SELECT geom FROM waterfowl.waterfowl_wgs84",
geom_name = "geom")
#### reproject to 5070 so it is aligned with the other datasets
region <- spTransform(region, CRS("+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs
+ellps=GRS80 +towgs84=0,0,0"))
summary(region)
region.df <- fortify(region)
summary(region.df)
#
### Expansion:attach df to specific object in json #####################################################
states_large = get_postgis_query(con_synthesis, "SELECT geom FROM spatial.states",
geom_name = "geom")
states_large.df <- fortify(states_large)
### Expansion:attach df to specific object in json #####################################################
states = get_postgis_query(con_synthesis, "SELECT geom FROM spatial.states WHERE st_abbrev
IN ('MT','IA','MN','ND','SD')",
geom_name = "geom")
states_region.df <- fortify(states)
fgdb = 'I:\\d_drive\\projects\\usxp\\series\\s35\\deliverables\\habitat_impacts\\waterfowl\\data\\waterfowl.gdb'
mapa <- readOGR(dsn=fgdb,layer="s35_waterfowl_bs_maj900m_fc_dissolve")
# crs_wgs84 = CRS('+init=EPSG:4326')
# mapa <- spTransform(mapa, crs_wgs84)
#fortify() creates zany attributes so need to reattach the values from intial dataframe
mapa.df <- fortify(mapa)
#creates a numeric index for each row in dataframe
mapa@data$id <- rownames(mapa@data)
#merge the attributes of mapa@data to the fortified dataframe with id column
mapa.df <- join(mapa.df, mapa@data, by="id")
hist(mapa.df$gridcode,breaks = 50)
#### convert the continuous gridcode values to a factor so ggplot treats them as a discrete scale
mapa.df$gridcode <- as.factor(mapa.df$gridcode)
##########################################################################
#### graphics############################################################
##########################################################################
d <- ggplot() +
### state boundary background ###########
### states_large boundary background ###########
geom_polygon(
data=states_large.df,
aes(x=long,y=lat,group=group),
fill='#f0f0f0') +
#
### states_large boundary strokes ###########
geom_polygon(
data=states_large.df,
aes(y=lat, x=long, group=group),
alpha=0,
colour='white',
size=6
) +
### states_region boundary background ###########
geom_polygon(
data=states_region.df,
aes(x=long,y=lat,group=group),
fill='#cccccc') +
### states_region boundary strokes ###########
geom_polygon(
data=states_region.df,
aes(y=lat, x=long, group=group),
alpha=0,
colour='white',
size=2
) +
### region boundary background ###########
geom_polygon(
data=region.df,
aes(x=long,y=lat,group=group),
fill='#7e7e7e') +
geom_polygon(
data=mapa.df,
aes(x=long,y=lat, group=group, fill=gridcode)
) +
### state boundary strokes ###########
geom_polygon(
data=states_region.df,
aes(y=lat, x=long, group=group),
alpha=0,
colour='white',
size=2
) +
# coord_equal() +
###### Equal scale cartesian coordinates
######have to use projection limits and NOT lat long
coord_equal(xlim = c(-1250000,250000), ylim = c(2080000,3000000)) +
#### add title to map #######
labs(title = '') +
theme(
#### nulled attributes ##################
axis.text.x = element_blank(),
axis.title.x=element_blank(),
axis.text.y = element_blank(),
axis.title.y=element_blank(),
axis.ticks = element_blank(),
axis.line = element_blank(),
panel.background = element_rect(fill = NA, color = NA),
panel.grid.major = element_blank(),
plot.background = element_rect(fill = NA, color = NA),
plot.margin = unit(c(0, 0, 0, 0), "cm"),
#### modified attributes ########################
##parameters for the map title
# plot.title = element_text(size= 45, vjust=-12.0, hjust=0.10, color = "#4e4d47"),
##shifts the entire legend (graphic AND labels)
legend.justification = c(0,0),
legend.position = c(0.30, 0.04), ####(horizontal, vertical)
legend.spacing.x = unit(0.5, 'cm'),
text = element_blank() ##these are the legend numeric values
) +
###this is modifying the specifics of INSIDE the legend (i.e. the legends components that make up the legend)
### create a discrete scale. These functions allow you to specify your own set of mappings from levels in the data to aesthetic values.
scale_fill_manual(values = c("#0b2c7a", "#1e9094", "#0ec441", "#7bed00","#f7d707", "#e68e1c", "#c2523c"),
###legend labels
labels = c('0-10','10-20','20-40','40-60','60-80','80-100','>100'),
#Legend type guide shows key (i.e., geoms) mapped onto values.
guide = guide_legend( title='',
title.theme = element_text(
size = 0,
color = "#4e4d47",
vjust=0.0,
angle = 0,
face="bold"
),
# legend bin dimensions
keyheight = unit(0.015, units = "npc"),
keywidth = unit(0.05, units = "npc"),
#legend elements position
label.position = "bottom",
title.position = 'top',
#The desired number of rows of legends.
nrow=1
# byrow=TRUE
)
)
getggplotObject <- function(x, multiplier, slots, labels){
###declare the empty list that will hold all the ggplot objects
ggplot_object_list <- list()
print('slots')
print(slots)
limit = x + (multiplier * slots)
print('limit')
print(limit)
i = 1
# labels <- c("20%","40%","60%","80%",">80%")
while (x <= limit) {
print('x-top')
print(x)
ggplot_object = annotation_custom(grobTree(textGrob(labels[i], x=x, y= 0.03, rot = 0,gp=gpar(col="#4e4d47", fontsize=45, fontface="bold"))))
ggplot_object_list <- append(ggplot_object_list, list(ggplot_object))
x = x + multiplier
print('x-bottom')
print(x)
i = i + 1
}
return(ggplot_object_list)
}
legend_title_abandon = annotation_custom(grobTree(textGrob("Nesting Opportunities (pair/sq.mi.)", x=0.49, y= 0.09, rot = 0,gp=gpar(col="#4e4d47", fontsize=45, fontface="bold"))))
legendlabels_abandon <- getggplotObject(x = 0.30, multiplier = 0.056, slots = 7, labels = c("0","10","20","40","60","80","100"))
d + legend_title_abandon + legendlabels_abandon
fileout = 'I:\\d_drive\\projects\\usxp\\series\\s35\\deliverables\\habitat_impacts\\waterfowl\\deliverables\\test.png'
ggsave(fileout, width = 34, height = 25, dpi = 500)
|
/projects/usxp/stages/deliverables/habitat_impacts/waterfowl/waterfowl.R
|
no_license
|
mbougie/gibbs
|
R
| false | false | 9,360 |
r
|
# Written by Andrew Marderstein (2018-2019). Contact: anm2868@med.cornell.edu
# What to analyze:
# based on whether tissue has noticeable infiltration across samples,
# whether cell type is well-represented in samples,
# and whether sample size is sufficient.
# ALSO: xcell and cibersort need to "generally" agree w/ each other (no inverse correlation)
library(data.table)
df.rel <- fread('/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/output/infiltration_profiles/GTEx_v7_genexpr_ALL.CIBERSORT.ABS-F.QN-F.perm-1000.txt',data.table = F,stringsAsFactors = F)
df.abs <- fread('/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/output/infiltration_profiles/GTEx_v7_genexpr_ALL.CIBERSORT.ABS-T.QN-F.perm-1000.txt',data.table = F,stringsAsFactors = F)
df.xcell <- fread('/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/output/infiltration_profiles/XCell.all_tissues.txt',data.table = F,stringsAsFactors = F)
genetic_data_fam <- fread('/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/bin/gtex_all.filter.name.fam',data.table = F,stringsAsFactors = F)
# filter: indiv needs to have genetic data
df.rel <- subset(df.rel,ID %in% genetic_data_fam[,2])
tis.uniq <- data.frame(table(df.rel$SMTSD))
df.results <- data.frame(tissue=c(),cell=c())
cellTypes.df <- data.frame(
ciber=c('T cells CD8','T cells CD4 naive','CD4_memory','Neutrophils','MacrophageSum',
'Bcellsum','NK_Sum','DendriticSum','MastSum','Myeloid_Sum',
'T cells follicular helper','T cells regulatory (Tregs)','T cells gamma delta',
'Monocytes','Eosinophils','Lymph_Sum'),
xcell=c('CD8Sum','CD4+ naive T-cells','CD4_memory','Neutrophils','MacrophageSum',
'Bcellsum','NK cells','DendriticSum','Mast cells','Myeloid_Sum',
'Th_Sum','Tregs','Tgd cells',
'Monocytes','Eosinophils','Lymph_Sum'),
stringsAsFactors = F)
# cellTypes.df <- data.frame(ciber=c('T cells CD8','CD4_Tcells','Neutrophils','MacrophageSum'),
# xcell=c('CD8Sum','CD4Sum','Neutrophils','MacrophageSum'),stringsAsFactors = F)
for (i in 1:nrow(tis.uniq)) {
TISSUE <- tis.uniq[i,1]
df.sub <- subset(df.rel,SMTSD==TISSUE)
# cond 1: does this tissue have sufficient infiltration?
ciberSigSig <- subset(df.sub, `P-value` < 0.5)
infil <- (nrow(ciberSigSig)/nrow(df.sub))
# cond 2: is this cell type represented in the sample?
cellTypes <- cellTypes.df$xcell
df.xcell.sub <- subset(df.xcell,SMTSD==TISSUE)
cellTypes2 <- as.matrix(df.xcell.sub[, cellTypes])
rownames(cellTypes2) <- df.xcell.sub$IID
cellTypeFreq2 <- apply(cellTypes2, 2, mean)
cellTypes <- cellTypes.df$ciber
cellTypes2 <- as.matrix(df.sub[, cellTypes])
rownames(cellTypes2) <- df.sub$ID
cellTypeFreq <- apply(cellTypes2, 2, mean)
condition2 <- cellTypes[as.numeric(which(cellTypeFreq > 0.05 & cellTypeFreq2 > 1e-3))]
# cond 3: are there enough samples total?
N <- tis.uniq[i,2]
if (infil > 0.5 & N >= 70) {
for (cell in condition2) {
# cond 4: do xcell and cibersort "somewhat" agree?
df.abs.sub <- subset(df.abs,SMTSD==TISSUE)
df.xcell.sub <- subset(df.xcell,SMTSD==TISSUE)
cor.res <- cor.test(df.abs.sub[,cell],df.xcell.sub[,cellTypes.df$xcell[cellTypes.df$ciber==cell]])
condition4 <- !(cor.res$estimate < 0)
if (condition4 & !is.na(condition4)) {
# save infiltration phenotype
df.results <- rbind(df.results,data.frame(tissue=TISSUE,cell=cell))
}
}
}
}
# save results
df.results$tissue <- as.character(df.results$tissue)
df.results$cell <- as.character(df.results$cell)
df.results$phenotype <- paste(df.results$tissue,df.results$cell,sep = '-')
df.results <- subset(df.results,tissue != 'Cells - EBV-transformed lymphocytes')
fwrite(df.results,'/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/output/infiltration_phenotypes.txt',sep='\t',quote=F,row.names = F,col.names = T)
# x <- rbind(data.frame(tissue='Whole Blood',cell='CD4.CD8'),
# data.frame(tissue='Whole Blood',cell='Myeloid.Lymph'))
# fwrite(x,'/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/output/infiltration_ratios.txt',sep='\t',quote=F,row.names = F,col.names = T)
#
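# Hedged usage sketch (not part of the original pipeline): downstream scripts can
# reload the saved phenotype list and count how many phenotypes each tissue contributes.
pheno <- fread('/athena/elementolab/scratch/anm2868/GTEx/GTEx_infil/output/infiltration_phenotypes.txt', data.table = F)
table(pheno$tissue)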
|
/scripts/GTEx_Deconv/filter.R
|
permissive
|
drewmard/GTEx_infil
|
R
| false | false | 4,251 |
r
|
#Import data
consump <- read.table("data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
#Convert Date & Time variables into single DateTime variable, force data to date type.
consump$DateTime <- paste(consump$Date, consump$Time)
consump$Date <- as.Date(consump$Date, format = "%d/%m/%Y")
consump$DateTime <- strptime(consump$DateTime, format = "%d/%m/%Y %H:%M:%S")
#Filter to the two target dates (2007-02-01 and 2007-02-02)
consump_range <- subset(consump, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
#Create png
png(file = "plot3.png", width = 480, height = 480, antialias = "cleartype")
par(mar= c(4, 4, 2, 1))
#Implement the plot
with(consump_range, plot(x = DateTime, y = Sub_metering_1, xlab = "", ylab = "Energy sub metering", main = NULL, col = "black", type = "l"))
with(consump_range, lines(x = DateTime, y = Sub_metering_2, col = "red"))
with(consump_range, lines(x = DateTime, y = Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), xjust=1)
dev.off()
|
/plot_3.R
|
no_license
|
DAWHEEL/ExData_Plotting1
|
R
| false | false | 1,108 |
r
|
try(library(parallel))
try(library(conveniencefunctions))
try(library(dplyrExtras))
try(library(dtplyr))
try(library(data.table))
try(library(forcats))
try(library(stringr))
try(library(purrr))
try(library(readr))
try(library(tidyr))
try(library(tibble))
try(library(ggplot2))
try(library(tidyverse))
try(library(dplyr))
try(library(rootSolve))
try(library(deSolve))
try(library(dMod))
try(library(cOde))
try(library(stats))
try(library(graphics))
try(library(grDevices))
try(library(utils))
try(library(datasets))
try(library(methods))
try(library(base))
setwd('~/D_1trust_folder')
rm(list = ls())
library(doParallel)
procs <- as.numeric(Sys.getenv('SLURM_NTASKS'))
registerDoParallel(cores=procs)
load('D_1trust.RData')
files <- list.files(pattern = '.so')
for (f in files) dyn.load(f)
.node <- 8
.runbgOutput <- try({
mstrust(obj, ini, rinit = 1, rmax = 10, iterlim = 40000, sd = 3, parupper = 12, parlower = -12, fits = 1 * 16, cores = 16, fixed = NULL)
})
save(.runbgOutput, file = 'D_1trust_8_result.RData')
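## Hedged follow-up sketch (not part of the original job script): in a typical dMod
## workflow the mstrust fit list is condensed to a parameter frame; the call below
## assumes as.parframe() applies to this output and is wrapped in try() to stay safe.
if (!inherits(.runbgOutput, "try-error")) {
  .fitframe <- try(as.parframe(.runbgOutput))
}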
|
/WORK/04-Results/MM-03.0/01-Model/D_1trust_8.R
|
no_license
|
svenjakemmer/HGFdepletion
|
R
| false | false | 1,017 |
r
|
asFlag = function(x, default, na.ok = FALSE) {
if (missing(x)) {
if (!missing(default))
return(default)
stopf("Argument %s is missing", deparse(substitute(x)))
}
assertFlag(x, na.ok = na.ok, .var.name = deparse(substitute(x)))
x
}
asKeys = function(.self, keys, len, default) {
if (missing(keys)) {
if (!missing(default))
return(default)
stop("Keys are missing")
}
if (!is.character(keys)) {
keys = try(as.character(keys))
if (is.error(keys))
stop("Keys must be of type character or be convertible to character")
}
if (!missing(len) && length(keys) != len)
stop("Keys must have length ", len)
if (anyMissing(keys))
stop("Keys contain NAs")
# R variable pattern: "^((\\.[[:alpha:]._]+)|([[:alpha:]]+))[[:alnum:]_.]*$"
pattern = "^[[:alnum:]._-]+$"
ok = grepl(pattern, keys)
if (!all(ok))
stopf("Key '%s' in illegal format, see help", head(keys[!ok], 1L))
  if (!.self$all.files && any(substr(keys, 1L, 1L) == "."))
    stop("Cannot work with hidden files (files starting with a dot) unless 'all.files' is set to TRUE.")
return(keys)
}
checkPath = function(path) {
qassert(path, "S1")
if (!file.exists(path) && !dir.create(path, recursive = TRUE))
stopf("Could not create directory '%s'", path)
assertDirectory(path, access = "r")
path
}
checkExtension = function(extension) {
qassert(extension, "S1")
if (grepl("[^[:alnum:]]", extension))
stop("Extension contains illegal characters: ",
collapse(strsplit(gsub("[[:alnum:]]", "", extension), ""), " "))
return(extension)
}
checkCollision = function(keys) {
dups = duplicated(tolower(keys))
if (any(dups)) {
warningf("The following keys result in colliding files on case insensitive file systems: %s",
collapse(keys[dups]))
}
invisible(TRUE)
}
checkCollisionNew = function(new, old) {
dups = new %nin% old & tolower(new) %in% tolower(old)
if (any(dups))
warningf("Keys collide on case insensitive file systems: %s", collapse(new[dups]))
invisible(TRUE)
}
fn2key = function(.self, fn) {
return(sub(sprintf("\\.%s$", .self$extension), "", fn))
}
key2fn = function(.self, key) {
return(file.path(.self$path, sprintf("%s.%s", key, .self$extension)))
}
nkeys = function(.self) {
length(list.files(.self$path, pattern = sprintf("\\.%s$", .self$extension), ignore.case = TRUE, all.files = .self$all.files))
}
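# Hedged usage sketch (not part of the package): '.self' is expected to behave like a
# repository object with $path, $extension and $all.files fields, so a plain list is
# enough to illustrate the key/file-name mapping helpers defined above.
.demo <- list(path = tempdir(), extension = "RData", all.files = FALSE)
key2fn(.demo, "results")       # "<tempdir>/results.RData"
fn2key(.demo, "results.RData") # "results"
nkeys(.demo)                   # number of "*.RData" files currently in tempdir()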
|
/fail/R/helper.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 2,405 |
r
|
#' Create a GLS model and directly perform kriging
#'
#' @param data_manifold list or array [\code{p,p,N}] of \code{N} symmetric positive definite matrices of dimension \code{p*p}
#' @param coords \code{N*2} or \code{N*3} matrix of [lat,long], [x,y] or [x,y,z] coordinates. [lat,long] are supposed to
#' be provided in signed decimal degrees
#' @param X matrix (N rows and unrestricted number of columns) of additional covariates for the tangent space model, possibly NULL
#' @param Sigma \code{p*p} matrix representing the tangent point. If NULL the tangent point is computed as the intrinsic mean of
#' \code{data_manifold}
#' @param metric_manifold metric used on the manifold. It must be chosen among "Frobenius", "LogEuclidean", "SquareRoot", "Correlation"
#' @param metric_ts metric used on the tangent space. It must be chosen among "Frobenius", "FrobeniusScaled", "Correlation"
#' @param model_ts type of model fitted on the tangent space. It must be chosen among "Intercept", "Coord1", "Coord2", "Additive"
#' @param vario_model type of variogram fitted. It must be chosen among "Gaussian", "Spherical", "Exponential"
#' @param n_h number of bins in the empirical variogram
#' @param distance type of distance between coordinates. It must be either "Eucldist" or "Geodist"
#' @param data_dist_mat Matrix of dimension \code{N*N} of distances between data points. If not provided it is computed using \code{distance}
#' @param data_grid_dist_mat Matrix of dimension \code{N*M} of distances between data points and grid points. If not provided it is computed using \code{distance}
#' @param max_it max number of iterations for the main loop
#' @param tolerance tolerance for the main loop
#' @param weight_intrinsic vector of length \code{N} to weight the locations in the computation of the intrinsic mean. If NULL
#' a vector of ones is used. Not needed if Sigma is provided
#' @param tolerance_intrinsic tolerance for the computation of the intrinsic mean. Not needed if Sigma is provided
#' @param max_sill max value allowed for \code{sill} in the fitted variogram. If NULL it is defined as \code{1.15*max(emp_vario_values)}
#' @param max_a maximum value for \code{a} in the fitted variogram. If NULL it is defined as \code{1.15*h_max}
#' @param param_weighted_vario List of 7 elements to be provided to consider Kernel weights for the variogram (significant only within an RDD procedure).
#' Indeed in this case the N_tot data regarding the whole domain must be provided to the algorithm, not only the N in the cell under consideration. Therefore
#' the list must contain the following fields:
#' \code{weight_vario} (vector of length \code{N_tot} to weight the locations in the computation of the empirical variogram),
#' \code{distance_matrix_tot} (\code{N_tot*N_tot} matrix of distances between the locations), \code{data_manifold_tot}
#' (list or array \cr [\code{p,p,N_tot}] of \code{N_tot} symmetric positive definite matrices of dimension \code{p*p}),
#' \code{coords_tot} (\code{N_tot*2} or \code{N_tot*3} matrix of [lat,long], [x,y] or [x,y,z] coordinates),
#' \code{X_tot} (matrix with N_tot rows and unrestricted number of columns of additional covariates for the tangent space model, possibly NULL),
#' \code{h_max} (maximum value of distance for which the variogram is computed),
#' \code{indexes_model} (indexes of the N_tot data corresponding to the N data in the cell).
#' @param new_coords matrix of coordinates for the M new locations where to perform kriging
#' @param X_new matrix (with the same number of rows of \code{new_coords}) of additional covariates for the new locations, possibly NULL
#' @param create_pdf_vario boolean. If \code{TRUE} the empirical and fitted variograms are plotted in a pdf file
#' @param pdf_parameters list with the fields \code{test_nr} and \code{sample_draw}. Additional parameters to name the pdf
#' @param suppressMes boolean. If \code{TRUE} warning messages are not printed
#' @param weight_extrinsic vector of length \code{N} to weight the locations in the computation of the extrinsic mean. If NULL
#' weight_intrinsic are used. Needed only if \code{Sigma} is not provided and \code{metric_manifold== "Correlation"}
#' @param tolerance_map_cor tolerance to use in the maps.\cr Required only if \code{metric_manifold== "Correlation"}
#' @return list with the following fields:
#' \item{\code{beta}}{ vector of the beta matrices of the fitted model}
#' \item{\code{gamma_matrix}}{ \code{N*N} covariogram matrix}
#' \item{\code{residuals}}{ vector of the \code{N} residual matrices}
#' \item{\code{emp_vario_values}}{ vector of empirical variogram values in correspondence of \code{h_vec}}
#' \item{\code{h_vec}}{ vector of positions at which the empirical variogram is computed}
#' \item{\code{fitted_par_vario}}{ estimates of \emph{nugget}, \emph{sill-nugget} and \emph{practical range}}
#' \item{\code{iterations}}{ number of iterations of the main loop}
#' \item{\code{Sigma}}{ tangent point}
#' \item{\code{prediction}}{ vector of matrices predicted at the new locations}
#' @description Given the coordinates and corresponding manifold values, this function firstly creates a GLS model on the tangent space, and then
#' performs kriging on the new locations.
#' @details The manifold values are mapped onto the tangent space and a GLS model is fitted to them. A first estimate of the beta coefficients
#' is obtained assuming spatially uncorrelated errors. Then, in the main loop, new estimates of the beta are obtained by solving a
#' weighted least squares problem whose weight matrix is the inverse of \code{gamma_matrix}. The residuals \cr
#' \code{(residuals = data_ts - fitted)}
#' are updated accordingly. The parameters of the variogram fitted to the residuals (and used in the evaluation of the \code{gamma_matrix}) are
#' computed with the Gauss-Newton method with backtracking, solving the associated non-linear least squares problem. The stopping criterion is based on the
#' absolute value of the variogram residuals' norm if \code{ker.width.vario=0}, and on its increment otherwise.
#' Once the model is computed, simple kriging on the tangent space is performed at the new locations and finally
#' the estimates are mapped back to the manifold.
#' @references D. Pigoli, A. Menafoglio & P. Secchi (2016):
#' Kriging prediction for manifold-valued random fields.
#' Journal of Multivariate Analysis, 145, 117-131.
#' @examples
#' data_manifold_tot <- Manifoldgstat::fieldCov
#' data_manifold_model <- Manifoldgstat::rCov
#' coords_model <- Manifoldgstat::rGrid
#' coords_tot <- Manifoldgstat::gridCov
#' Sigma <- matrix(c(2,1,1,1), 2,2)
#'
#' result = model_kriging (data_manifold = data_manifold_model, coords = coords_model,
#' Sigma = Sigma, metric_manifold = "Frobenius",
#' metric_ts = "Frobenius", model_ts = "Coord1",
#' vario_model = "Spherical", n_h = 15, distance = "Eucldist",
#' max_it = 100, tolerance = 10e-7, new_coords = coords_model)
#' result_tot = model_kriging (data_manifold = data_manifold_model, coords = coords_model,
#' metric_ts = "Frobenius", Sigma = Sigma,
#' metric_manifold = "Frobenius", model_ts = "Coord1",
#' vario_model = "Spherical", n_h = 15, distance = "Eucldist",
#' max_it = 100, tolerance = 10e-7, new_coords = coords_tot,
#' create_pdf_vario = FALSE)
#' x.min=min(coords_tot[,1])
#' x.max=max(coords_tot[,1])
#' y.min=min(coords_tot[,2])
#' y.max=max(coords_tot[,2])
#' dimgrid=dim(coords_tot)[1]
#' radius = 0.02
#'
#' par(cex=1.25)
#' plot(0,0, asp=1, col=fields::tim.colors(100), ylim=c(y.min,y.max), xlim=c(x.min, x.max),
#' pch='', xlab='', ylab='', main = "Real Values")
#' for(i in 1:dimgrid){
#' if(i %% 3 == 0)
#' car::ellipse(c(coords_tot[i,1],coords_tot[i,2]) , data_manifold_tot[,,i],
#' radius=radius, center.cex=.5, col='navyblue')
#' }
#' rect(x.min, y.min, x.max, y.max)
#'
#' for(i in 1:250)
#' { car::ellipse(c(coords_model[i,1],coords_model[i,2]) , data_manifold_model[,,i],
#' radius=radius, center.cex=.5, col='green')}
#' rect(x.min, y.min, x.max, y.max)
#'
#' par(cex=1.25)
#' plot(0,0, asp=1, col=fields::tim.colors(100), ylim=c(y.min,y.max),xlim=c(x.min, x.max),
#' pch='', xlab='', ylab='',main = "Predicted values")
#'
#' for(i in 1:dimgrid){
#' if(i %% 3 == 0)
#' car::ellipse(c(coords_tot[i,1],coords_tot[i,2]), result_tot$prediction[[i]],
#' radius=radius, center.cex=.5, col='navyblue' )
#' }
#' rect(x.min, y.min, x.max, y.max)
#'
#' for(i in 1:250)
#' { car::ellipse(c(rGrid[i,1],rGrid[i,2]), result$prediction[[i]],radius=radius,
#' center.cex=.5, col='red')
#' }
#' rect(x.min, y.min, x.max, y.max)
#' @useDynLib Manifoldgstat
#' @export
#'
model_kriging = function(data_manifold, coords, X = NULL, Sigma = NULL, metric_manifold = "Frobenius",
metric_ts = "Frobenius", model_ts = "Additive", vario_model = "Gaussian",
n_h=15, distance = NULL, data_dist_mat=NULL, data_grid_dist_mat=NULL, max_it = 100, tolerance = 1e-6, weight_intrinsic = NULL,
tolerance_intrinsic = 1e-6, max_sill=NULL, max_a=NULL, param_weighted_vario = NULL,
new_coords, X_new = NULL, create_pdf_vario = TRUE, pdf_parameters=NULL, suppressMes = FALSE, weight_extrinsic=NULL, tolerance_map_cor=1e-6){
if ((metric_manifold=="Correlation" && metric_ts !="Correlation")
|| (metric_manifold!="Correlation" && metric_ts =="Correlation"))
stop("Either metric_manifold and metric_ts are both Correlation, or none of them")
coords = as.matrix(coords)
new_coords = as.matrix(new_coords)
N = dim(coords)[1]
M = dim(new_coords)[1]
if(is.null(distance)) {
if ((is.null(data_grid_dist_mat)+is.null(data_dist_mat))!=0)
stop("If distance is NULL data_dist_mat and data_grid_dist_mat must be provided")
else {
# Check matrix dimensions
if(dim(data_dist_mat)[1]!=N || dim(data_dist_mat)[2]!=N) stop("data_dist_mat must be an N*N matrix")
if(dim(data_grid_dist_mat)[1]!=N || dim(data_grid_dist_mat)[2]!=M) stop("data_grid_dist_mat must be an N*M matrix")
}
}
else {
if ((is.null(data_grid_dist_mat)+is.null(data_dist_mat))!=2)
warning("Since distance is not NULL parameters data_dist_mat and data_grid_dist_mat will be discarded")
if ( distance == "Geodist" & dim(coords)[2] != 2){
stop("Geodist requires two coordinates")
}
}
if(!is.null(X)) {
X = as.matrix(X)
check = (dim(X)[1] == dim(coords)[1])
if(!check) stop("X and coords must have the same number of rows")
if(is.null(X_new)) stop("If X is provided, X_new must be provided as well")
else {
X_new = as.matrix(X_new)
check = (dim(X_new)[1] == dim(new_coords)[1])
if(!check) stop("X_new and new_coords must have the same number of rows")
if (dim(X)[2]!=dim(X_new)[2]) stop("X and X_new must have the same number of columns")
}
}
else {
if (!is.null(X_new)) stop("X_new was provided but X is NULL: provide both or neither")
}
if( is.array(data_manifold)){
data_manifold = alply(data_manifold,3)
}
if(length(data_manifold) != dim(coords)[1]){
stop("Dimension of data_manifold and coords must agree")
}
if (metric_manifold=="Correlation" && any(diag(data_manifold[[1]]) != rep(1,dim(data_manifold[[1]])[1])))
stop("Manifold data must be correlation matrices")
if(is.null(Sigma)){
if(is.null(weight_intrinsic)) weight_intrinsic = rep(1, length(data_manifold))
# if(metric_manifold=="Correlation" && is.null(weight_extrinsic)) {weight_extrinsic = weight_intrinsic}
if(is.null(weight_extrinsic)) {weight_extrinsic = weight_intrinsic}
}
else{
if(metric_manifold == "Correlation" && any(diag(Sigma) != rep(1, dim(Sigma)[1])))
stop("Sigma must be a correlation matrix")
}
# check that this else refers to the previous if
if(!is.null(param_weighted_vario)){
param_weighted_vario$coords_tot = as.matrix(param_weighted_vario$coords_tot)
N_tot = length(param_weighted_vario$weight_vario)
if(is.array(param_weighted_vario$data_manifold_tot)){
param_weighted_vario$data_manifold_tot = alply(param_weighted_vario$data_manifold_tot,3)
}
if ( (dim(param_weighted_vario$coords_tot)[1] != N_tot) ||
length(param_weighted_vario$data_manifold_tot) != N_tot ||
dim(param_weighted_vario$distance_matrix_tot)[1] != N_tot ||
dim(param_weighted_vario$distance_matrix_tot)[2] != N_tot){
stop("Dimensions of weight_vario, coords_tot, data_manifold_tot and distance_matrix_tot must agree")
}
if(!is.null(param_weighted_vario$X_tot)) {
param_weighted_vario$X_tot = as.matrix(param_weighted_vario$X_tot)
check = (dim(param_weighted_vario$X_tot)[1] == N_tot && dim(param_weighted_vario$X_tot)[2]==dim(X)[2])
if(!check) stop("X_tot must have the same number of rows of coords_tot and the same number of columns of X")
}
if(length(param_weighted_vario) != 7) stop("Param_weight_vario must be a list with length 7")
result =.Call("get_model_and_kriging",data_manifold, coords,X, Sigma, distance, data_dist_mat, data_grid_dist_mat, metric_manifold, metric_ts, model_ts, vario_model,
n_h, max_it, tolerance, max_sill, max_a, param_weighted_vario$weight_vario, param_weighted_vario$distance_matrix_tot,
param_weighted_vario$data_manifold_tot, param_weighted_vario$coords_tot, param_weighted_vario$X_tot,
param_weighted_vario$indexes_model, weight_intrinsic, tolerance_intrinsic, weight_extrinsic, new_coords, X_new, suppressMes, tolerance_map_cor )
}
else {
result =.Call("get_model_and_kriging",data_manifold, coords,X, Sigma, distance, data_dist_mat, data_grid_dist_mat, metric_manifold, metric_ts, model_ts, vario_model,
n_h, max_it, tolerance, max_sill, max_a, weight_vario = NULL, distance_matrix_tot = NULL, data_manifold_tot = NULL,
coords_tot = NULL, X_tot = NULL, indexes_model = NULL, weight_intrinsic, tolerance_intrinsic, weight_extrinsic, new_coords, X_new, suppressMes, tolerance_map_cor)
}
empirical_variogram = list(emp_vario_values = result$emp_vario_values, h = result$h_vec)
fitted_variogram = list(fit_vario_values = result$fit_vario_values, hh = result$hh)
if(create_pdf_vario){
if (is.null(pdf_parameters)) pdf("Variogram-Method-SingleCell.pdf", width=14, height=7)
else pdf(paste0("Variogram-Method-SingleCell-Test_nr-", pdf_parameters$test_nr,"-Sample_draw-", pdf_parameters$sample_draw,".pdf"), width=14, height=7)
plot_variogram(empirical_variogram = empirical_variogram, fitted_variogram = fitted_variogram, model = vario_model,
distance = distance)
dev.off()
}
result_list = result[-c(2,3)]
return (result_list)
}
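# Illustrative sketch (not part of the package): the iterative GLS loop described in the
# @details section above, written out in plain R. fit_variogram() and gamma_from_vario()
# are hypothetical placeholders; the actual computation happens in the compiled routine
# "get_model_and_kriging".
# beta <- solve(crossprod(X), crossprod(X, data_ts))                       # start: OLS, uncorrelated errors
# for (it in seq_len(max_it)) {
#   res      <- data_ts - X %*% beta                                       # tangent-space residuals
#   vario    <- fit_variogram(res, coords)                                 # Gauss-Newton fit of the variogram
#   G        <- gamma_from_vario(vario, dist_mat)                          # covariogram matrix
#   beta_new <- solve(t(X) %*% solve(G, X), t(X) %*% solve(G, data_ts))    # weighted least squares update
#   if (max(abs(beta_new - beta)) < tolerance) { beta <- beta_new; break }
#   beta <- beta_new
# }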
|
/R/model_kriging.R
|
no_license
|
chencaf/KrigingManifoldData
|
R
| false | false | 15,170 |
r
|
# DMA: Dynamic model averaging
rm(list = ls(all = TRUE))
#setwd('//paradis/eleves//NSALEILLE//Bureau//sauvegarde_ofpr//data')
setwd('/Users/nicolassaleille/Dropbox/ofpr')
source('./scripts/dynamic_model_averaging/predict_update.R')
# toy model
m <- 4
d <- 2
T <- 100
beta <- matrix(1, nrow = m, ncol = d)
X <- matrix(data = rnorm(m*T), nrow = T, ncol = m)
#colnames(X) <- past('VAR', seq(1:m))
y <- X%*%beta + matrix(rnorm(d*T), ncol = d)
K <- 2^m
# parameters
lambda <- 0.99 # forgetting factor
alpha <- 0.99 # forgetting factor
# construction of the model space
models <- lapply(seq(1:m), FUN = combn, x = m, simplify = FALSE)
models <- unlist(models, recursive = FALSE)
models <- lapply(models, FUN = function(x){return(list(name = paste('model', paste(x, collapse=''), sep = '_'), vars = x))})
model_names <- sapply(models, FUN = function(x){return(x$name)})
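# For m = 4 this enumerates the 2^4 - 1 = 15 non-empty predictor subsets
# (model_1, model_2, ..., model_12, ..., model_1234); note that K = 2^m above
# counts one model more, since the empty model is not generated here.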
# priors
prob0 <- rep(1/length(models), length(models)) # uniform prior over the candidate models (must sum to 1)
theta0 <- 0 # only supports a scalar value for now
# for each model, we keep track of the estimated values in a list of matrix
# one matrix for each model
theta <- lapply(models, FUN = function(x){return(matrix(nrow = nrow(X), ncol = length(x$vars)))})
theta <- lapply(theta, FUN = function(x){x[1,] <- theta0; return(x)})
names(theta) <- model_names
sigma <- lapply(models, FUN = function(x){list()})
prob <- matrix(nrow = nrow(X), ncol = length(models))
colnames(prob) <- model_names
prob[1, ] <- prob0
t = 3
results <- lapply(models, FUN = predict_update, t = t, y_obs = y, vars = X,
theta = theta, sigma = sigma, prob_last = prob[t-1,],
lambda = lambda, alpha = alpha)
theta_up <- sapply(results, FUN = function(x){return(x$theta_up)})
theta <- mapply(FUN = function(x,y){x[t,] <- y; return(x)}, theta, theta_up)
# The exploratory lines below were syntactically incomplete or referenced undefined
# objects (model_name, models used as an index); they are kept commented out for reference.
# The mapply() call above already writes theta_up back into each model's matrix;
# an explicit equivalent would be:
# for (k in seq_along(models)) theta[[models[[k]]$name]][t, ] <- theta_up[[k]]
# test <- lapply(merge(theta, theta_up), FUN = function(x) x)
# names(test) <- rep(model_names, 2)
# attributes(test)
# length(test)
# lapply(list(theta, theta_up), FUN = length)
# unlist(theta_up) # tapply ???
# model_dims <- sapply(theta_up, FUN = length)
names(results) <- model_names
model <- models[[14]]
prob_last <- prob[t-1,]
vars <- X
y_obs <- y
# select model
y_obs <- y_obs[t,]
X <- vars[t,model$vars]
theta_last <- theta[[model$name]][t-1,]
sigma_last <- diag(length(model$vars)) # !!!!!!
# predict for one model / one step ahead
theta_pred <- theta_last # F: identity transition matrix
sigma_pred <- (1/lambda) * sigma_last
prob_pred <- prob_last^alpha / sum(prob_last^alpha) # forgetting step: powered probabilities, renormalised
y_pred <- X %*% theta_pred
# Update for one model / one step ahead
error <- y_obs - y_pred
S <- sigma_pred
xSx <- t(X) %*% S %*% X
R <- (1/t)*(error%*%t(error)) # !!!!
F_inv <- solve(R + xSx)
theta_up <- theta_pred + S %*% X %*% F_inv %*% (y_obs - X %*% theta_pred)
sigma_up <- S - S %*% X %*% F_inv %*% X %*% S
weight <- pnorm(q = y_obs, mean = X %*% theta_pred, sd = sqrt(R + xSx)) * prob_pred[model$name]
# NOTE: DMA weight updates normally use the one-step-ahead predictive density (dnorm), not the CDF (pnorm)
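# Hypothetical next step (not in the original script): once every model has been predicted
# and updated at time t, the DMA forecast is the probability-weighted average of the
# per-model predictions, and the updated model probabilities are the normalised weights.
# Sketch, assuming 'results' holds y_pred and weight for each model:
# y_pred_all <- sapply(results, function(x) x$y_pred)
# weights    <- sapply(results, function(x) x$weight)
# prob[t, ]  <- weights / sum(weights)          # updated model probabilities
# y_dma      <- sum(prob[t, ] * y_pred_all)     # model-averaged one-step-ahead forecast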
|
/main_scripts/old/dma.R
|
no_license
|
HanjoStudy/gdp_forecasts
|
R
| false | false | 2,955 |
r
|
# Loading libraries
library(tidyverse)
library(lubridate)
library(ggplot2)
library(caret)
library(rattle)
library(fpc)
library(dbscan)
# Importing the data set
vivi03 <- read.csv("C:/Users/B037123/Documents/06 - Red Flag Siniestros Vivienda/SINIESTROS_VIVI_201811_201903_v2.txt", sep="|",header = T, encoding = "UTF-8")
glimpse(vivi03)
# Counting types of circuits
table(vivi03$CIRCUITO_LIQUIDACION_ID)
# Eliminating the 1 type 5 case
vivi03 <- vivi03 %>% filter(CIRCUITO_LIQUIDACION_ID <= 3)
# Circuito de Liquidación
# 1 = Fast Track
# 2 = Administrativo
# 3 = Liquidador
# 4 = Judicial
# 1) Creating Time Variables for analysis
# 2) Creating index between Cobertura and Damage estimated by Client
# 3) Percentage of previous Siniestros from Fast Track
vivi03 <- vivi03 %>%
mutate(MONTHS_SINIESTRO_FIRSTDATE = interval(dmy(PRIMER_POLIZA_FC), dmy(OCURRENCIA_FC)) %/% months(1),
MONTHS_SINIESTRO_DECLARACION = interval(dmy(DECLARACION_FC), dmy(OCURRENCIA_FC)) %/% months(1),
MONTHS_SINIESTRO_INICIO_POL = interval(dmy(VIGENCIA_CERTIFICADO_DESDE_FC), dmy(OCURRENCIA_FC)) %/% months(1),
MONTHS_SINIESTRO_FIN_POL = interval(dmy(VIGENCIA_CERTIFICADO_HASTA_FC), dmy(OCURRENCIA_FC)) %/% months(1),
INDEX_DECLARA_COBERTURA = round(ESTIMACION_DAÑO_CLIENTE_DE / SUMA_ASEGURADA_DE, 4),
PERC_SIN_1MONTH_FS = round(QTY_SINIESTRO_1MONTH_FS / QTY_SINIESTRO_1MONTH, 4),
PERC_SIN_3MONTH_FS = round(QTY_SINIESTRO_3MONTH_FS / QTY_SINIESTRO_3MONTH, 4),
PERC_SIN_6MONTH_FS = round(QTY_SINIESTRO_6MONTH_FS / QTY_SINIESTRO_6MONTH, 4)
) %>%
replace(is.na(.),0)
# Checking that the %'s make sense
vivi03 %>% group_by(SINIESTRO_ID) %>% filter(PERC_SIN_6MONTH_FS > 1) %>% summarise(n = n())
vivi03 %>% group_by(SINIESTRO_ID) %>% filter(PERC_SIN_3MONTH_FS > 1) %>% summarise(n = n())
# Dropping rows that make no sense: some Certificados are duplicated in the query; the data should be at Siniestro_Id level, not Certificado level
vivi03 <- vivi03 %>% filter(PERC_SIN_6MONTH_FS <= 1)
vivi03 <- vivi03 %>% filter(PERC_SIN_3MONTH_FS <= 1)
#####################
# DISCOVERY
##################################
# Approval rate by Circuito
vivi03 %>%
group_by(CIRCUITO_LIQUIDACION_ID, ESTADO_SINIESTRO_TX) %>%
summarise(n = n())
# Since many claims remain open, keep only the Rejected and Paid
# claims from Circuito 2 (Analyst)
#########
# Comparison by policy start and end dates (with and without renewal)
########
vivi03 %>%
filter(ESTIMACION_DAÑO_CLIENTE_DE < 250000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=MONTHS_SINIESTRO_FIRSTDATE, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_point(alpha=0.5) + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(ESTIMACION_DAÑO_CLIENTE_DE < 100000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=MONTHS_SINIESTRO_INICIO_POL, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_point(alpha=0.5) + guides(color=guide_legend(title="Circuito"))
with(vivi03,prop.table(table(CIRCUITO_LIQUIDACION_ID, MONTHS_SINIESTRO_INICIO_POL),1))
# 23% of Fast Track claims occur less than 3 months after the policy renewed
with(vivi03[vivi03$MONTHS_SINIESTRO_FIRSTDATE < 37 & vivi03$MONTHS_SINIESTRO_FIRSTDATE > 11,],prop.table(table(CIRCUITO_LIQUIDACION_ID, MONTHS_SINIESTRO_INICIO_POL),1))
with(vivi03,prop.table(table(CIRCUITO_LIQUIDACION_ID, MONTHS_SINIESTRO_FIRSTDATE),1))
vivi03 %>%
filter(MONTHS_SINIESTRO_FIRSTDATE < 49, MONTHS_SINIESTRO_INICIO_POL <= 6) %>%
ggplot(aes(x=MONTHS_SINIESTRO_INICIO_POL,y=MONTHS_SINIESTRO_FIRSTDATE, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_jitter(alpha=0.5, height = 1, width = 1) + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(MONTHS_SINIESTRO_FIRSTDATE < 61) %>%
ggplot(aes(x=MONTHS_SINIESTRO_FIN_POL,y=MONTHS_SINIESTRO_FIRSTDATE, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_jitter(alpha=0.5, height = 1, width = 1) + guides(color=guide_legend(title="Circuito"))
#########
# Comparison by insured sums and declared damages
########
vivi03 %>%
filter(ESTIMACION_DAÑO_CLIENTE_DE < 100000, SUMA_ASEGURADA_DE < 3000000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=SUMA_ASEGURADA_DE, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_point(alpha=0.5) + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(INDEX_DECLARA_COBERTURA < 5, ESTIMACION_DAÑO_CLIENTE_DE < 15000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=INDEX_DECLARA_COBERTURA, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_point(alpha=0.3) + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(ESTIMACION_DAÑO_CLIENTE_DE < 30000, SUMA_ASEGURADA_DE < 35000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=SUMA_ASEGURADA_DE, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_point(alpha=0.3) + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(ESTIMACION_DAÑO_CLIENTE_DE < 30000, SUMA_ASEGURADA_DE < 35000) %>%
ggplot(aes(x=CIRCUITO_LIQUIDACION_ID,y=INDEX_DECLARA_COBERTURA, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_boxplot() + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(INDEX_DECLARA_COBERTURA < 3, ESTIMACION_DAÑO_CLIENTE_DE < 250000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=MONTHS_SINIESTRO_INICIO_POL, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_point() + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(MONTHS_SINIESTRO_FIRSTDATE < 150) %>%
ggplot(aes(x=MONTHS_SINIESTRO_INICIO_POL,y=MONTHS_SINIESTRO_FIRSTDATE, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_jitter(alpha=0.5, width = 1, height = 1) + guides(color=guide_legend(title="Circuito"))
vivi03 %>%
filter(MONTHS_SINIESTRO_FIRSTDATE < 25) %>%
ggplot(aes(x=MONTHS_SINIESTRO_INICIO_POL,y=MONTHS_SINIESTRO_FIRSTDATE, color=as.factor(PRODUCTO_EMPLEADOS_FL))) +
geom_jitter(alpha=0.5, width = 1, height = 1) + guides(color=guide_legend(title="Empleado"))
vivi03 %>%
filter(ESTIMACION_DAÑO_CLIENTE_DE < 55000, SUMA_ASEGURADA_DE < 150000) %>%
ggplot(aes(x=ESTIMACION_DAÑO_CLIENTE_DE,y=SUMA_ASEGURADA_DE, color=as.factor(PRODUCTO_EMPLEADOS_FL))) +
geom_point(alpha=0.5) + guides(color=guide_legend(title="Empleado"))
vivi03 %>%
#filter(ESTIMACION_DAÑO_CLIENTE_DE < 55000, SUMA_ASEGURADA_DE < 150000) %>%
ggplot(aes(x=as.factor(PRODUCTO_EMPLEADOS_FL),y=MONTHS_SINIESTRO_INICIO_POL, color=as.factor(PRODUCTO_EMPLEADOS_FL))) +
geom_boxplot() + guides(color=guide_legend(title="Empleado")) + xlab("Empleado = S, No Empleado = N")
with(vivi03, prop.table(table(PRODUCTO_EMPLEADOS_FL,ESTADO_SINIESTRO_TX),1))
with(vivi03, prop.table(table(CIRCUITO_LIQUIDACION_ID,PRODUCTO_EMPLEADOS_FL),2))
with(vivi03, prop.table(table(CIRCUITO_LIQUIDACION_ID,ESTADO_SINIESTRO_TX,PRODUCTO_EMPLEADOS_FL),1))
vivi03 %>% group_by(PRODUCTO_EMPLEADOS_FL) %>% summarise(avg = mean(MONTHS_SINIESTRO_FIRSTDATE),
med = median(MONTHS_SINIESTRO_FIRSTDATE))
vivi03 %>% filter(CIRCUITO_LIQUIDACION_ID == 2) %>% group_by(PRODUCTO_EMPLEADOS_FL) %>% summarise(avg = mean(ESTIMACION_DAÑO_CLIENTE_DE),
med = median(ESTIMACION_DAÑO_CLIENTE_DE))
#########
# Comparison by previous claims
########
siniestro <- vivi03 %>%
select(CIRCUITO_LIQUIDACION_ID,
ESTIMACION_DAÑO_CLIENTE_DE,
SUMA_ASEGURADA_DE,QTY_SINIESTRO_1MONTH_FS,
QTY_SINIESTRO_1MONTH,QTY_SINIESTRO_3MONTH,QTY_SINIESTRO_6MONTH,
QTY_SINIESTRO_3MONTH_FS, QTY_SINIESTRO_6MONTH_FS,
MONTHS_SINIESTRO_FIRSTDATE, MONTHS_SINIESTRO_DECLARACION,
MONTHS_SINIESTRO_INICIO_POL,MONTHS_SINIESTRO_FIN_POL,
INDEX_DECLARA_COBERTURA,PERC_SIN_1MONTH_FS,
PERC_SIN_3MONTH_FS,PERC_SIN_6MONTH_FS) %>%
replace(is.na(.),0)
# Previous claims
siniestro %>%
ggplot(aes(x=as.factor(CIRCUITO_LIQUIDACION_ID),y=QTY_SINIESTRO_1MONTH)) + geom_boxplot()
siniestro %>%
ggplot(aes(x=as.factor(CIRCUITO_LIQUIDACION_ID),y=QTY_SINIESTRO_3MONTH)) + geom_boxplot()
siniestro %>%
ggplot(aes(x=as.factor(CIRCUITO_LIQUIDACION_ID),y=QTY_SINIESTRO_6MONTH)) + geom_boxplot()
# Analyze percentages
# % of FS claims over the total, by Circuito
siniestro %>%
filter(INDEX_DECLARA_COBERTURA < 10) %>%
ggplot(aes(x = PERC_SIN_6MONTH_FS, y=INDEX_DECLARA_COBERTURA, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_jitter(alpha=0.4) + guides(color=guide_legend(title="Circuito"))
siniestro %>%
filter(INDEX_DECLARA_COBERTURA < 10) %>%
ggplot(aes(x = QTY_SINIESTRO_3MONTH, y=QTY_SINIESTRO_6MONTH, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_jitter(alpha=0.4) + guides(color=guide_legend(title="Circuito"))
siniestro %>%
filter(INDEX_DECLARA_COBERTURA < 10, ESTIMACION_DAÑO_CLIENTE_DE < 30000, SUMA_ASEGURADA_DE < 35000) %>%
ggplot(aes(x = QTY_SINIESTRO_6MONTH, y=PERC_SIN_6MONTH_FS, color=as.factor(CIRCUITO_LIQUIDACION_ID))) +
geom_jitter(alpha=0.4) + guides(color=guide_legend(title="Circuito"))
####################################
###################################
# EXTRACT FAST TRACK RULES
viv_rules <- vivi03 %>%
filter(CIRCUITO_LIQUIDACION_ID < 3) %>%
select(CIRCUITO_LIQUIDACION_ID,
ESTIMACION_DAÑO_CLIENTE_DE,
SUMA_ASEGURADA_DE,
PRODUCTO_EMPLEADOS_FL,
QTY_SINIESTRO_1MONTH_FS,
QTY_SINIESTRO_1MONTH,QTY_SINIESTRO_3MONTH,QTY_SINIESTRO_6MONTH,
QTY_SINIESTRO_3MONTH_FS, QTY_SINIESTRO_6MONTH_FS,
MONTHS_SINIESTRO_FIRSTDATE, MONTHS_SINIESTRO_DECLARACION,
MONTHS_SINIESTRO_INICIO_POL,MONTHS_SINIESTRO_FIN_POL,
INDEX_DECLARA_COBERTURA,PERC_SIN_1MONTH_FS,
PERC_SIN_3MONTH_FS,PERC_SIN_6MONTH_FS) %>%
replace(is.na(.),0)
# Factoring the Objective
viv_rules$CIRCUITO_LIQUIDACION_ID <- as.factor(viv_rules$CIRCUITO_LIQUIDACION_ID)
# Setting random seed
set.seed(1956451)
# Training and testing (75-25)
inTrain <- createDataPartition(y = viv_rules$CIRCUITO_LIQUIDACION_ID, p = .75, list = FALSE)
train_fs <- viv_rules[inTrain,]
test_fs <- viv_rules[-inTrain,]
# Shallow tree with 10-fold CV; the goal is readable rules rather than generalisation
fit_control <- trainControl(method = "cv", number = 10)
rf_fit <- train(CIRCUITO_LIQUIDACION_ID ~ ESTIMACION_DAÑO_CLIENTE_DE +
PRODUCTO_EMPLEADOS_FL +
SUMA_ASEGURADA_DE +
QTY_SINIESTRO_3MONTH +
QTY_SINIESTRO_3MONTH_FS +
QTY_SINIESTRO_6MONTH +
MONTHS_SINIESTRO_DECLARACION +
MONTHS_SINIESTRO_FIRSTDATE +
MONTHS_SINIESTRO_INICIO_POL +
INDEX_DECLARA_COBERTURA,
data = train_fs, # trying to overfit...
trControl = fit_control,
method = "rpart2", maxdepth = 5)
# Predicting with the model and comparing
predict_fs <- predict(rf_fit, newdata = test_fs)
# 93.46% accuracy for FastTrack category (Sensitivity)
confusionMatrix(data = predict_fs, test_fs$CIRCUITO_LIQUIDACION_ID)
# Extracting the rules: Daño Declarado, Indice Declarado/Cobertura, Siniestros last 3 months
fancyRpartPlot(rf_fit$finalModel)
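# Optional (assumption: the fitted object is an rpart tree, as produced by method = "rpart2"):
# a plain-text view of the same splits can be handy when exporting the rules.
# print(rf_fit$finalModel)
# rattle::asRules(rf_fit$finalModel)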
############################
# PCA
# Build principal components from the most important variables
# Replace the NAs with 0
vivi_pca <- vivi03 %>%
select(SINIESTRO_ID,CIRCUITO_LIQUIDACION_ID,
QTY_SINIESTRO_3MONTH, QTY_SINIESTRO_6MONTH,
QTY_SINIESTRO_3MONTH_FS, QTY_SINIESTRO_6MONTH_FS,
MONTHS_SINIESTRO_FIRSTDATE,
MONTHS_SINIESTRO_INICIO_POL,MONTHS_SINIESTRO_FIN_POL,
PERC_SIN_3MONTH_FS,PERC_SIN_6MONTH_FS) %>%
replace(is.na(.),0)
# Checking everything is OK
glimpse(vivi_pca)
# Find correlations for PCA variables
pairs(vivi_pca[,7:9], col = as.factor(vivi_pca[,2]), upper.panel = NULL, pch = 16, cex = 0.5)
pairs(vivi_pca[,3:6], col = as.factor(vivi_pca[,2]), upper.panel = NULL, pch = 16, cex = 0.5)
# Principal Component Analysis
set.seed(110245)
pca_time <- prcomp(vivi_pca[,3:8], scale = T, center = T)
pca_sin <- prcomp(vivi_pca[,3:6], scale = T, center = T)
# Cumulative proportion of variance explained by the PCs
summary(pca_time)
summary(pca_sin)
# Variable loadings of each principal component
pca_time$rotation[,1:4]
# Data set values projected onto the first 2 principal components
vivi_pca2 <- cbind(vivi_pca[,1:2],pca_time$x[,1:2])
names(vivi_pca2) <- c("ID_Certificado","circuito","pca1","pca2")
# Plot each claim on the first two PCs, coloured by Circuito
vivi_pca2 %>%
ggplot(aes(x=pca1, y=pca2, col=as.factor(circuito))) +
geom_jitter(alpha=0.4, height= 0.01, width = 0.01) + guides(color=guide_legend(title="Circuito"))
#############################
#
# Taking each Circuit separately for cumulative sum
cum_circuito_inicio <- data.frame(cbind(
seq(0,11,1),
cumsum(with(vivi03[vivi03$CIRCUITO_LIQUIDACION_ID == 1,],prop.table(table(MONTHS_SINIESTRO_INICIO_POL)))),
cumsum(with(vivi03[vivi03$CIRCUITO_LIQUIDACION_ID == 2,],prop.table(table(MONTHS_SINIESTRO_INICIO_POL)))),
cumsum(with(vivi03[vivi03$CIRCUITO_LIQUIDACION_ID == 3,],prop.table(table(MONTHS_SINIESTRO_INICIO_POL))))
))
cum_circuito_inicio[12,4] <- 1
names(cum_circuito_inicio) <- c("Time","Circuito_1","Circuito_2","Circuito_3")
# Plot the cumulative % of claims by circuit
# Fast Track claims tend to occur further from the start of the current policy
ggplot(cum_circuito_inicio, aes(x=Time, y=Circuito_1)) +
geom_line(color="blue") +
geom_line(aes(y=Circuito_2), color = "red") +
geom_line(aes(y=Circuito_3), color = "darkgreen")
#######################
# Clustering
library(cluster)
set.seed(43278)
# Mutating the Employee Flag to numeric
vivi03 <- vivi03 %>%
mutate(MARCA_EMPLEADO = case_when(vivi03$PRODUCTO_EMPLEADOS_FL == "S" ~ 1.0,
TRUE ~ -1.0))
# Using amount + index + claim counts + time variables (the PCA columns are left commented out below)
vivi_final <- vivi03 %>%
select(SINIESTRO_ID, CIRCUITO_LIQUIDACION_ID,
MARCA_EMPLEADO,
ESTIMACION_DAÑO_CLIENTE_DE,INDEX_DECLARA_COBERTURA,
QTY_SINIESTRO_3MONTH,QTY_SINIESTRO_3MONTH_FS,
QTY_SINIESTRO_6MONTH,QTY_SINIESTRO_6MONTH_FS,
MONTHS_SINIESTRO_FIRSTDATE, MONTHS_SINIESTRO_INICIO_POL)
# cbind(vivi03[,c(1,5,6,7,12,15,17,19,21)],pca$x[,1])
# Determine the optimal amount of Clusters
library(factoextra)
fviz_nbclust(scale(vivi_final[,-c(1,2)]), kmeans, method = "wss") +
geom_vline(xintercept = 5, linetype = 2) +
labs(subtitle = "Elbow method")
# k = 7
fviz_nbclust(scale(vivi_final[,-c(1,2)]), kmeans, method = "silhouette") +
labs(subtitle = "Silhouette method")
# k = 2
fviz_nbclust(scale(vivi_final[,-c(1,2)]), kmeans, nstart = 25, method = "gap_stat", nboot = 50) +
labs(subtitle = "Gap statistic method")
# k = 9, but no convergence
# Now creating the clusters using Kmeans
vivi_kmeans <- kmeans(scale(vivi_final[,-c(1,2)]), 7)
vivi_kmeans$cluster
vivi_kmeans$centers
####################
# DBscan Clustering
set.seed(43278)
dbscan::kNNdistplot(scale(vivi_final[,-c(1,2)]), k=10)
abline(h = 2.5, lty = 2)
vivi_dbscan <- fpc::dbscan(scale(vivi_final[,-c(1,2)]), eps=4)
# eps = 1.5 => 18 clusters
# eps = 1.6 => 18 clusters
# eps = 1 => 33 clusters
# eps = 2 => 4 clusters
# eps = 1.7 => 16 clusters
# eps = 1.75 => 10 clusters
# eps = 1.77 => 10 clusters
# eps = 1.8 => 10 clusters
# eps = 1.85 => 5 clusters
# eps = 1.9 => 8 clusters
table(vivi_dbscan$cluster)
###########################################################
# Defining the new data set for clustering rule extraction
clusters <- data.frame(vivi_kmeans$cluster)
clusters_scan <- data.frame(vivi_dbscan$cluster)
other_data <- data.frame(vivi_final[,-c(1)])
#pca_data <- data.frame(vivi2[,c(3,4,5,6,7,8,9)])
vivi_tree_set <- data.frame(cbind(clusters, other_data))
vivi_scan_set <- data.frame(cbind(clusters_scan, other_data))
names(vivi_tree_set) <- c("cluster","circuito","flag_empleado",
"estimacion_daño","indice_cociente",
"qty_sin_3m","qty_sin_3m_fs",
"qty_sin_6m","qty_sin_6m_fs",
"months_first_date","months_inicio_pol")
names(vivi_scan_set) <- c("cluster","circuito","flag_empleado",
"estimacion_daño","indice_cociente",
"qty_sin_3m","qty_sin_3m_fs",
"qty_sin_6m","qty_sin_6m_fs",
"months_first_date","months_inicio_pol")
# Factoring the Objective
vivi_tree_set$cluster <- as.factor(vivi_tree_set$cluster)
vivi_tree_set$circuito <- as.factor(vivi_tree_set$circuito)
vivi_scan_set$cluster <- as.factor(vivi_scan_set$cluster)
vivi_scan_set$circuito <- as.factor(vivi_scan_set$circuito)
#####################################################
# Some Discovery of Clusters (before running a tree)
vivi_tree_set %>%
filter(cluster == 4, estimacion_daño < 100000) %>%
ggplot(aes(x=estimacion_daño, y=indice_cociente, color=circuito)) +
geom_jitter(alpha=0.45, width=0.5, height=0.5)
vivi_scan_set %>%
filter(cluster != 1, estimacion_daño < 100000, months_first_date < 37) %>%
ggplot(aes(x=months_first_date, y=months_inicio_pol, color=cluster)) +
geom_jitter(alpha=0.45, width=0.5, height=0.5)
# Circuit by cluster
prop.table(table(vivi_tree_set$cluster, vivi_tree_set$circuito),1)
prop.table(table(vivi_scan_set$cluster, vivi_scan_set$circuito),1)
discovery_scan <- vivi_scan_set %>%
group_by(cluster) %>%
summarise(avg_daño= mean(estimacion_daño),
avg_index= mean(indice_cociente),
avg_sin3= mean(qty_sin_3m),
avg_sin3_fs= mean(qty_sin_3m_fs),
avg_first_date= mean(months_first_date),
avg_inicio_pol= mean(months_inicio_pol),
avg_flag_empleado= mean(flag_empleado),
median_daño= median(estimacion_daño),
median_index= median(indice_cociente),
median_sin3= median(qty_sin_3m),
median_sin3_fs= median(qty_sin_3m_fs),
median_first_date= median(months_first_date),
median_inicio_pol= median(months_inicio_pol),
median_flag_empleado= median(flag_empleado)
) %>%
data.frame()
# Cluster 7 is mainly composed of FS (7% of all FS claims)
# negative PCA1
# damage / coverage ratio well above 1
# very low insured sums
# several previous claims through FS
# Clusters 3 and 4 are mainly composed of Administrativos, but around 10% of FS (check on that)
# Cluster 4:
# young policies (either start of renewal or new)
# low damage/coverage ratio
# Cluster 6 is about big claims (mostly "young" policies)
# Cluster 1 (27% FS)
# biggest cluster
# more previous claims (6 months) than other clusters, many of them through fast track
# damage/coverage ratio around 1
# Cluster 5:
# policies close to renewal
# 75% damage/coverage
# few previous claims through FS
# All Circuitos are distributed more or less equally between clusters
vivi_tree_set %>%
filter(indice_cociente < 15) %>%
ggplot(aes(x=cluster, y=indice_cociente)) +
geom_boxplot()
vivi_tree_set %>%
filter(qty_sin_3m != 0) %>%
ggplot(aes(x=cluster, y=(qty_sin_3m_fs / qty_sin_3m))) +
geom_boxplot()
##################################
# Extracting Rules of Clusters
# Setting random seed
set.seed(1956451)
# Training and testing (75-25)
inTrain <- createDataPartition(y = vivi_scan_set$cluster, p = .75, list = FALSE)
train_fs <- vivi_scan_set[inTrain,]
test_fs <- vivi_scan_set[-inTrain,]
# Shallow tree with 10-fold CV; the goal is readable rules rather than generalisation
fit_control <- trainControl(method = "cv", number = 10)
rf_fit <- train(cluster ~ estimacion_daño + indice_cociente +
qty_sin_3m + qty_sin_3m_fs +
months_first_date + months_inicio_pol,
data = train_fs,
#preProc = c("center", "scale"),
trControl = fit_control,
method = "rpart2", maxdepth = 6)
# Predicting with the model and comparing
predict_fs <- predict(rf_fit, newdata = test_fs)
# Confusion matrix for the cluster-assignment tree
confusionMatrix(data = predict_fs, test_fs$cluster)
# Extracting the rules: Daño Declarado, Indice Declarado/Cobertura, Siniestros last 3 months
fancyRpartPlot(rf_fit$finalModel)
############################
# Extracting Rules (note: 'vivi1' is not defined anywhere in this script; presumably an earlier subset of vivi03)
# Setting random seed
set.seed(1956451)
# Training and testing (75-25)
inTrain <- createDataPartition(y = vivi1$CIRCUITO_LIQUIDACION_ID, p = .75, list = FALSE)
train_fs <- vivi1[inTrain,]
test_fs <- vivi1[-inTrain,]
# No fit_control needed. We actually want to overfit to find the hidden rules
#fit_control <- trainControl(method = "cv", number = 10)
rf_fit <- train(CIRCUITO_LIQUIDACION_ID ~ CANTIDAD_REAPERTURAS +
ESTIMACION_DAÑO_CLIENTE_DE +
QTY_SINIESTRO_3MONTH +
QTY_SINIESTRO_3MONTH_FS +
QTY_SINIESTRO_6MONTH +
MONTHS_SINIESTRO_DECLARACION +
MONTHS_SINIESTRO_INICIO_POL +
INDEX_DECLARA_COBERTURA,
data = vivi1, # trying to overfit...
method = "rpart2", maxdepth = 6)
# Predicting with the model and comparing
predict_fs <- predict(rf_fit, newdata = vivi1)
# In-sample confusion matrix (fit and evaluated on the same data, so accuracy is optimistic)
confusionMatrix(data = predict_fs, vivi1$CIRCUITO_LIQUIDACION_ID)
# Extracting the rules: Daño Declarado, Indice Declarado/Cobertura, Siniestros last 3 months
fancyRpartPlot(rf_fit$finalModel)
############################################
###########################################
# Understanding the Analysts
admin <- vivi03 %>% filter(CIRCUITO_LIQUIDACION_ID == 2, ESTADO_SINIESTRO_TX == "Pagado" | ESTADO_SINIESTRO_TX == "Rechazado") %>% droplevels()
# Join the claim causes
causas <- read.csv("CAUSAS_SINIESTROS_VIVIENDA.txt", quote="", row.names = NULL, sep="|", header = T, encoding = "ISO 8859-1")
admin2 <- inner_join(admin, causas, by="SINIESTRO_ID")
rechazos <- admin2 %>% filter(ESTADO_SINIESTRO_TX == "Rechazado")
prop.table(table(admin2$CAUSA_SINIESTRO_TX,
admin2$ESTADO_SINIESTRO_TX),1)
table(rechazos$MOTIVO_ESTADO_SINIESTRO_TX, rechazos$CAUSA_SINIESTRO_TX)
# Overall Pagado/Rechazado balance is roughly 75-25
# MIN: Incendio, about 50-50
# MAX: Pérdida Frío, about 97.5-2.5
############
# Discovery
# Pérdida de Frío
admin2 %>%
filter(CAUSA_SINIESTRO_TX == "PERDIDA DE FRIO") %>%
ggplot(aes(x = QTY_SINIESTRO_6MONTH, y = INDEX_DECLARA_COBERTURA, color=as.factor(ESTADO_SINIESTRO_TX))) +
geom_jitter(alpha = 0.7, width = 0.5, height = 0.5) +
theme(legend.position = "none")
# Only 2 claims (out of 82) were rejected, both for non-payment. There are no behavioural variables driving the rejections
# Conclusion: move Pérdida de Frío to FS (Fast Track), with a check that the last X installments were paid
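# --- Added sketch (hedged): the proposed routing rule ---
# The data used here has no installment-payment field, so this toy table uses a
# hypothetical paid_last_3_installments flag purely to illustrate the rule suggested above.
toy_claims <- data.frame(
  SINIESTRO_ID = 1:4,
  CAUSA_SINIESTRO_TX = c("PERDIDA DE FRIO", "PERDIDA DE FRIO", "DAÑOS / ROTURAS", "INCENDIO"),
  paid_last_3_installments = c(TRUE, FALSE, TRUE, TRUE)
)
toy_claims$route_to_fs <- toy_claims$CAUSA_SINIESTRO_TX == "PERDIDA DE FRIO" &
  toy_claims$paid_last_3_installments
toy_claims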
# Daños y Roturas
admin2 %>%
filter(CAUSA_SINIESTRO_TX == "DAÑOS / ROTURAS") %>%
ggplot(aes(x = SUMA_ASEGURADA_DE, y = ESTIMACION_DAÑO_CLIENTE_DE, color=as.factor(ESTADO_SINIESTRO_TX))) +
geom_jitter(alpha = 0.7, width = 0.5, height = 0.5) +
theme(legend.position = "none")
# Eventos Climáticos
admin2 %>%
filter(CAUSA_SINIESTRO_TX == "EVENTOS CLIMATICOS", ESTIMACION_DAÑO_CLIENTE_DE < 750000) %>%
ggplot(aes(x = SUMA_ASEGURADA_DE, y = QTY_SINIESTRO_3MONTH, color=as.factor(ESTADO_SINIESTRO_TX))) +
geom_jitter(alpha = 0.5, width = 0.5, height = 0.5) +
theme(legend.position = "none")
############################
# Extracting Rules DAÑOS
danos <- admin2 %>%
filter(CAUSA_SINIESTRO_TX == "DAÑOS / ROTURAS") %>%
droplevels()
# Setting random seed
set.seed(1956451)
# Training and testing split (85-15)
inTrain <- createDataPartition(y = danos$ESTADO_SINIESTRO_TX, p = 0.85, list = FALSE)
train <- danos[inTrain,]
test <- danos[-inTrain,]
# 10-fold CV control for the rule tree (overfitting would be acceptable here, since the goal is to surface the hidden rules)
fit_control <- trainControl(method = "cv", number = 10)
rf_fit <- train(ESTADO_SINIESTRO_TX ~ ESTIMACION_DAÑO_CLIENTE_DE +
QTY_SINIESTRO_3MONTH +
MONTHS_SINIESTRO_FIRSTDATE +
MONTHS_SINIESTRO_INICIO_POL +
PRODUCTO_EMPLEADOS_FL +
SUMA_ASEGURADA_DE,
data = train, # trying to overfit...
method = "rpart2", maxdepth = 3,
trControl = fit_control)
# Predicting with the model and comparing
predict <- predict(rf_fit, newdata = test)
# 97.14% accuracy for paid claims (Siniestros Pagados), vs 80% in the sample
confusionMatrix(data = predict, test$ESTADO_SINIESTRO_TX)
# Extracting the rules: relationship between estimated damage (Daño Estimado) and insured sum (Suma Asegurada)
fancyRpartPlot(rf_fit$finalModel)
|
/Discovering_Fraud.R
|
no_license
|
gonzalofichero/G_in_Zurich
|
R
| false | false | 25,755 |
r
|
|
### Analisis-COVID-MP2.5
## Functions to generate figures/tables from a fitted cross-sectional model
## PBH July 2020
foot_note <- "Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1"
f_tableCoef <- function(model, preview="none", highlight=F){
# est <- cbind(est=coef(mod), confint(mod))
est <- summary(model)$coefficients[,1:4] %>% as.data.frame() %>%
as_tibble(rownames = "parametro")
names(est) <- c("parametro","coef","sd","z_value","p_value")
## Add codes
est <- est %>% mutate(codes=case_when(
p_value<0.001 ~ "***",
p_value<0.01 ~ "**",
p_value<0.05 ~ "*",
p_value<0.1 ~ ".",
T ~ ""))
## Coefficient table
table <- est %>%
mutate(parametro=parametro %>%
str_remove_all("scale|\\(|\\)|log") %>%
f_replaceVar() %>%
str_replace("rmRM","Comuna dentro RM")) %>%
rename(Parametro=parametro, `Coef.`=coef, `Desv.`=sd,
`Valor-z`=z_value,`Valor-p`=p_value,`Sign.`=codes) %>%
flextable() %>%
colformat_num(big.mark=" ", digits=4, j=2:5,
na_str="s/i") %>%
bold(bold=T, part="header") %>%
autofit(add_w = 0.1, add_h = 0.3) %>%
align(j=1, align = "left", part="all") %>%
footnote(j=6, value=as_paragraph(foot_note), part="header", inline=T)
if (highlight){
table <- table %>% bold(bold=T, i = ~`Valor-p`<=0.05)
} else {
table <- table %>% bold(j=1, bold=T)
}
# Return the table
if (preview=="docx"){
return(table %>% print(preview="docx"))
} else if(preview=="pptx"){
return(table %>% print(preview="pptx"))
} else {
return(table)
}
}
## Function to generate a table with the estimated MRRs.
# The preview option opens the table in Word (docx) or PowerPoint (pptx)
# The highlight option bolds variables significant at the 5% level
f_tableMRR <- function(model, preview="none", highlight=F){
est <- summary(model)$coefficients[,1:4] %>% as.data.frame() %>%
as_tibble(rownames = "parametro")
names(est) <- c("parametro","coef","sd","z_value","p_value")
## Add codes
est <- est %>% mutate(codes=case_when(
p_value<0.001 ~ "***",
p_value<0.01 ~ "**",
p_value<0.05 ~ "*",
p_value<0.1 ~ ".",
T ~ ""))
# Calculate MRR
est <- est %>%
mutate(ci=confint(model, method="Wald", level=0.95) %>% na.omit(),
low=ci[,1], high=ci[,2], ci=NULL)
est_mrr <- est[-1,] %>% mutate(coef=exp(coef) %>% round(2),
low=exp(low) %>% round(2),
high=exp(high) %>% round(2),
ci=paste("(",format(low,digits=2),
", ",format(high,digits=2),")",sep = ""),
p_value=round(p_value,4))
# MRR table
table <- est_mrr %>%
dplyr::select(parametro, coef, ci, p_value, codes) %>%
mutate(parametro=parametro %>%
str_remove_all("scale|\\(|\\)|log") %>%
f_replaceVar() %>%
str_replace("rmRM","Comuna dentro RM")) %>%
rename(Variable=parametro, MRR=coef, `95% I.C.`=ci,
`Valor-p`=p_value,`Sign.`=codes) %>%
flextable() %>%
bold(bold=T, part="header") %>%
autofit(add_w = 0.1, add_h = 0.3) %>%
align(j=1, align = "left", part="all") %>%
footnote(j=5, value=as_paragraph(foot_note), part="header", inline=T)
if (highlight){
table <- table %>% bold(bold=T, i = ~`Valor-p`<=0.05)
} else {
table <- table %>% bold(j=1, bold=T)
}
# Return the table
if (preview=="docx"){
return(table %>% print(preview="docx"))
} else if(preview=="pptx"){
return(table %>% print(preview="pptx"))
} else {
return(table)
}
}
f_figMRR <- function(model){
# est <- cbind(est=coef(mod), confint(mod))
est <- summary(model)$coefficients[,1:4] %>% as.data.frame() %>%
as_tibble(rownames = "parametro")
names(est) <- c("parametro","coef","sd","z_value","p_value")
## Add codes
est <- est %>% mutate(codes=case_when(
p_value<0.001 ~ "***",
p_value<0.01 ~ "**",
p_value<0.05 ~ "*",
p_value<0.1 ~ ".",
T ~ ""))
# Calculate MRR
est <- est %>%
mutate(ci=confint(model, method="Wald", level=0.95) %>% na.omit(),
low=ci[,1], high=ci[,2], ci=NULL)
est_mrr <- est[-1,] %>% mutate(coef=exp(coef) %>% round(2),
low=exp(low) %>% round(2),
high=exp(high) %>% round(2),
ci=paste("(",format(low,digits=2),
", ",format(high,digits=2),")",sep = ""),
p_value=round(p_value,4))
## Figure MRR
p <- est_mrr %>%
rowid_to_column() %>%
mutate(parametro=parametro %>%
str_remove_all("scale|\\(|\\)|log") %>%
f_replaceVar() %>%
str_replace("rmRM","Comuna dentro RM")) %>%
ggplot(aes(x=reorder(parametro,desc(rowid)), y=coef))+
geom_point()+
geom_errorbar(aes(ymin=low, ymax=high))+
geom_hline(yintercept = 1, linetype = "dashed")+
labs(x="",y="MRR")+
coord_flip()
return(p)
}
## Compute the MRR for MP2.5 with its confidence interval
f_MRR_mp25 <- function(mod, param="mp25"){
# Get coefficients
est <- summary(mod)$coefficients[,1:4] %>% as.data.frame() %>%
as_tibble(rownames = "parametro") %>%
filter(parametro==param)
names(est) <- c("parametro","coef","sd","z_value","p_value")
# Calculate MRR
est <- est %>%
mutate(ci=confint(mod, method="Wald", level=0.95) %>% na.omit() %>%
as.data.frame() %>% as_tibble(rownames = "parametro") %>%
filter(parametro==param),
low=ci[,2] %>% unlist(), high=ci[,3] %>% unlist(), ci=NULL)
est_mrr <- est %>% mutate(RR=exp(coef) %>% round(4),
lower_CI=exp(low) %>% round(4),
upper_CI=exp(high) %>% round(4)) %>%
dplyr::select(RR, lower_CI, upper_CI)
return(est_mrr)
}
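# --- Added usage sketch (hedged, not part of the original script) ---
# The helpers above expect a fitted count-regression model; the toy Poisson model
# below exists only to show the calls. It assumes f_replaceVar() and the
# tidyverse/flextable packages are already loaded by the scripts sourcing this file.
toy <- data.frame(muertes = rpois(100, 5),
                  mp25 = runif(100, 5, 40),
                  rm = sample(c("RM", "noRM"), 100, replace = TRUE))
mod_toy <- glm(muertes ~ mp25 + rm, family = poisson, data = toy)
f_tableCoef(mod_toy, highlight = TRUE)   # coefficient table
f_tableMRR(mod_toy, preview = "none")    # MRR table
f_figMRR(mod_toy)                        # MRR plot
f_MRR_mp25(mod_toy, param = "mp25")      # MRR and 95% CI for mp25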
## EoF
|
/Scripts/05-FuncionesAnalisisTransversal.R
|
no_license
|
pmbusch/Analisis-COVID-MP2.5
|
R
| false | false | 6,072 |
r
|
|
# Activation function -> sigmoid
f <- function(net){
    return (1/(1+exp(-net)))
}
# Derivative of the activation function
df_dnet <- function(f_net){
    return (f_net * (1-f_net))
}
# MLP architecture
mlp.architecture <- function(input.length=2,hidden.length=2,output.length=1, activation.function = f, d_activation.function = df_dnet){
    # Model to be returned
    model = list()
    model$input.length = input.length
    model$hidden.length = hidden.length
    model$output.length = output.length
    # Weight initialization for the hidden layer (the +1 column is the bias term)
    model$hidden = matrix(runif(min =-0.5, max=0.5, hidden.length*(input.length+1)),nrow=hidden.length,ncol=input.length+1)
    # Weight initialization for the output layer (the +1 column is the bias term)
    model$output = matrix(runif(min =-0.5, max=0.5, output.length*(hidden.length+1)),nrow=output.length,ncol=hidden.length+1)
    model$f = activation.function
    model$df_dnet = d_activation.function
    return (model)
}
# Apply the data to the network equations (forward pass)
mlp.forward <- function(model, Xp) {
    # Multiply the hidden-layer weights by the input pattern p (plus bias)
    net_h_p = model$hidden %*% c(Xp,1)
    # Apply the activation function to the hidden-layer result
    f_net_h_p = model$f(net_h_p)
    # Multiply the output-layer weights by the hidden-layer outputs (plus bias)
    net_o_p = model$output %*% c(as.numeric(f_net_h_p),1)
    # Apply the activation function to the output-layer result
    f_net_o_p = model$f(net_o_p)
    # Result
    ret = list()
    ret$net_h_p = net_h_p
    ret$net_o_p = net_o_p
    ret$f_net_h_p = f_net_h_p
    ret$f_net_o_p = f_net_o_p
    return (ret)
}
# Training (backpropagation with gradient descent)
mlp.backpropagation <- function(model, dataset, eta=0.1, threadshold=1e-3){
    squaredError = 2 * threadshold
    counter = 0
    # Keep iterating while the error is larger than threadshold
    while(squaredError > threadshold){
        squaredError = 0
        # Apply the forward pass to every row of the dataset
        for (p in 1:nrow(dataset)) {
            Xp = as.numeric(dataset[p,1:model$input.length])
            Yp = as.numeric(dataset[p,(model$input.length+1):ncol(dataset)])
            # Result of applying pattern p
            results = mlp.forward(model,Xp)
            # Obtained value
            Yo = results$f_net_o_p
            # Compute the error
            error = Yp - Yo
            # Error used for training
            squaredError = squaredError + sum(error^2)
            # Output-layer adjustment (delta)
            delta_o_p = error * model$df_dnet(results$f_net_o_p)
            # Hidden-layer adjustment (delta)
            w_o_kj = model$output[,1:model$hidden.length]
            delta_h_p = as.numeric(model$df_dnet(results$f_net_h_p)) * (as.numeric(delta_o_p) %*% w_o_kj)
            # Weight updates
            model$output = model$output + eta * (delta_o_p%*%as.vector(c(results$f_net_h_p,1)))
            model$hidden = model$hidden + eta * (t(delta_h_p)%*%as.vector(c(Xp,1)))
        }
        squaredError = squaredError / nrow(dataset)
        cat("Mean squared error = ", squaredError, "\n")
        counter = counter + 1
    }
    ret = list()
    ret$model = model
    ret$counter = counter
    return (ret)
}
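# --- Added usage sketch (hedged, not part of the original file) ---
# Train the network above on XOR. The dataset layout follows mlp.backpropagation():
# the first input.length columns are inputs, the remaining column is the target.
# Convergence is not guaranteed for every random start; re-run if it stalls.
set.seed(1)
xor_data <- data.frame(x1 = c(0, 0, 1, 1),
                       x2 = c(0, 1, 0, 1),
                       y  = c(0, 1, 1, 0))
model <- mlp.architecture(input.length = 2, hidden.length = 2, output.length = 1)
trained <- mlp.backpropagation(model, xor_data, eta = 0.5, threadshold = 1e-2)
round(mlp.forward(trained$model, c(0, 1))$f_net_o_p)  # should be close to 1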
|
/mlp/mlp.r
|
no_license
|
LucasSugi/machine-learning
|
R
| false | false | 2,869 |
r
|
|
# General setup ----
## Function
rm(list=ls(all.names=T))
library(runjags)
source("function_simdata_ver5.R")
## MCMC setting
n.ad <- 100
n.iter <- 1E+4
n.thin <- max(3, ceiling(n.iter/500))
burn <- ceiling(max(10, n.iter/2))
Sample <- ceiling(n.iter/n.thin)
## Parameter set
N <- c(100, 500, 1000)
LEN <- c(500, 1000)
DELTA <- seq(50, 300, length = 6)
PHI <- c(0.4, 0.8)
PARA <- as.matrix(expand.grid(N, LEN, DELTA, PHI))
Kernel <- "Laplace"
colnames(PARA) <- c("N", "LEN", "DELTA", "PHI")
## Number of replicates and number of parameter combinations
Nrep <- 50
Npara <- nrow(PARA)
# Bayesian Inference ----
output <- NULL
## Different sampling designs and model parameters
for(i in 1:Npara){
RE <- NULL
## Replicates under the same sampling designs and model parameters
for(j in 1:Nrep){
print(c(i,j))
delta <- PARA[i,"DELTA"]
## Simulated Data
D <- fun_disp(N = PARA[i,"N"], sec_len = PARA[i,"LEN"],
delta = delta, family = Kernel,
phi = PARA[i,"PHI"], hetero.phi = F)
## Data for JAGS
X <- D$X
X0 <- D$X0
Y <- 1 - is.na(D$X)
L <- PARA[i,"LEN"]
## Run JAGS
Djags <- list( X = X, X0 = X0, Nsample = length(X) )
para <- c("delta")
inits <- replicate(3, list(log.delta = log(delta), .RNG.name = "base::Mersenne-Twister", .RNG.seed = NA ), simplify = F )
for(k in 1:3) inits[[k]]$.RNG.seed <- k
m <- read.jagsfile("bayes-model/laplace/model_simple_laplace_v1.R")
post <- run.jags(m$model, monitor = para, data = Djags,
n.chains = 3, inits = inits, method = "parallel",
burnin = burn, sample = Sample, adapt = n.ad, thin = n.thin,
n.sims = 3, modules = "glm")
print(post$psrf$psrf[,1])
while(any(post$psrf$psrf[,1] >= 1.1)){
post <- extend.jags(post, burnin = 0, sample = Sample, adapt = n.ad, thin = n.thin,
n.sims = 3, combine = T)
print(post$psrf$psrf[,1])
}
## Output
MCMCiter <- (post$sample/Sample)*n.iter + burn
re <- summary(post)
RE <- rbind(RE, c(PARA[i,],
mean(is.na(X)==0),
mean(is.na(D$x_stay)==0),
re["delta", 1:3],
re["delta", "psrf"],
MCMCiter, burn, n.thin, post$sample) )
    View(RE)  # interactive check of intermediate results (requires an interactive session)
}#j
## Compile final output
output <- rbind(output, RE)
}#i
# Save results ----
colnames(output) <- c("N", "LEN", "DELTA", "PHI",
"Pcap", "Pstay",
"delta_lower", "delta_med", "delta_upper",
"R_hat_delta",
"MCMCiter", "Burn_in", "N_thin", "N_sample")
filename <- paste0("result/sim_model_simple_", Kernel, Sys.Date(), ".csv")
write.csv(output, filename)
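# --- Added post-processing sketch (hedged, not part of the original script) ---
# After the simulation has written the CSV above, coverage of the true DELTA by the
# reported interval can be summarised per design cell (column names follow the
# colnames() assignment above).
res <- read.csv(filename, row.names = 1)
res$covered <- res$DELTA >= res$delta_lower & res$DELTA <= res$delta_upper
aggregate(covered ~ N + LEN + DELTA + PHI, data = res, FUN = mean)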
|
/bayes-model/laplace/inits01_simple_laplace.R
|
permissive
|
aterui/public-proj_disp-model-sim
|
R
| false | false | 2,999 |
r
|
|
########## MAJOR COMPLICATION REGRESSION COEFFICIENTS
majorComp_sexFactor <- -0.0653436
majorComp_raceFactor <- 0.0474753
majorComp_ageFactor <- 0.0063017
#4 types of surgeries
majorComp_GastRxnFactor <- 2.082806
majorComp_ColonRxnFactor <- 1.782225
majorComp_PancFactor <- 2.600119
majorComp_GallFactor <- 0.000000
majorComp_CancerGIFactor <- 0.0502679 #Cancer Y/N?
majorComp_FunctionalFactor <- -0.6151226
majorComp_asaclassFactor <- 0.4520474
majorComp_steroidFactor <- 0.4633092
majorComp_ascitesFactor <- 0.7477772
majorComp_SepticFactor <- 0.8332981
majorComp_ventilarFactor <- 1.049847
discancr <- 0.509568 # likely disseminated cancer; note: not prefixed like the other majorComp_* coefficients and reassigned in the death section below
majorComp_DMallFactor <- 0.102625
majorComp_hypermedFactor <- 0.0456271
majorComp_hxchfFactor <- 0.3956417
majorComp_SOBFactor <- 0.2111835
majorComp_smokerFactor <- 0.1357178
majorComp_hxcopdFactor <- 0.2391823
majorComp_dialysisFactor <- 0.2449626
majorComp_renafailFactor <- 0.4374031
majorComp_BMIFactor <- 0.0078337
majorComp_consFactor <- -4.424855
########## DEATH REGRESSION COEFFICIENTS
death_sexFactor <- -0.3212026
death_raceFactor <- -0.0163649
death_ageFactor <- 0.0501916
death_GastRxnFactor <- 1.385467
death_ColonRxnFactor <- 0.7759915
death_PancFactor <- 1.450207
death_GallFactor <- 0.000000
death_CancerGIFactor <- 0.1701402
death_FunctionalFactor <- -1.050477
death_asaclassFactor <- 0.7729463
death_steroidFactor <- 0.3690016
death_ascitesFactor <- 1.34307
death_SepticFactor <- 1.085556
death_ventilarFactor <- 0.7441897
discancr <- 0.9973936 # note: overwrites the value assigned in the major-complication section above
death_DMallFactor <- 0.0872066
death_hypermedFactor <- 0.1677402
death_hxchfFactor <- 0.5012253
death_SOBFactor <- 0.4156159
death_smokerFactor <- 0.1785988
death_hxcopdFactor <- 0.3292885
death_dialysisFactor <- 0.9601621
death_renafailFactor <- 0.538141
death_BMIFactor <- -0.0086523
death_consFactor <- -9.716277
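# --- Added usage sketch (hedged, not part of the original file) ---
# The constants above look like logistic-regression coefficients (note the *_consFactor
# intercepts), so a risk calculator would typically apply the inverse logit to the
# linear predictor. The patient coding below (0/1 flags, age in years, BMI) is an
# assumption for illustration only.
inv_logit <- function(lp) 1 / (1 + exp(-lp))
# Hypothetical patient: 65-year-old having a colon resection, ASA class 3, BMI 28
example_lp <- majorComp_consFactor +
  majorComp_ageFactor * 65 +
  majorComp_ColonRxnFactor * 1 +
  majorComp_asaclassFactor * 3 +
  majorComp_BMIFactor * 28
inv_logit(example_lp)  # predicted probability of a major complication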
|
/global.R
|
no_license
|
akaraha1/surgery_calculator
|
R
| false | false | 2,252 |
r
|
|
# The goal of this project is to get to know and learn to use the data.table package.
# It is a very fast way to work with data, well suited to large amounts of data.
# My first impression was the simplicity and elegance of the syntax.
# General form of data.table syntax
#     DT[i,  j,  by]
#        |   |   |
#        |   |    --> grouped by what?
#        |    -----> what to do?
#         --------> on which rows?
# Take DT, choose the rows (i), run a computation (j) by group (by).
library(data.table)
library(bikeshare14) # provides the batrips data.frame.
# library(tidyverse)
# Create the data.table X
X <- data.table(id = c("a", "b", "c"), value = c(0.5, 1.0, 1.5))
# View X
X
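# --- Added illustration (hedged): all three parts of DT[i, j, by] in one call ---
# Toy data made up for this example: filter rows with i, compute with j, group with by.
grades <- data.table(student = c("ana", "ana", "bia", "bia", "caio"),
                     score = c(7, 9, 5, 8, 10))
grades[score >= 6, .(mean_score = mean(score), passed_exams = .N), by = student]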
# You know it is a data.table when the printed row numbers
# are followed by a colon.
# What is the class of batrips? Answer: data.frame.
class(batrips)
# But I want it to be a data.table as well!
batrips <- as.data.table(batrips)
class(batrips)
head(batrips)
head(batrips, 4)
tail(batrips, 3)
str(batrips)
ncol(batrips)
nrow(batrips)
table(batrips$subscription_type)
batrips[3] # Third row.
batrips[3,] # Third row.
batrips[1:4] # Rows 1 to 4
batrips[1:4,] # Rows 1 to 4
batrips[12:15] # Rows 12 to 15
batrips[-(12:15)] # All rows except 12 to 15.
batrips[!(12:15)] # All rows except 12 to 15.
batrips[c(1,6,10)] # Rows 1, 6 and 10.
batrips[!c(1:5, 10:15)] # All rows except 1 to 5 and 10 to 15.
batrips[326339] # Last row of batrips (hard-coded row number).
batrips[.N] # Last row of batrips.
batrips[!c(1, .N)] # All rows except the first and the last.
batrips[1:(.N-10)] # All rows up to 10 before the last one.
# All rows whose subscription_type column is "Subscriber";
# it could also be "Customer".
batrips[subscription_type == "Subscriber"]
# All rows where start_terminal is 58 and end_terminal is not 65.
batrips[start_terminal == 58 & end_terminal != 65]
batrips[start_station == "MLK Library" & duration > 1600] # By now the pattern is clear, no further explanation needed.
batrips[subscription_type != "Subscriber"]
batrips[start_station == "Ryland Park" & subscription_type != "Customer"]
# %like%
vvr <- c("aaba", "aaba", "baca")
# Matches "aa" anywhere in the string
vvr %like% "aa"
# Matches "ba" at the end of the string
vvr %like% "ba$"
# All rows where the start_station column starts with San Francisco.
# There are San Francisco City Hall, San Francisco Caltrain 2, San Francisco Caltrain, etc.
batrips[start_station %like% "^San Francisco"]
# All rows with duration between 2000 and 3000, inclusive of 2000 and 3000.
# ONLY WORKS FOR NUMERIC COLUMNS!
batrips[duration %between% c(2000, 3000)]
# Filter the trip_id column for a few specific values.
batrips[trip_id %in% c(588841, 139560, 139562)]
# All rows whose start_station matches the strings "Japantown", "Mezes Park", "MLK Library".
# ONLY WORKS FOR CHARACTER COLUMNS!
batrips[start_station %chin% c("Japantown", "Mezes Park", "MLK Library")]
# Filter all end_station values where the word Market appears at the end.
# Beale at Market, Steuart at Market, etc.
batrips[end_station %like% "Market$"]
# Selecting the trip_id column.
# The result is a data.table; if this were a data.frame the result would be a vector.
ans <- batrips[, "trip_id"]
str(ans)
head(ans, 2)
# We can select columns by number, but it is not recommended,
# because column positions change during data cleaning.
ans <- batrips[, c(2, 4)]
head(ans, 2)
# Selecting columns by name.
batrips[, c("duration", "start_station")]
# We can also select by exclusion.
# Select all cols *except* those shown below
ans <- batrips[, -c("start_date", "end_date", "end_station")]
ans <- batrips[, !c("start_date", "end_date", "end_station")]
head(ans, 1)
# Here we do 2 things: select the columns trip_id and duration,
# and rename duration to dur.
# NO QUOTES NEEDED!
# The result is a data.table
ans <- batrips[, list(trip_id, dur = duration)]
# The dot here is shorthand for the word list.
ans <- batrips[, .(trip_id, dur = duration)]
# With .( ) the result is a data.table; without the list wrapper it is a vector.
ans <- batrips[, .(trip_id)]
head(ans, 4)
ans <- batrips[, (trip_id)]
head(ans, 4)
# We can run computations directly on the columns.
ans <- batrips[, mean(duration)]
ans <- batrips[, round(mean(duration),2)]
# We can compute the mean duration only for Japantown.
ans <- batrips[start_station == "Japantown", round(mean(duration),2)]
# Filter all rows whose start_station is Japantown
# and count how many rows there are (.N).
# How many trips started from "Japantown"?
batrips[start_station == "Japantown", .N] # Here the result is a vector.
batrips[start_station == "Japantown", .(.N)] # Here it is another data.table.
# Here we take the median duration over the rows where
# end_station is "Market at 10th" and subscription_type is "Subscriber".
batrips[end_station == "Market at 10th" & subscription_type == "Subscriber", median(duration)]
# Difference between dates.
date1 <- "2018-12-20 11:30:00 EST"
date2 <- "2018-12-20 11:20:00 EST"
difftime(date1, date2, units = "min")
difftime(date1, date2, units = "hour")
date3 <- "2018-10-25"
date4 <- "2020-12-20"
difftime(date4, date3, units = "weeks")
difftime(date4, date3, units = "auto")
# Computing statistics.
# Here the result is a one-row data.table.
batrips[, .(mn_dur = mean(duration), med_dur = median(duration))]
# Filter the start_station column for Japantown,
# then, on that subset, compute the mean and median duration.
batrips[start_station == "Japantown", .(mn_dur = mean(duration),
                                        med_dur = median(duration))]
# Computing the minimum and maximum duration.
# The result is a data.table and the new columns
# are automatically named V1 and V2.
batrips[, .(min(duration), max(duration))]
# Here we compute the mean duration and the latest end date, and also
# name the columns of the resulting data.table.
batrips[, .(mean_duration = mean(duration),
            last_ride = max(end_date))]
# Instead of computing a number we can draw a plot.
batrips[, hist(duration)]
# The plot can also be drawn from a filtered subset.
batrips[start_station == "Townsend at 7th" & duration < 500, hist(duration)]
# Aqui começa a sofisticar.
# O by argument é o group_by do dplyr
# Quantas viagens foram feitas a partir de cada start_station?
ans <- batrips[, .N, by = "start_station"] # Estas duas formas equivalem.
ans <- batrips[, .N, by = .(start_station)]
head(ans, 10)
ans
# Além de calcular as colunas podemos nomeá-las.
ans <- batrips[, .(num_d_viag = .N), by = .(inicio = start_station)]
head(ans, 10)
# Aqui contamos (.N) a quantidade de strat_station por mês.
# Usamos a função month() do data.table, aplicado ao start_date.
ans <- batrips[ , .N, by = .(start_station, mon = month(start_date))]
ans
nrow(ans)
# Básico: calculando a duration média por start_station.
# Dei uma guaribada para o resultado ficar natural para brasileiros.
batrips[, .(mean_duration = format(round(mean(duration), 2), decimal.mark = ",", big.mark = ".")),
by = start_station]
# Calculando a média da duration por (group_by) start_station e end_station.
batrips[, .(mean_duration = mean(duration)), by = .(start_station, end_station)]
# Calculando a média da duration e o número total de viagens agrupados por
# start_station e end_station
aggregate_mean_trips <- batrips[, .(mean_duration = mean(duration),
total_trips = .N),
by = .(start_station, end_station)]
# Calculate the total number of unique bike ids for every month
# Observe que nomeou automaticamente a coluna.
ans <- batrips[, uniqueN(bike_id), by = month(start_date)]
head(ans, 12)
# Mas podemos chamá-la de unic_bici.
ans <- batrips[, .(unic_bici = uniqueN(bike_id)), by = month(start_date)]
head(ans, 12)
# Contando as bicicletas em todas combinações de start_station e end_station e
# encadeando (%>%) com a operação de ordenamento decrescente.
batrips[, .N, by = .(start_station,
end_station)][order(-N)]
# Encadeando %>%.
# Contamos o número de chegadas por end_station e então
# ordenamos decrescentemente e então
# filtramos as 10 primeiras linhas.
top_10 <- batrips[, .N, by = end_station][order(-N)][1:10]
top_10
# Aqui parece meio complicado.
# Ordena crescentemente o start_date, depois
# agrupa por start_station, depois
# cria a coluna start_date que é formada pela
# 1ª e última start_date de cada start_station.
batrips[order(start_date),
.(start_date = start_date[c(1, .N)]),
by = start_station]
# Usando o .SD.
# Por parecer confuso vou usar um exemplo.
# Criando a data.table DT.
DT = data.table(
nome = c("alice","alice","alice","anita","anita","maria"),
nota = 1:6,
peso = 7:12,
data = 13:18
)
# Usando o .SD (Subset of Data, é igual aos conjuntos e subconjuntos do colégio)
# Pegou o DT e fez 3 data.table agrupados por nome.
# Um da Alice, um da Anita e 1 da Maria.
DT[, print(.SD), by = nome]
# Podemos também fazer um cálculo por nome, média por exemplo.
# Dá como resultado a média de cada uma nos quesitos nota, peso e data.
# Aqui o resultado é um só data.table.
DT[, lapply(.SD, mean), by = nome]
# O .SD considera todas as colunas, mas podemos escolher quais queremos.
# .SDcols
DT[, lapply(.SD, mean), by = nome, .SDcols = c("nota", "data")]
DT[, lapply(.SD, mean), by = nome, .SDcols = c("peso")]
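# Nota do editor (exemplo ilustrativo, não estava no original): qualquer função
# pode ser aplicada às colunas escolhidas em .SDcols, por exemplo o máximo:
DT[, lapply(.SD, max), by = nome, .SDcols = c("nota", "peso")]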
# Aqui pega a 1ª linha de cada start_station,
# e mantém todas as colunas.
batrips[, .SD[1], by = start_station]
# Aqui pega a 1ª linha de cada start_station,
# mas só fica com as colunas trip_id e duration.
batrips[, .SD[1], by = start_station, .SDcols = c("trip_id", "duration")]
# Operador := (colon equal). Atualiza uma data.table por referência.
# Acho que é o equivalente ao mutate do dplyr.
#
# Aqui adiciona 2 colunas ao batrips.
batrips[, c("is_dur_gt_1hour", "week_day") := list(duration > 3600,
wday(start_date))]
# Quando adiciona uma só coluna a forma é simplificada.
batrips[, is_dur_gt_1hour := duration > 3600]
# Pode usar também o formato função :=
# Aqui a coluna is_dur_gt_1hour é retirada e a start_station
# fica em letras maiúsculas.
batrips[, `:=`(is_dur_gt_1hour = NULL,
start_station = toupper(start_station))][]
# Aqui acrescenta a coluna duration_hour, que é
# a durantion expressa em horas.
batrips[, duration_hour := duration/3600][]
# Foi criada a coluna duration_mean, é a média da duration
# para cada combinação de start_station e end_station.
batrips[, duration_mean := mean(duration), by = .(start_station, end_station)][]
# Foi criada a coluna trips_N, para contar (.N)
# as viagens por start_station.
batrips[, trips_N := .N, by = start_station][]
# Criação da coluna mean_dur, a média da duration depois de
# retirados os NA, agrupadas por mês.
batrips[, mean_dur := mean(duration, na.rm = TRUE),
by = month(start_date)][]
# Criação da coluna mean_dur, a média da duration depois de
# retirados os NA, agrupadas por mês.
# Depois faz: se a duration é NA troca por mean_dur.
# No final descarta a coluna mean_dur.
batrips_new[, mean_dur := mean(duration, na.rm = TRUE),
by = month(start_date)][is.na(duration),
duration := mean_dur][, mean_dur := NULL]
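# Verificação ilustrativa (acrescentada pelo editor): depois da imputação acima
# não deve restar nenhuma duration faltante.
batrips_new[is.na(duration), .N] # esperado: 0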
# Aqui deu uma sofisticada.
# Foi criada a coluna trip_category.
# 1º criou-se o objeto med_dur (a mediana da duration).
# Depois fez-se um if/else para criar as categorias short, medium e long.
# Tudo agrupado por start_station e end_station.
batrips[, trip_category := {
med_dur = median(duration, na.rm = TRUE)
if (med_dur < 600) "short"
else if (med_dur >= 600 & med_dur <= 1800) "medium"
else "long"
},
by = .(start_station, end_station)
][, .(start_station, end_station, trip_category)]
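# Verificação ilustrativa (acrescentada pelo editor): quantas viagens caíram
# em cada categoria criada acima.
batrips[, .N, by = trip_category]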
batrips[1:3]
# Aqui foram criadas duas colunas por start_station.
# Deu erro!
batrips[, c("mean_duration", "median_duration") := .(mean(duration),
median(duration)),
by = start_station]
# O mesmo que acima porém usando a função `:=``().
# Deu erro!
batrips[, `:=`(mean_duration = mean(duration),
median_duration = median(duration)),
by = start_station]
# Aqui filtrei a duration, criei a coluna mean_duration agrupada por
# start_station e end_station.
batrips[duration > 600, mean_duration := mean(duration),
by = .(start_station, end_station)]
# Importando com fread.
# Pie chart.
mydata = sample(LETTERS[1:5],16,replace = TRUE)
mydata.count= table(mydata)
pie(mydata.count, col=rainbow(10))
# Função para gerar senhas.
password.generator <- function(len, n){
dummydt=data.frame(matrix(ncol=0,nrow=n))
num <- 1:9
spcl <- c("!", "#", "$", "%", "&", "(", ")", "*", "+", "-", "/", ":",
";", "<", "=", ">", "?", "@", "[", "^", "_", "{", "|", "}", "~")
comb <- c(num, spcl, letters, LETTERS)
p <- c(rep(0.035, 9), rep(0.015, 25), rep(0.025, 52))
password<-replicate(nrow(dummydt),paste0(sample(comb, len, TRUE, prob = p), collapse = ""))
dummydt$password<-password
return(dummydt)
}
password.generator(8, 3)
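# Verificação ilustrativa (acrescentada pelo editor): toda senha gerada deve
# ter o comprimento pedido.
nchar(password.generator(10, 5)$password) # esperado: 10 10 10 10 10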
|
/exemplos de datatable.R
|
no_license
|
mario-rutman/estudando-o-data.table
|
R
| false | false | 13,337 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{varsel-statistics}
\alias{varsel-statistics}
\alias{varsel_plot}
\alias{varsel_statistics}
\title{Plotting or printing summary statistics related to variable selection}
\usage{
varsel_plot(object, ..., nv_max = NULL, statistics = NULL, deltas = T,
n_boot = 1000, alpha = 0.1)
varsel_statistics(object, ..., nv_max = NULL, deltas = F)
}
\arguments{
\item{object}{The object returned by \link[=varsel]{varsel} or
\link[=cv_varsel]{cv_varsel}.}
\item{nv_max}{Maximum submodel size for which the statistics are calculated.}
\item{statistics}{A list of strings of statistics to calculate. Available
  options are: kl, mse (gaussian only), mlpd, pctcorr (binomial only).
  If \code{NULL}, \code{varsel_plot} plots only mlpd, but \code{varsel_statistics}
  returns all the statistics.}
\item{deltas}{If \code{TRUE}, the difference between the full model and the
submodel is returned instead of the actual value of the statistic.
Defaults to \code{FALSE}.}
\item{n_boot}{Number of bootstrap samples for calculating the credible
intervals of the statistics.}
\item{alpha}{A number indicating the desired coverage of the credible
intervals. Eg. \code{alpha=0.1} corresponds to 90\% probability mass
within the intervals. Defaults to \code{0.1}.}
}
\description{
\code{varsel_statistics} can be used to obtain summary statistics related to
variable selection. The same statistics can be plotted with
\code{varsel_plot}.
}
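\examples{
\dontrun{
# Illustrative sketch added by the editor (not from the package authors):
# `vs` is assumed to be the value returned by varsel() or cv_varsel().
varsel_plot(vs, nv_max = 10, deltas = TRUE)
varsel_statistics(vs, nv_max = 10)
}
}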
|
/man/varsel-statistics.Rd
|
no_license
|
bgoodri/glmproj
|
R
| false | true | 1,504 |
rd
|
### Exercise: bootstrap bias estimation
n = 25;
lambda = 1;
K = 200;
set.seed(2015);
X = rpois(n, lambda=lambda);
p_hat = exp(-2*mean(X));
# Bootstrap
BP_Samples = array(0, c(K, n));
for(k in 1:K){
BP_Samples[k, ] = sample(X, n, replace=TRUE);
}
# Bootstrap statistics
P_hat_star = apply(BP_Samples, 1, function(x) exp(-2*mean(x)));
# Bootstrap bias estimation
bias_BP = mean(P_hat_star-p_hat);
# Compare to true value
## Compute the true value
n_experiments = 2000;
P_iid_samples = c();
for(experiment_ind in 1:n_experiments){
X = rpois(n, lambda=lambda);
p_hat = exp(-2*mean(X));
P_iid_samples = c(P_iid_samples, p_hat);
}
bias_true = mean(P_iid_samples-exp(-2*lambda));
cat( paste('Bootstrap bias estimation = ', round(bias_BP, 3), '\n', 'True bias = ', round(bias_true, 3), '\n', sep='') );
# Plot
hist(P_iid_samples-exp(-2*lambda), prob=TRUE, main='Histogram of individual biases');
abline(v=bias_true, col='red');
abline(v=bias_BP, col='blue', lty=2);
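# Illustrative extension (added by the editor, not part of the original
# exercise): the same bootstrap replicates also give a standard-error
# estimate for p_hat.
se_BP = sd(P_hat_star);
cat( paste('Bootstrap SE estimate = ', round(se_BP, 3), '\n', sep='') );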
|
/Stats406/old_lab_notes2015/Lab 8/Lab_8_sol.r
|
no_license
|
Pill-GZ/Teaching
|
R
| false | false | 1,039 |
r
|
library(png)
library(jpeg)
library(tcltk)
img <-readPNG("lady.png")[,,1] # the image file's coordinate origin is at the top left, so...
img_upright <- function(x){ # ...apply rev to the columns to stand the image upright
  t(apply(x,2,rev))
}
image(img_upright(img), col = grey(0:11/12)) # display in grey
LDR <-function(Y,M,seed=1234){ # takes the data as Y and the latent-variable dimension as M
  set.seed(seed) # set the seed
  N <-ncol(Y) # number of columns of Y
  D <-nrow(Y) # number of rows of Y
  X <- matrix(rnorm(M*N),M,N) # initialise the elements with draws from a normal distribution
  W <- matrix(rnorm(M*D),D,M) # initialise the elements with draws from a normal distribution
  I_D <- diag(1,D) # diagonal matrix with 1s on the diagonal
  S_muinv <- diag(1,D)
  S_Winv <- diag(1,M)
  I_M <- diag(1,M)
  pb <- txtProgressBar(min = 1, max = 1000, style = 3)
  for(i in 1:1000){
    S_muinv <-N*I_D+S_muinv # update S_muinv
    mu <-drop(rowSums(Y - W%*%X)%*%solve(S_muinv)) # solve() gives S_mu
    # rowSums(Y - W%*%X)%*%S_mu updates mu (the mean of μ);
    # the result is a num [1, 1:D] array, so
    # drop() removes the dimension labels and turns it into a vector
    W <-((Y-mu)%*%t(X)) %*% solve(X%*%t(X)+S_Winv) # solve() gives S_W
    # update W
    ###S_Winv <-X%*%t(X)+S_Winv
    ###W <-((Y-mu)%*%t(X)) %*% solve(S_Winv)
    ### splitting it into the two steps above somehow does not work!
    X <-t(t(Y-mu)%*%W %*% (solve(t(W)%*%W+I_M))) # update X
    setTxtProgressBar(pb, i)
  }
  list(W=W,X=X,mu=mu)
}
M10<-LDR(img_upright(img),10) # try latent dimension 10
image((M10$W%*%M10$X+M10$mu),main="M=10", col = grey(0:11/12))
output <-function(img,n){ # takes the latent dimension as an argument,
  Mn<-LDR(img_upright(img),n) # extracts the latent variables and
  image((Mn$W%*%Mn$X+Mn$mu), # reconstructs and displays the image
        main=paste('M=',n), col = grey(0:11/12))
}
output(img,10)
output(img,50)
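# Illustrative check (added by the editor): mean squared reconstruction error
# of the M = 10 approximation against the upright original image.
mean((img_upright(img) - (M10$W %*% M10$X + M10$mu))^2)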
|
/LDR.R
|
no_license
|
westvirturegate/GMM
|
R
| false | false | 1,956 |
r
|
# Get the 'R CMD check' status, if any
status <- R.utils::queryRCmdCheck()
if (status != "notRunning") {
cat("The current R session was launched by R CMD check. Status:", status, "\n")
} else {
cat("The current R session was not launched by R CMD check.\n")
}
# Display how R was launched
print(base::commandArgs())
# Display loaded packages etc.
print(search())
# Display current working directory
print(getwd())
|
/tests/queryRCmdCheck.R
|
no_license
|
muschellij2/R.utils
|
R
| false | false | 441 |
r
|
activate_chat = function(allowed_users, sleep_time = 5) {
chat = make_chat()
print("Activated Bot ...")
while (chat$is_active) {
Sys.sleep(sleep_time)
chat = check_for_message(chat, allowed_users)
}
return(chat)
}
# Get updates
make_chat = function() {
chat = list(chat_id = recieve_chat_id(Sys.getenv("telegram_token")),
output = NULL,
input = NULL,
last_msg_date = Sys.time(),
is_active = TRUE,
echo = TRUE)
send_message("Activated Chat Bot", chat$chat_id, Sys.getenv("telegram_token"))
class(chat) = "telegram_chat"
return(chat)
}
update_chat_id = function(chat) {
# Update the chat id
chat$chat_id = recieve_chat_id(Sys.getenv("telegram_token"))
return(chat)
}
# Listen for bot activation: the message "bot on" starts an active chat,
# and "stop" (sent after start-up) ends listening.
listen = function(allowed_users, sleep_time = 120L) {
# Init chat
cid = recieve_chat_id(Sys.getenv("telegram_token"))
bot_start = cid$created_on
send_message("Started Listening ...", cid, Sys.getenv("telegram_token"))
# Make new environment
active = TRUE
while (active) {
cid = recieve_chat_id(Sys.getenv("telegram_token"))
if (tolower(cid$message) == "bot on") {
chat = activate_chat(allowed_users)
} else if (tolower(cid$message) == "stop" & cid$created_on > bot_start) {
# Only turn off listening if chat is inactive
msg = "Stopped Listening ..."
active = FALSE
send_message(msg, cid, Sys.getenv("telegram_token"))
}
Sys.sleep(sleep_time)
}
print(msg)
}
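# Illustrative usage sketch (added by the editor; the user names and token are
# placeholders, not real values):
# Sys.setenv(telegram_token = "<bot token>")
# listen(allowed_users = c("alice", "bob"), sleep_time = 60)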
|
/R/wait_for_message.R
|
no_license
|
pfistfl/RTelegram
|
R
| false | false | 1,495 |
r
|
##############################################################
# MSNBC Transcripts
# Michele Claibourn
# Acquire data: initially Jan 20, 2017 through Nov 16, 2017
# Rachel Maddow, Last Word/O'Donnell, All In/Hayes
# Updated: through September 30, 2018
##############################################################
rm(list=ls())
library(dplyr)
library(rvest)
library(tm)
library(stringr)
library(quanteda)
#####################
# Rachel Maddow
#####################
setwd("~/Box Sync/mpc/dataForDemocracy/presidency_project/cablenews/")
# if (!file.exists("maddow")) {
# dir.create("maddow")
# }
setwd("maddow")
# Load the source pages
maddow <- NULL # create null data set
# for 2018, change year in path (initially 2017) to 2018
for (i in 1:12) {
source_page <- read_html(paste0("http://www.msnbc.com/transcripts/rachel-maddow-show/2018/", i))
# Get URLs associated with each day's transcript text
url1 <- source_page %>%
html_nodes(".transcript-item a") %>%
html_attr("href") %>%
xml2::url_absolute("http://www.msnbc.com/")
head(url1)
madd1 <- data.frame(url=url1, stringsAsFactors=FALSE)
maddow <- rbind(maddow, madd1)
}
# Turn into a dataframe and extract date, show segment
maddow$show <- "maddow"
maddow$date <- str_extract(maddow$url, "[0-9]{4}-[0-9]{2}-[0-9]{2}")
maddow$date <- as.Date(maddow$date, "%Y-%m-%d")
# # On initial run: Keep only transcripts since January 20, 2017
# maddow <- maddow %>% filter(date > as.Date("2017-01-19"))
# # On January run: Keep only transcripts since initial download, November 17, 2017 to December 31, 2017
# maddow <- maddow %>%
# filter(date > as.Date("2017-11-16") & date < as.Date("2018-01-01"))
# On March run: Keep only transcripts since December 31, 2017 to February 28, 2018
# maddow <- maddow %>%
# filter(date > as.Date("2017-12-31") & date < as.Date("2018-03-01"))
# On June run: Keep only transcripts since February 28, 2018 to May 31, 2018
# maddow <- maddow %>%
# filter(date > as.Date("2018-02-28") & date < as.Date("2018-06-01"))
# On August run: Keep only transcripts since June 1, 2018 to July 31, 2018
# maddow <- maddow %>%
# filter(date > as.Date("2018-05-31") & date < as.Date("2018-08-01"))
# On September run: Keep only transcripts since August 1, 2018 to August 31, 2018
# On October run: Keep only transcripts since September 1, 2018 to September 30, 2018
# On November run: Keep only transcripts since October 1, 2018 to October 31, 2018
maddow <- maddow %>%
filter(date > as.Date("2018-09-30") & date < as.Date("2018-11-01"))
# Loop through each link in data.frame (nrow(maddow)) and
# a. grab the html (read_html()), isolating node with text (".pane-node-body .pane-content",
# b. extract the text (html_text),
# c. append appropriate party label-year to downloaded file (paste0)
# d. and send output to file (sink/cat)
for(i in seq(nrow(maddow))) {
text <- read_html(maddow$url[i]) %>% # load the page
html_nodes(".pane-node-body .pane-content") %>% # isolate the text
html_text() # get the text
filename <- paste0(maddow$date[i], ".txt")
sink(file = filename) %>% # open file to write
cat(text) # put the contents of "text" in the file
sink() # close the file
}
#####################
# Last Word with Lawrence O'Donnell
#####################
setwd("~/Box Sync/mpc/dataForDemocracy/presidency_project/cablenews/")
# if (!file.exists("lastword")) {
# dir.create("lastword")
# }
setwd("lastword")
# Load the source pages
lastword <- NULL # create null data set
# for 2018, change year in path (initially 2017) to 2018
for (i in 1:12) {
source_page <- read_html(paste0("http://www.msnbc.com/transcripts/the-last-word/2018/", i))
# Get URLs associated with each day's transcript text
url1 <- source_page %>%
html_nodes(".transcript-item a") %>%
html_attr("href") %>%
xml2::url_absolute("http://www.msnbc.com/")
head(url1)
last1 <- data.frame(url=url1, stringsAsFactors=FALSE)
lastword <- rbind(lastword, last1)
}
# Turn into a dataframe and extract date, show segment
lastword$show <- "lastword"
lastword$date <- str_extract(lastword$url, "[0-9]{4}-[0-9]{2}-[0-9]{2}")
lastword$date <- as.Date(lastword$date, "%Y-%m-%d")
# # On initial run: Keep only transcripts since January 20, 2017
# lastword <- lastword %>% filter(date > as.Date("2017-01-19"))
# # On January run: Keep only transcripts since initial download, November 17, 2017 to December 31, 2017
# lastword <- lastword %>%
# filter(date > as.Date("2017-11-16") & date < as.Date("2018-01-01"))
# On March run: Keep only transcripts since December 31, 2017 to February 28, 2018
# lastword <- lastword %>%
# filter(date > as.Date("2017-12-31") & date < as.Date("2018-03-01"))
# On June run: Keep only transcripts since February 28, 2018 to May 31, 2018
# lastword <- lastword %>%
# filter(date > as.Date("2018-02-28") & date < as.Date("2018-06-01"))
# On August run: Keep only transcripts since June 1, 2018 to July 31, 2018
# On September run: Keep only transcripts since August 1, 2018 to August 31, 2018
# On October run: Keep only transcripts since September 1, 2018 to September 30, 2018
# On November run: Keep only transcripts since October 1, 2018 to October 31, 2018
lastword <- lastword %>%
filter(date > as.Date("2018-09-30") & date < as.Date("2018-11-01"))
# Download transcripts as text files
for(i in seq(nrow(lastword))) {
text <- read_html(lastword$url[i]) %>% # load the page
html_nodes(".pane-node-body .pane-content") %>% # isolate the text
html_text() # get the text
filename <- paste0(lastword$date[i], ".txt")
sink(file = filename) %>% # open file to write
cat(text) # put the contents of "text" in the file
sink() # close the file
}
#####################
# All In with Chris Hayes
#####################
setwd("~/Box Sync/mpc/dataForDemocracy/presidency_project/cablenews/")
# if (!file.exists("allin")) {
# dir.create("allin")
# }
setwd("allin")
# Load the source pages
allin <- NULL # create null data set
# for 2018, change year in path (initially 2017) to 2018
for (i in 1:12) {
source_page <- read_html(paste0("http://www.msnbc.com/transcripts/all-in/2018/", i))
# Get URLs associated with each day's transcript text
url1 <- source_page %>%
html_nodes(".transcript-item a") %>%
html_attr("href") %>%
xml2::url_absolute("http://www.msnbc.com/")
head(url1)
all1 <- data.frame(url=url1, stringsAsFactors=FALSE)
allin <- rbind(allin, all1)
}
# Turn into a dataframe and extract date, show segment
allin$show <- "allin"
allin$date <- str_extract(allin$url, "[0-9]{4}-[0-9]{2}-[0-9]{2}")
allin$date <- as.Date(allin$date, "%Y-%m-%d")
# # On initial run: Keep only transcripts since January 20, 2017
# allin <- allin %>% filter(date > as.Date("2017-01-19"))
# # On January run: Keep only transcripts since initial download, November 17, 2017 to December 31, 2017
# allin <- allin %>%
# filter(date > as.Date("2017-11-16") & date < as.Date("2018-01-01"))
# On March run: Keep only transcripts since December 31, 2017 to February 28, 2018
# allin <- allin %>%
# filter(date > as.Date("2017-12-31") & date < as.Date("2018-03-01"))
# On June run: Keep only transcripts since February 28, 2018 to May 31, 2018
# allin <- allin %>%
# filter(date > as.Date("2018-02-28") & date < as.Date("2018-06-01"))
# On August run: Keep only transcripts since June 1, 2018 to July 31, 2018
# allin <- allin %>%
# filter(date > as.Date("2018-05-31") & date < as.Date("2018-08-01"))
# On September run: Keep only transcripts since August 1, 2018 to August 31, 2018
# On October run: Keep only transcripts since September 1, 2018 to September 30, 2018
# On November run: Keep only transcripts since October 1, 2018 to October 31, 2018
allin <- allin %>%
filter(date > as.Date("2018-09-30") & date < as.Date("2018-11-01"))
# Download transcripts as text files
for(i in seq(nrow(allin))) {
text <- read_html(allin$url[i]) %>% # load the page
html_nodes(".pane-node-body .pane-content") %>% # isolate the text
html_text() # get the text
filename <- paste0(allin$date[i], ".txt")
sink(file = filename) %>% # open file to write
cat(text) # put the contents of "text" in the file
sink() # close the file
}
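# Illustrative refactor sketch (added by the editor, not part of the original
# script): the three blocks above repeat the same scrape-and-filter pattern, so
# a helper along these lines could replace them; the URL structure and CSS
# selectors are taken verbatim from the code above.
scrape_msnbc_show <- function(show_path, start_date, end_date, year = 2018) {
  urls <- NULL
  for (i in 1:12) {
    source_page <- read_html(paste0("http://www.msnbc.com/transcripts/", show_path, "/", year, "/", i))
    u <- source_page %>%
      html_nodes(".transcript-item a") %>%
      html_attr("href") %>%
      xml2::url_absolute("http://www.msnbc.com/")
    urls <- c(urls, u)
  }
  df <- data.frame(url = urls, stringsAsFactors = FALSE)
  df$date <- as.Date(str_extract(df$url, "[0-9]{4}-[0-9]{2}-[0-9]{2}"), "%Y-%m-%d")
  df[df$date > as.Date(start_date) & df$date < as.Date(end_date), ]
}
# e.g. maddow <- scrape_msnbc_show("rachel-maddow-show", "2018-09-30", "2018-11-01")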
|
/codeR/cablenews/acquire_msnbc.R
|
no_license
|
datafordemocracy/publicpresidency
|
R
| false | false | 8,277 |
r
|
library(drake)
### Name: make_with_config
### Title: Run 'make()', on an existing internal configuration list.
### Aliases: make_with_config
### ** Examples
## Not run:
##D test_with_dir("Quarantine side effects.", {
##D load_mtcars_example() # Get the code with drake_example("mtcars").
##D # The following lines are the same as make(my_plan)
##D config <- drake_config(my_plan) # Create the internal config list.
##D make_with_config(config = config) # Run the project, build the targets.
##D })
## End(Not run)
|
/data/genthat_extracted_code/drake/examples/make_with_config.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 522 |
r
|
plm.knn=function(k,y,X,W,lambda=1,X.new=NULL,W.new=NULL,cl=1) {
library(foreach)
library(doParallel)
n=length(y)
if(length(lambda)==1) {lambda=rep(lambda,ncol(W))}
Lambda=matrix(rep(lambda,n),nrow=n,byrow=T)
nb=function(w) {
d=sqrt(rowSums(((W-matrix(rep(w,n),nrow=n,byrow=T))*Lambda)^2))
d_k=sort(d)[k]
nb_ind=which(d<=d_k)
return(nb_ind)
}
knn.mean=function(i,W.nb) {
nb_ind=nb(W.nb[i,])
y_bar=mean(y[nb_ind])
x_bar=colMeans(X[nb_ind,])
return(c(y_bar,x_bar))
}
if(cl>1) {
registerDoParallel(cl)
M=foreach(i=1:n,.combine=rbind) %dopar% {knn.mean(i,W)}
stopImplicitCluster()
} else {
M=foreach(i=1:n,.combine=rbind) %do% {knn.mean(i,W)}
}
rownames(M)=NULL
y_demean=y-M[,1];X_demean=X-M[,-1]
beta=as.vector(solve(t(X_demean)%*%X_demean)%*%t(X_demean)%*%y_demean)
if(is.null(X.new)|is.null(W.new)) {
y_hat=as.vector(X_demean%*%beta)+M[,1]
return(list(pred.plm=y_hat,pred.knn=M[,1],coef=beta))
} else {
if(cl>1) {
registerDoParallel(cl)
M.new=foreach(i=1:nrow(W.new),.combine=rbind) %dopar% {knn.mean(i,W.new)}
stopImplicitCluster()
} else {
M.new=foreach(i=1:nrow(W.new),.combine=rbind) %do% {knn.mean(i,W.new)}
}
rownames(M.new)=NULL
y_hat=as.vector((X.new-M.new[,-1])%*%beta)+M.new[,1]
return(list(pred.plm=y_hat,pred.knn=M.new[,1],coef=beta))
}
}
pfm=function(y,pred) {
e2=(y-pred)^2
R2=1-sum(e2)/sum((y-mean(y))^2)
RMSE=sqrt(mean(e2))
return(c(R2=R2,RMSE=RMSE))
}
plm.knn.tune=function(k.grid,lambda.grid=1,y,X,W,fold=4,metric="R2",cl=1) {
n=length(y)
group=caret::createFolds(1:n,k=fold)
plm.knn.cv=function(k,lambda) {
y_hat=rep(NA,n)
for (i in 1:fold) {
y_hat[group[[i]]]=plm.knn(k=k,
y=y[-group[[i]]],X=X[-group[[i]],],W=W[-group[[i]],],
lambda=lambda,
X.new=X[group[[i]],],W.new=W[group[[i]],],
cl=cl)$pred.plm
}
performance=pfm(y,y_hat)
return(performance)
}
if(length(lambda.grid)==1) {
tuneGrid=data.frame(k=k.grid,lambda=lambda.grid)
} else {
tuneGrid=data.frame(k=rep(k.grid,each=nrow(lambda.grid)),lambda=lambda.grid)
}
result=NULL
for (i in 1:nrow(tuneGrid)) {
tc=Sys.time()
cat(paste(i," group tuning parameters starting at: ",tc,", ",sep=""))
result=rbind(result,
plm.knn.cv(k=tuneGrid$k[i],lambda=unlist(tuneGrid[i,-1])))
tc=Sys.time()-tc;print(tc)
}
result=as.data.frame(result)
if(metric=="R2") {
bestTune=which.max(result$R2)
} else {
bestTune=which.min(result$RMSE)
}
return(list(k.best=tuneGrid$k[bestTune],lambda.best=unlist(tuneGrid[bestTune,-1]),
Performance=cbind(tuneGrid,result)))
}
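# Illustrative usage sketch (added by the editor; y, X and W are placeholders
# for the response vector, the covariate matrix of the linear part and the
# variables entering the nonparametric kNN part):
# fit <- plm.knn(k = 10, y = y, X = X, W = W, lambda = 1, cl = 2)
# pfm(y, fit$pred.plm)
# tune <- plm.knn.tune(k.grid = c(5, 10, 20), y = y, X = X, W = W,
#                      fold = 4, metric = "RMSE", cl = 2)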
|
/plm.knn.R
|
no_license
|
ishwang1/Ish-function
|
R
| false | false | 2,880 |
r
|
\alias{gtkTreeViewAppendColumn}
\name{gtkTreeViewAppendColumn}
\title{gtkTreeViewAppendColumn}
\description{Appends \code{column} to the list of columns. If \code{tree.view} has "fixed\_height"
mode enabled, then \code{column} must have its "sizing" property set to be
GTK\_TREE\_VIEW\_COLUMN\_FIXED.}
\usage{gtkTreeViewAppendColumn(object, column)}
\arguments{
\item{\code{object}}{[\code{\link{GtkTreeView}}] A \code{\link{GtkTreeView}}.}
\item{\code{column}}{[\code{\link{GtkTreeViewColumn}}] The \code{\link{GtkTreeViewColumn}} to add.}
}
\value{[integer] The number of columns in \code{tree.view} after appending.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkTreeViewAppendColumn.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false | false | 694 |
rd
|
#test multicore
rm(list=ls())
library(parallel)
dats=list(n=10,k=5)
simdat = function(...){
#include a pause to see if it works
res=list(n=10,k=5)
#dats=c(...)
dats=list(n=10,k=5)
n=dats[['n']]
k=dats[['k']]
d=list(matrix(rnorm(n*k,mean=0,sd=1),n,k))
}
simdat2 = function(...){
d=list(matrix(rnorm(n*k,mean=0,sd=1),n,k))
}
#xtest = simdat(n=5,k=10)
xtest = simdat()
cl <- makeCluster(mc <- getOption("cl.cores", 2))
cltst=do.call(c,parLapply(cl=cl,1:2,simdat))
#alt
#res=clusterEvalQ(cl,dats)
#cl2=do.call(c,res)
#simdat2(cl2) # needs cl2 from the commented-out lines above (and n, k in the global environment)
stopCluster(cl)
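# Editor's sketch (assumptions: simdat2() needs n and k, which do not exist as
# globals above; one way the commented-out alternative could be made to work):
# n <- dats[['n']]; k <- dats[['k']]
# cl <- makeCluster(2)
# clusterExport(cl, c("n", "k", "simdat2"))
# res2 <- do.call(c, parLapply(cl, 1:2, function(i) simdat2()))
# stopCluster(cl)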
|
/code/parallel.R
|
no_license
|
bjb40/apc
|
R
| false | false | 585 |
r
|
png("plot3.png")
with(df, plot(timestamp, Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = ""))
with(df, points(timestamp, Sub_metering_2, type = "l", col = "red"))
with(df, points(timestamp, Sub_metering_3, type = "l", col = "blue"))
legend("topright", pch = "-", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) # colors reordered to match the lines drawn above
dev.off()
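# Editor's note: `df` is assumed to be prepared by an earlier step of the
# project (reading household_power_consumption.txt, keeping the two days of
# interest and building a date-time `timestamp` column); a rough sketch:
# df <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
# df <- df[df$Date %in% c("1/2/2007", "2/2/2007"), ]
# df$timestamp <- strptime(paste(df$Date, df$Time), "%d/%m/%Y %H:%M:%S")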
|
/plot3.R
|
no_license
|
zaph6691/course4project1
|
R
| false | false | 389 |
r
|
###############################################################
# Name: Danielle Senechal
# CSC-301
# Final Project
###############################################################
# Define a server for the Shiny app
function(input, output, session) {
updateSelectInput(session, "datas",
label = paste("Which data set?"),
choices = c("hmnist_8_8_RGB.csv", "hmnist_28_28_RGB.csv"),
selected = "hmnist_8_8_RGB.csv")
# choose between 1, 3, and 5 neighbors
updateSelectInput(session, "neighbor",
label = paste("How many neighbors?"),
choices = c(1, 3, 5),
selected = 3 )
# choose between 90%, 80%, 70%, 60%, and 50% for the training dataset
updateSelectInput(session, "splits",
label = paste("What percent of data for training? (Multiply by 100)"),
choices=c(0.90, 0.80, 0.70, 0.60, 0.50), selected = 0.60 )
observe({
######################## kNN function ########################
setwd("~/Documents/ECSU/Spring 2020/Web Dev/The Final") # set working directory
thedata <- read.csv(paste0(input$datas))
########## Naming the categories ##########
head(thedata$label)
thedata.coded <- factor(thedata$label, levels = c(0, 1, 2, 3, 4, 5, 6),
labels = c("AKIEC", "BCC", "BKL", "DTF", "NV",
"VASC", "MEL")) # corresponding names
########## Spliting into seven categories ##########
AKIEC <- subset(thedata, thedata$label == 0)
nrow(AKIEC)
# View(AKIEC)
BCC <- subset(thedata, thedata$label == 1)
nrow(BCC)
# View(BCC)
BKL <- subset(thedata, thedata$label == 2)
nrow(BKL)
# View(BKL)
DTF <- subset(thedata, thedata$label == 3)
nrow(DTF)
# View(DTF)
NV <- subset(thedata, thedata$label == 4)
nrow(NV)
# View(NV)
VASC <- subset(thedata, thedata$label == 5)
nrow(VASC)
# View(VASC)
MEL <- subset(thedata, thedata$label == 6)
nrow(MEL)
# View(MEL)
nrow(AKIEC) + nrow(BCC) + nrow(BKL) + nrow(DTF) + nrow(MEL) + nrow(NV) + nrow(VASC)
# should equate to 10,015
########## Spliting into training/testing ##########
thesplit <- as.numeric(input$splits) # make input numeric
####### AKIEC #######
per.AKIEC <- sample(1:nrow(AKIEC), thesplit * nrow(AKIEC))
AKIEC.train <- AKIEC[per.AKIEC,] # training
AKIEC.test <- AKIEC[-per.AKIEC,] #testing
####### BCC #######
per.BCC <- sample(1:nrow(BCC), thesplit * nrow(BCC))
BCC.train <- BCC[per.BCC,] # training
BCC.test <- BCC[-per.BCC,] # testing
####### BKL #######
per.BKL <- sample(1:nrow(BKL), thesplit * nrow(BKL))
BKL.train <- BKL[per.BKL,] # training
BKL.test <- BKL[-per.BKL,] # testing
####### DTF #######
per.DTF <- sample(1:nrow(DTF), thesplit * nrow(DTF))
DTF.train <- DTF[per.DTF,] # training
DTF.test <- DTF[-per.DTF,] # testing
####### MEL #######
per.MEL <- sample(1:nrow(MEL), thesplit * nrow(MEL))
MEL.train <- MEL[per.MEL,] # training
MEL.test <- MEL[-per.MEL,] # testing
####### NV #######
per.NV <- sample(1:nrow(NV), thesplit * nrow(NV))
NV.train <- NV[per.NV,] # training
NV.test <- NV[-per.NV,] # testing
####### VASC #######
per.VASC <- sample(1:nrow(VASC), thesplit * nrow(VASC))
VASC.train <- VASC[per.VASC,] # training
VASC.test <- VASC[-per.VASC,] # testing
########## Recombining ##########
training <- rbind(AKIEC.train, BCC.train, BKL.train, DTF.train, MEL.train, NV.train, VASC.train)
training <- data.frame(training) # data frame of all training data
testing <- rbind(AKIEC.test, BCC.test, BKL.test, DTF.test, MEL.test, NV.test, VASC.test)
testing <- data.frame(testing) # data frame of all testing data
########## Perform kNN function ##########
target.category <- training[ncol(training)] # select last column which has target categories
test.category <- testing[ncol(testing)] # select last column which has target categories
    cl = target.category[,1] # create classification variable
predictions <- knn(training, testing, cl, k = input$neighbor) # kNN, k is chosen by user in ui
test.category.vec = test.category[,1] # create vector
# identify correct predictions in a confusion matrix
cm <- as.matrix(table(test.category.vec, predictions))
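    # rows of cm are the true test labels, columns are the kNN predictions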
colnames(cm) <- c("AKIEC", "BCC", "BKL", "DTF", "NV", "VASC", "MEL") # give column names
rownames(cm) <- c("AKIEC", "BCC", "BKL", "DTF", "NV", "VASC", "MEL") # give row names
    total.accuracy <- sum(diag(cm))/length(test.category.vec) * 100 # overall accuracy: correct test predictions over all test cases
acc <- diag(cm) # select diagonal results, they are the predictions that are correct
tots <- data_frame(acc) # create dataframe
# add column of overall totals in each testing category
tots$totals <- c(nrow(AKIEC.test), nrow(BCC.test), nrow(BKL.test), nrow(DTF.test),
nrow(NV.test), nrow(VASC.test), nrow(MEL.test))
# divide number correct by overall in each category, multiply by 100 to get percentage
indiv.accs <- tots$acc / tots$totals * 100
indiv.accs <- data_frame(indiv.accs) # create dataframe
# add column to specify type of lesion
indiv.accs$type <- c("AKIEC", "BCC", "BKL", "DTF", "NV", "VASC", "MEL")
akiec.acc <- indiv.accs[1,] # select AKIEC row
bcc.acc <- indiv.accs[2,] # select BCC row
bkl.acc <- indiv.accs[3,] # select BKL row
dtf.acc <- indiv.accs[4,] # select DTF row
nv.acc <- indiv.accs[5,] # select NV row
vasc.acc <- indiv.accs[6,] # select VASC row
mel.acc <- indiv.accs[7,] # select MEL row
######################## end kNN function ########################
# output bar graph that updates with given input (set y axis to prevent confusion)
output$accuracyPlot <- renderPlot({
ggplot(indiv.accs, aes(x=type, y=indiv.accs, fill=type)) +
geom_bar(stat='identity', position='dodge') +
ggtitle(paste0("Categorical Accuracies using ", as.numeric(input$splits) * 100,
"% of the data for training and k = ", input$neighbor, " (", input$datas, ")")) +
labs(x = "Type of lesion", y = "Accuracy (%)") + ylim(0, 100) +
theme(legend.position = "none") })
# output overall accuracy
output$accuracyMessage <- renderText({
paste('Overall accuracy is:', total.accuracy, "%") })
# output accuracy for AKIEC
output$akiecMessage <- renderText({
paste('Total accuracy for Actinic keratoses (AKIEC) is:',
round(akiec.acc$indiv.accs, digits=2), "%") })
# output accuracy for BCC
output$bccMessage <- renderText({
paste('Total accuracy for Basal cell carcinoma (BCC) is:',
round(bcc.acc$indiv.accs, digits=2), "%") })
# output accuracy for BKL
output$bklMessage <- renderText({
paste('Total accuracy for Benign keratosis-like lesions (BKL) is:',
round(bkl.acc$indiv.accs, digits=2), "%") })
# output accuracy for DTF
output$dtfMessage <- renderText({
paste('Total accuracy for Dermatofibroma (DTF) is:',
round(dtf.acc$indiv.accs, digits=2), "%") })
# output accuracy for MEL
output$melMessage <- renderText({
paste('Total accuracy for Melanoma (MEL) is:',
round(mel.acc$indiv.accs, digits=2), "%") })
# output accuracy for NV
output$nvMessage <- renderText({
paste('Total accuracy for Melanocytic nevi (NV) is:',
round(nv.acc$indiv.accs, digits=2), "%") })
# output accuracy for VASC
output$vascMessage <- renderText({
paste('Total accuracy for Vascular lesions (VASC) is:',
round(vasc.acc$indiv.accs, digits=2), "%") })
})
}
|
/server.R
|
no_license
|
DaniSenechal/KNNSkinLesions
|
R
| false | false | 7,974 |
r
|
library(haven)
library(tidyverse)
library(ggplot2)
library(forcats)
library(xtable)
options(xtable.comment = FALSE)
library(DT)
# map libraries
library(maps)
library(dplyr)
library(maptools)
library(rgdal)
library(here)
library(plotly)
# pie chart libraries
library(scales)
library(plotrix)
#general
library(labelled)
library(ggmosaic)
# heatmap
library("pheatmap")
require(ggplot2)
require(colorspace)
library(grid)
datos <- read_sav("PlotFunctions/datos.sav")
nousa <- datos %>% filter(C9==2 & C24!=99)
print(getwd())
PlotRazonesNoUsoInternet <- function() {
  # Bar chart of the reasons why people do not use the internet
ggplot(nousa,aes(x=(fct_infreq(as.factor(C24))),group=as.factor(C24)))+geom_bar(stat = "count",aes(y = (..count..)/sum(..count..)))+scale_x_discrete(labels=c(
"1"="No sabe como podria servirle",
"2"="No sabe usarlo",
"3"="No tiene dispositivos digitales",
"4"="Le resulta caro",
"5"="No tiene tiempo",
"6"="Discapacidad",
"7"="No le interesa o no quiere",
"8"="Falta de conocimiento de idioma extranjero",
"9"="Inseguro respecto al contenido",
"10"="Le preocupa privacidad",
"11"="Otra"))+labs(fill=NULL,x="Razon",y="Cantidad")+scale_fill_brewer(palette="Paired",labels=NULL)+scale_y_continuous(labels = scales::percent)+
theme(axis.text.x = element_text(angle = 270, vjust = 0))
}
# Boxplot of age for people who do and do not use the internet
PlotEdadPersonasUsanInternet <- function() {
ggplot(datos, aes(x = C8, y = as.factor(C9))) + geom_boxplot() + coord_flip() +
scale_y_discrete(labels = c("1" = "Si", "2" = "No")) +
labs(x = "Edad", y = "Usa internet")
}
# 100% stacked bar chart of internet use by education level
PlotUsoInternetNivelEducativo <- function() {
ggplot(datos, aes(fill = as.factor(C9), x = as.factor(niveledu))) + geom_bar(position = "fill") +
labs(y = "Proporcion", x = "Nivel educativo", fill="Usa internet") +
scale_fill_brewer(palette="Dark2",labels=c("1"="Si","2"="No"))
}
# 100% stacked bar chart of internet use by income quintile
PlotUsoInternetQuintil <- function() {
ggplot(datos, aes(fill = as.factor(C9), x = as.factor(Quintil))) + geom_bar(position = "fill") +
labs(y = "Proporcion", x = "Quintil", fill="Usa internet",
title = "Grafico de barras apiladas al 100% de uso de internet segun quintil de ingreso per capita") +
scale_fill_brewer(palette="Dark2",labels=c("1"="Si","2"="No"))
}
# 100% stacked bar chart of internet use by department
PlotUsoInternetDepartamento <- function() {
ggplot(datos, aes(fill = as.factor(C9), x = fct_reorder(as.factor(DOMDEPARTAMENTO),-as.numeric(C9),mean))) + geom_bar(position = "fill") +
labs(y = "Proporcion", x = "Departamento", fill="Usa internet") +
scale_fill_brewer(palette="Dark2",labels=c("1"="Si","2"="No")) + scale_x_discrete(labels=c("1"="Montevideo",
"3"="Canelones",
"4"="Cerro Largo",
"5"="Colonia",
"8"="Florida",
"10"="Maldonado",
"6"="Durazno",
"11"="Paysandu",
"12"="Rio Negro",
"13"="Rivera",
"14"="Rocha",
"16"="San José",
"18"="Tacuarembó")) +
theme(axis.text.x = element_text(angle = 270, vjust = 0))
}
# Frequency of internet use at work, by education level
PlotUsoInternetTrabajoNivelEducativo <- function() {
datos$niveledu
datos%>%filter(!is.na(C13_2))%>%
ggplot(aes(x=as.factor(niveledu),fill=as.factor(C13_2)))+geom_bar(position = "fill")+
scale_fill_discrete("Uso internet",breaks=c(1,2,3,4,99),
labels=c("Todos los dias",
"Al menos una vez a la semana",
"Con menor frecuencia",
"No utilizó",
"S/D"))+
scale_x_discrete(labels=c("PI",
"CBI",
"SCI",
"TI",
"TNU",
"TU"))+
labs(x="Nivel educativo",y="Proporcion")
}
# Internet use by education level
PlotUsoInternetPorNivelEducativo <- function() {
datos$C11
datos%>%filter(!is.na(C11))%>%
ggplot(aes(x=as.factor(niveledu),fill=as.factor(C11)))+geom_bar(position = "fill")+
scale_x_discrete(labels=c("PI",
"CBI",
"SCI",
"TI",
"TNU",
"TU"))+
scale_fill_discrete("Uso internet",breaks=c(1,2,3,4,99),
labels=c("Todos los dias",
"Al menos una vez a la semana",
"Con menor frecuencia",
"No utilizó",
"S/D"))+
labs(x="Nivel educativo",y="Proporcion")
}
# Social media use by education level
PlotUsoRedesSocialesNivelEducativo <- function() {
datos$C9_1
datos%>%filter(!is.na(C9_1))%>%
ggplot(aes(x=as.factor(niveledu),fill=as.factor(C9_1)))+geom_bar(position = "fill")+
scale_x_discrete(labels=c("PI",
"CBI",
"SCI",
"TI",
"TNU",
"TU"))+
scale_fill_discrete("Uso de redes",
breaks=c(1,2),
labels=c("Si","No"))+
labs(x="Nivel educativo",y="Proporcion")
}
CreateDFRedesSocialesTotalPorcentaje <- function(datos) {
datos_RS = datos %>%
group_by(DOMDEPARTAMENTO) %>%
summarise(
# Facebook
porcentage_Facebook_1 = sum(C18_1 == 1, na.rm = TRUE) / sum(C18_1 >=1 & C18_1 <= 99 , na.rm = TRUE),
porcentage_Facebook_2 = sum(C18_1 == 2, na.rm = TRUE) / sum(C18_1 >=1 & C18_1 <= 99 , na.rm = TRUE),
porcentage_Facebook_3 = sum(C18_1 == 3, na.rm = TRUE) / sum(C18_1 >=1 & C18_1 <= 99 , na.rm = TRUE),
porcentage_Facebook_4 = sum(C18_1 == 4, na.rm = TRUE) / sum(C18_1 >=1 & C18_1 <= 99 , na.rm = TRUE),
porcentage_Facebook_99 = sum(C18_1 == 99, na.rm = TRUE) / sum(C18_1 >=1 & C18_1 <= 99 , na.rm = TRUE),
      # WhatsApp
porcentage_WhatsApp_1 = sum(C18_2 == 1, na.rm = TRUE) / sum(C18_2 >=1 & C18_2 <= 4 , na.rm = TRUE),
porcentage_WhatsApp_2 = sum(C18_2 == 2, na.rm = TRUE) / sum(C18_2 >=1 & C18_2 <= 4 , na.rm = TRUE),
porcentage_WhatsApp_3 = sum(C18_2 == 3, na.rm = TRUE) / sum(C18_2 >=1 & C18_2 <= 4 , na.rm = TRUE),
porcentage_WhatsApp_4 = sum(C18_2 == 4, na.rm = TRUE) / sum(C18_2 >=1 & C18_2 <= 4 , na.rm = TRUE),
porcentage_WhatsApp_99 = sum(C18_2 == 99, na.rm = TRUE) / sum(C18_2 >=1 & C18_2 <= 99 , na.rm = TRUE),
# Twitter
porcentage_Twitter_1 = sum(C18_3 == 1, na.rm = TRUE) / sum(C18_3 >=1 & C18_3 <= 4 , na.rm = TRUE),
porcentage_Twitter_2 = sum(C18_3 == 2, na.rm = TRUE) / sum(C18_3 >=1 & C18_3 <= 4 , na.rm = TRUE),
porcentage_Twitter_3 = sum(C18_3 == 3, na.rm = TRUE) / sum(C18_3 >=1 & C18_3 <= 4 , na.rm = TRUE),
porcentage_Twitter_4 = sum(C18_3 == 4, na.rm = TRUE) / sum(C18_3 >=1 & C18_3 <= 4 , na.rm = TRUE),
porcentage_Twitter_99 = sum(C18_3 == 99, na.rm = TRUE) / sum(C18_3 >=1 & C18_3 <= 99 , na.rm = TRUE),
# Instagram
porcentage_Instagram_1 = sum(C18_4 == 1, na.rm = TRUE) / sum(C18_4 >=1 & C18_4 <= 4 , na.rm = TRUE),
porcentage_Instagram_2 = sum(C18_4 == 2, na.rm = TRUE) / sum(C18_4 >=1 & C18_4 <= 4 , na.rm = TRUE),
porcentage_Instagram_3 = sum(C18_4 == 3, na.rm = TRUE) / sum(C18_4 >=1 & C18_4 <= 4 , na.rm = TRUE),
porcentage_Instagram_4 = sum(C18_4 == 4, na.rm = TRUE) / sum(C18_4 >=1 & C18_4 <= 4 , na.rm = TRUE),
porcentage_Instagram_99 = sum(C18_4 == 99, na.rm = TRUE) / sum(C18_4 >=1 & C18_4 <= 99 , na.rm = TRUE),
)
return(datos_RS)
}
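# The columns above give, per department, the share of respondents in each usage band of
# the C18_x items (1 = high ... 4 = never, 99 = no answer -- coding inferred from the
# bar-chart labels used below, so treat it as an assumption).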
##############################################
# plot Facebook usage by department index     #
##############################################
PlotUsoFacebookIndiceDepartamento = function(indice_departamento, datos){
proporciones <- c(datos$porcentage_Facebook_1[indice_departamento],
datos$porcentage_Facebook_2[indice_departamento],
datos$porcentage_Facebook_3[indice_departamento],
datos$porcentage_Facebook_4[indice_departamento],
datos$porcentage_Facebook_99[indice_departamento])
etiqueta_alto = as.character(format(round(proporciones[1] * 100, 2), nsmall = 2))
etiqueta_medio = as.character(format(round(proporciones[2] * 100, 2), nsmall = 2))
etiqueta_poco = as.character(format(round(proporciones[3] * 100, 2), nsmall = 2))
etiqueta_nunca = as.character(format(round(proporciones[4] * 100, 2), nsmall = 2))
etiqueta_na = as.character(format(round(proporciones[5] * 100, 2), nsmall = 2))
porcentajes <- c(etiqueta_alto, etiqueta_medio, etiqueta_poco, etiqueta_nunca, etiqueta_na)
etiquetas <- c("Alto ",
"Medio ",
"Poco ",
"Nunca ",
"N/A ")
# plot
data <- data.frame(
name = etiquetas,
average = proporciones,
perct = porcentajes
)
# Increase bottom margin
par(mar=c(6,4,4,4))
# Basic Barplot
my_bar <- barplot(data$average , border=F , names.arg=data$name ,
las=2 ,
col=c(rgb(0.3,0.1,0.4,0.6) , rgb(0.3,0.5,0.4,0.6) , rgb(0.3,0.9,0.4,0.6) , rgb(0.3,0.9,0.4,0.6), rgb(0.3,0.2,0.2,0.2)) ,
ylim=c(0,1) ,
main="" )
text(my_bar, data$average+0.04 , paste("%: ", data$perct, sep="") ,cex=1)
}
# Bar chart of Facebook usage frequency for the selected department
PlotUsoFacebookDepartamento <- function(index) {
data = CreateDFRedesSocialesTotalPorcentaje(datos)
PlotUsoFacebookIndiceDepartamento(index, data)
}
##############################################
# plot WhatsApp usage by department index     #
##############################################
PlotUsoWhatsAppIndiceDepartamento = function(indice_departamento, datos){
proporciones <- c(datos$porcentage_WhatsApp_1[indice_departamento],
datos$porcentage_WhatsApp_2[indice_departamento],
datos$porcentage_WhatsApp_3[indice_departamento],
datos$porcentage_WhatsApp_4[indice_departamento],
datos$porcentage_WhatsApp_99[indice_departamento])
etiqueta_alto = as.character(format(round(proporciones[1] * 100, 2), nsmall = 2))
etiqueta_medio = as.character(format(round(proporciones[2] * 100, 2), nsmall = 2))
etiqueta_poco = as.character(format(round(proporciones[3] * 100, 2), nsmall = 2))
etiqueta_nunca = as.character(format(round(proporciones[4] * 100, 2), nsmall = 2))
etiqueta_na = as.character(format(round(proporciones[5] * 100, 2), nsmall = 2))
porcentajes <- c(etiqueta_alto, etiqueta_medio, etiqueta_poco, etiqueta_nunca, etiqueta_na)
etiquetas <- c("Alto ",
"Medio ",
"Poco ",
"Nunca ",
"N/A ")
# plot
data <- data.frame(
name = etiquetas,
average = proporciones,
perct = porcentajes
)
# Increase bottom margin
par(mar=c(6,4,4,4))
# Basic Barplot
my_bar <- barplot(data$average , border=F , names.arg=data$name ,
las=2 ,
col=c(rgb(0.3,0.1,0.4,0.6) , rgb(0.3,0.5,0.4,0.6) , rgb(0.3,0.9,0.4,0.6) , rgb(0.3,0.9,0.4,0.6), rgb(0.3,0.2,0.2,0.2)) ,
ylim=c(0,1) ,
main="" )
text(my_bar, data$average+0.04 , paste("%: ", data$perct, sep="") ,cex=1)
}
# Bar chart of WhatsApp usage frequency for the selected department
PlotUsoWhatsAppDepartamento <- function(index) {
data = CreateDFRedesSocialesTotalPorcentaje(datos)
PlotUsoWhatsAppIndiceDepartamento(index, data)
}
##############################################
# plot Twitter usage by department index      #
#############################################
PlotUsoTwitterIndiceDepartamento = function(indice_departamento, datos){
proporciones <- c(datos$porcentage_Twitter_1[indice_departamento],
datos$porcentage_Twitter_2[indice_departamento],
datos$porcentage_Twitter_3[indice_departamento],
datos$porcentage_Twitter_4[indice_departamento],
datos$porcentage_Twitter_99[indice_departamento])
etiqueta_alto = as.character(format(round(proporciones[1] * 100, 2), nsmall = 2))
etiqueta_medio = as.character(format(round(proporciones[2] * 100, 2), nsmall = 2))
etiqueta_poco = as.character(format(round(proporciones[3] * 100, 2), nsmall = 2))
etiqueta_nunca = as.character(format(round(proporciones[4] * 100, 2), nsmall = 2))
etiqueta_na = as.character(format(round(proporciones[5] * 100, 2), nsmall = 2))
porcentajes <- c(etiqueta_alto, etiqueta_medio, etiqueta_poco, etiqueta_nunca, etiqueta_na)
etiquetas <- c("Alto ",
"Medio ",
"Poco ",
"Nunca ",
"N/A ")
# plot
data <- data.frame(
name = etiquetas,
average = proporciones,
perct = porcentajes
)
# Increase bottom margin
par(mar=c(6,4,4,4))
# Basic Barplot
my_bar <- barplot(data$average , border=F , names.arg=data$name ,
las=2 ,
col=c(rgb(0.3,0.1,0.4,0.6) , rgb(0.3,0.5,0.4,0.6) , rgb(0.3,0.9,0.4,0.6) , rgb(0.3,0.9,0.4,0.6), rgb(0.3,0.2,0.2,0.2)) ,
ylim=c(0,1) ,
main="" )
text(my_bar, data$average+0.04 , paste("%: ", data$perct, sep="") ,cex=1)
}
# Bar chart of Twitter usage frequency for the selected department
PlotUsoTwitterDepartamento <- function(index) {
data = CreateDFRedesSocialesTotalPorcentaje(datos)
PlotUsoTwitterIndiceDepartamento(index, data)
}
###############################################
# plot Instagram usage by department index     #
###############################################
PlotUsoInstagramIndiceDepartamento = function(indice_departamento, datos){
proporciones <- c(datos$porcentage_Instagram_1[indice_departamento],
datos$porcentage_Instagram_2[indice_departamento],
datos$porcentage_Instagram_3[indice_departamento],
datos$porcentage_Instagram_4[indice_departamento],
datos$porcentage_Instagram_99[indice_departamento])
etiqueta_alto = as.character(format(round(proporciones[1] * 100, 2), nsmall = 2))
etiqueta_medio = as.character(format(round(proporciones[2] * 100, 2), nsmall = 2))
etiqueta_poco = as.character(format(round(proporciones[3] * 100, 2), nsmall = 2))
etiqueta_nunca = as.character(format(round(proporciones[4] * 100, 2), nsmall = 2))
etiqueta_na = as.character(format(round(proporciones[5] * 100, 2), nsmall = 2))
porcentajes <- c(etiqueta_alto, etiqueta_medio, etiqueta_poco, etiqueta_nunca, etiqueta_na)
etiquetas <- c("Alto ",
"Medio ",
"Poco ",
"Nunca ",
"N/A ")
# plot
data <- data.frame(
name = etiquetas,
average = proporciones,
perct = porcentajes
)
# Increase bottom margin
par(mar=c(6,4,4,4))
# Basic Barplot
my_bar <- barplot(data$average , border=F , names.arg=data$name ,
las=2 ,
col=c(rgb(0.3,0.1,0.4,0.6) , rgb(0.3,0.5,0.4,0.6) , rgb(0.3,0.9,0.4,0.6) , rgb(0.3,0.9,0.4,0.6), rgb(0.3,0.2,0.2,0.2)) ,
ylim=c(0,1) ,
main="" )
text(my_bar, data$average+0.04 , paste("%: ", data$perct, sep="") ,cex=1)
}
# Bar chart of Instagram usage frequency for the selected department
PlotUsoInstagramDepartamento <- function(index) {
data = CreateDFRedesSocialesTotalPorcentaje(datos)
PlotUsoInstagramIndiceDepartamento(index, data)
}
#####################################
# Heatmap of Uruguay by department  #
#####################################
PlotHeatMapDepartamentosRedesSociales <- function(option){
cluster_col = FALSE
cluster_row = FALSE
if (option == 2) {
    # cluster by department
cluster_row = TRUE
}else if (option == 3) {
cluster_col = TRUE
}
  # build the usage percentages per social network and convert them to a matrix
data = CreateDFRedesSocialesTotalPorcentaje(datos)
samp2 = data[,-1]
mat_data = data.matrix(samp2[,1:ncol(samp2)])
mat_data = mat_data * 100
rownames(mat_data) = c("Montevideo",
"Canelones",
"Cerro Largo",
"Colonia",
"Durazno",
"Florida",
"Maldonado",
"Paysandu",
"Rio Negro",
"Rivera",
"Rocha",
"San José",
"Tacuarembó")
colnames(mat_data) = c(
"Facebook Alto",
"Facebook Medio",
"Facebook Poco",
"Facebook Nunca",
"Facebook N/A",
"WhatsApp Alto",
"WhatsApp Medio",
"WhatsApp Poco",
"WhatsApp Nunca",
"WhatsApp N/A",
"Twitter Alto",
"Twitter Medio",
"Twitter Poco",
"Twitter Nunca",
"Twitter N/A",
"Instagram Alto",
"Instagram Medio",
"Instagram Poco",
"Instagram Nunca",
"Instagram N/A"
)
pheatmap(mat_data,
color = colorRampPalette(c("deepskyblue4", "goldenrod", "firebrick"))(100),
fontsize_col = 6,
show_rownames = T,
cluster_cols = cluster_col,
cluster_rows = cluster_row,
main = "Heatmap Uso Redes Sociales Por Departamento",
cutree_cols = 3,
cutree_rows = 3)
}
################################
# Social media usage index     #
################################
CreateIndiceData <- function(data) {
  # Recode the usage variables to build the social media usage index
data = data %>% mutate(C18_1 = replace(C18_1, C18_1 == 99, 0))
data = data %>% mutate(C18_2 = replace(C18_2, C18_2 == 99, 0))
data = data %>% mutate(C18_3 = replace(C18_3, C18_3 == 99, 0))
data = data %>% mutate(C18_4 = replace(C18_4, C18_4 == 99, 0))
data = data %>% mutate(C18_5 = replace(C18_5, C18_5 == 99, 0))
data = data %>% mutate(C18_6 = replace(C18_6, C18_6 == 99, 0))
data = data %>% mutate(C18_7 = replace(C18_7, C18_7 == 99, 0))
data = data %>% replace_na(list(C18_1 = 0, C18_2 =0, C18_3 =0, C18_4 =0, C18_5 =0, C18_6 =0, C18_7 = 0))
  # add the uso_redes_sociales column: an index between 0 and 1 that marks the intensity of social media use.
data = mutate(data, uso_redes_sociales = 1 - ( (( ( C18_1 + C18_2 + C18_3 + C18_4 + C18_5 + C18_6 + C18_7) / 7 ) - 1 ) / 3 ) )
  # group by department and take the mean of the new uso_redes_sociales column.
  # Although some departments show lower usage intensity, use looks fairly homogeneous.
data = data %>%
group_by(DOMDEPARTAMENTO) %>%
summarise(
media_uso_redes_sociales = mean(uso_redes_sociales, na.rm = TRUE)
)
return(data)
}
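# Worked example of the index: answering 1 ("highest use") on all seven C18 items gives a
# mean of 1 and an index of 1 - (1 - 1)/3 = 1; answering 4 ("never") on all items gives
# 1 - (4 - 1)/3 = 0.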
AddIndexDepartamentoMapa <- function(data) {
  # R quirk: pre-create the id column, then overwrite it row by row below
data <- data %>% mutate(id = MapDep(DOMDEPARTAMENTO) )
index = 0
for (i in data$DOMDEPARTAMENTO){
index = index + 1
data$id[index] = MapDepartamento(data$DOMDEPARTAMENTO[index])
}
return(data)
}
MapDep = function(i){
return(i*1)
}
MapDepartamento = function(i){
if (i==1){
departamento = "0" # Montevideo
} else if ( i == 2) {
departamento = "1" # Artigas
} else if ( i == 3) {
departamento = "2" # Canelones
}else if ( i == 4) {
departamento = "19" # Cerro Largo
}else if ( i == 5) {
departamento = "3" # Colonia
}else if ( i == 6) {
departamento = "4" # Durazno
}else if ( i == 7) {
departamento = "17" # Flores
}else if ( i == 8) {
departamento = "5" # Florida
}else if ( i == 9) {
departamento = "6" # Lavalleja
}else if ( i == 10) {
departamento = "18" # Maldonado
} else if ( i == 11) {
departamento = "7" # Paysandu
}else if ( i == 12) {
departamento = "8" # Rio Negro
} else if ( i == 13) {
departamento = "9" # Rivera
}else if ( i == 14) {
departamento = "10" # Rocha
}else if ( i == 15) {
departamento = "11" # Salto
}else if ( i == 16) {
departamento = "12" # San Jose
}else if ( i == 17) {
departamento = "13" # Soriano
}else if ( i == 18) {
departamento = "16" # Tacuarembo
}else if ( i == 19) {
departamento = "14" # Flores
}
return(departamento)
}
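# A more compact equivalent of the if/else chain above (sketch; MapDepartamento2 is not
# used elsewhere, it only illustrates a named-vector lookup):
dep_map <- c("1"="0","2"="1","3"="2","4"="19","5"="3","6"="4","7"="17","8"="5","9"="6",
             "10"="18","11"="7","12"="8","13"="9","14"="10","15"="11","16"="12",
             "17"="13","18"="16","19"="14")
MapDepartamento2 <- function(i) unname(dep_map[as.character(i)])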
PlotIndiceUsoRedesSociales <- function() {
data = CreateIndiceData(datos)
  # the department codes in the survey data must be mapped
  # to the ids used by the map shapefile
data = AddIndexDepartamentoMapa(data)
# Uruguay Plot Map
sp_depto <- readOGR("PlotFunctions/ine_depto.shp")
  dframe_depto <- ggplot2::fortify(sp_depto) # converts the spatial object to a data.frame
# JOIN by id
mapa <- left_join(dframe_depto, data, by = "id")
# Finally plot Uy!
ggplot(mapa, aes(long, lat, group = group))+
geom_polygon(aes(fill = media_uso_redes_sociales), color = "white")+
scale_fill_viridis_c()
}
#############################
# Dynamic values table       #
#############################
TablaVariables<-function(){datatable(matrix(c("PI",
"CBI",
"SCI",
"TI",
"TNU",
"TU",
"Primaria incompleta",
"Ciclo Basico incompleto",
"Segundo Ciclo incompleto",
"Terciaria incompleta",
"Terciaria no Universitaria",
"Terciaria Universitaria"),nrow = 6, ncol = 2),
colnames = c("Abreviacion","Variable"))
}
TablaVar<-function(){
TablaVariables<-data.frame(matrix(c("PI",
"CBI",
"SCI",
"TI",
"TNU",
"TU",
"Primaria incompleta",
"Ciclo Basico incompleto",
"Segundo Ciclo incompleto",
"Terciaria incompleta",
"Terciaria no Universitaria",
"Terciaria Universitaria"),nrow = 6, ncol = 2))
colnames(TablaVariables)<-c("Abreviacion","Variable")
print(xtable(TablaVariables),include.rownames=FALSE)
}
tabvar<-function(){
tablavariables<-data.frame(matrix(c("Departamento","Departamento donde vive la persona.",
"Uso de internet","Si la persona usa internet o no.",
"Edad","Edad de la persona.",
"Frecuencia de uso de internet","Que tanto usa la persona el internet.",
"Motivos de no uso de internet", "Razones de quienes no usan internet para no usarlo.",
"Nivel educativo", "Nivel educativo alcanzado.",
"Frecuencia uso de internet en trabajo", "Que tanto usa la persona internet en el trabajo."),
ncol=2, byrow = TRUE))
colnames(tablavariables)<-c("Variable","Descripcion")
print(xtable(tablavariables),include.rownames=FALSE)
}
|
/PlotFunctions/plot.R
|
permissive
|
OBen16/ProyectoNT
|
R
| false | false | 24,692 |
r
|
library(scrobbler)
### Name: convert
### Title: convert
### Aliases: convert
### ** Examples
unix_time <- "1522124746"
timestamp <- convert(unix_time, to = "Time")
my_tracks <- read_scrobbles(system.file("extdata", "scrobbles.txt", package = "scrobbler"))
my_tracks$Date <- convert(my_tracks$Date, to = "Time")
|
/data/genthat_extracted_code/scrobbler/examples/convert.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 320 |
r
|
Kmatrix <- function(model, modelterm, covariate=NULL, prtnum=FALSE)
{
if (inherits(model, "mer") || inherits(model, "merMod")) {
if(!lme4::isLMM(model) && !lme4::isGLMM(model))
stop("Can't handle a nonlinear mixed model")
thecall <- slot(model, "call")
contrasts <- attr(model.matrix(model), "contrasts")
}else if (inherits(model, "lme")) {
thecall <- model$call
contrasts <- model$contrasts
}else if (inherits(model, "gls")) {
thecall <- model$call
contrasts <- model$contrasts
}else if (inherits(model, "lm")) {
thecall <- model$call
contrasts <- attr(model.matrix(model), "contrasts")
}else stop(paste("Can't handle an model of class", class(model)[1]))
cov.reduce <- function(x, name) mean(x)
fac.reduce <- function(coefs, lev) apply(coefs, 2, mean)
# Get model formula and index of response
Terms <- terms(model)
yname <- as.character(attr(Terms, "variables"))[[2]]
Terms <- delete.response(Terms)
# get the pure formula w/o extra stuff
formrhs <- formula(Terms)
# All the variables in the model
nm <- all.vars(formrhs)
nm <- nm[nm!="pi"]
# Figure out if any are coerced to factor or ordered
anm <- all.names(formrhs)
coerced <- anm[1 + grep("factor|as.factor|ordered|as.ordered", anm)]
# Obtain a simplified formula -- needed to recover the data in the model
form <- as.formula(paste("~", paste(nm, collapse = "+")))
envir <- attr(Terms, ".Environment")
X <- model.frame(form, eval(thecall$data, envir=envir),
subset = eval(thecall$subset, enclos=envir),
na.action = na.omit, drop.unused.levels = TRUE)
preddf <- X
baselevs <- xlev <- matdat <- list()
all.var.names <- names(X)
for (xname in all.var.names) {
obj <- X[[xname]]
if (is.factor(obj)) {
xlev[[xname]] <- levels(obj)
baselevs[[xname]] <- levels(obj)
}
else if (is.matrix(obj)) {
# Matrices -- reduce columns thereof, but don't add to baselevs
matdat[[xname]] <- apply(obj, 2, cov.reduce, xname)
}
else {
# single numeric pred but coerced to a factor - use unique values
if (length(grep(xname, coerced)) > 0)
baselevs[[xname]] <- sort(unique(obj))
# Ordinary covariates - summarize if not in 'at' arg
else baselevs[[xname]] <- cov.reduce(obj, xname)
}
}
covlevname <- setdiff(names(baselevs), c(names(xlev), coerced))
if ((!is.null(covariate) && !covariate%in%c("NULL", "")) && is.numeric(covariate)) baselevs[covlevname] <- as.list(covariate)
if ((!is.null(covariate) && !covariate%in%c("NULL", "")) && is.character(covariate) && covariate%in%covlevname) baselevs[[covariate]] <- seq(min(X[[covariate]]), max(X[[covariate]]), length=50)
if (all(length(covlevname)!=0, prtnum)) {
cat("\n", "The predicted means are estimated at \n\n")
print(round( unlist(baselevs[covlevname]), 4))
cat("\n")
}
# OK. Now make a grid of the factor levels of interest, along w/ covariate "at" values
grid <- do.call("expand.grid", baselevs)
# add any matrices
for (nm in names(matdat))
grid[[nm]] <- matrix(rep(matdat[[nm]], each=nrow(grid)), nrow=nrow(grid))
# Now make a new dataset with just the factor combs and covariate values we want for prediction
# WARNING -- This will overwrite X, so get anything you need from X BEFORE we get here
m <- model.frame(Terms, grid, na.action = na.pass, xlev = xlev)
X <- model.matrix(Terms, m, contrasts.arg = contrasts)
# All factors (excluding covariates)
# version 1.10 - no longer excluding covariates
allFacs <- all.var.names
### Array of indexes for rows of X, organized by dimensions
row.indexes <- array(seq_len(nrow(X)), sapply(baselevs, length))
# convert a string to a formula
form <- as.formula(paste("~",modelterm))
# These are the variables involved; and the label to use in the results
facs <- all.vars(form)
if ((!is.null(covariate) && !covariate%in%c("NULL", "")) && all(is.character(covariate), !covariate%in%facs)) facs <- c(facs, covariate)
if (any(sapply(facs, function(nm) length(grep(nm, allFacs)) == 0)))
stop(paste("Unknown factor(s) in specification:", paste(form, collapse=" ")))
# create the grid of factor combinations
levs <- list()
for (f in facs) levs[[f]] <- baselevs[[f]]
combs <- do.call("expand.grid", levs)
fctnames <- do.call("expand.grid", levs[rev(names(levs))])
fctnames <- fctnames[, rev(names(fctnames)), drop=FALSE]
rnK <- do.call("paste", c(fctnames, sep=":"))
K <- plyr::alply(row.indexes, match(facs, names(baselevs)), function(idx) {
fac.reduce(X[idx, , drop=FALSE], "")
})
K <- as.matrix(as.data.frame(K))
dimnames(K)[[2]] <- do.call("paste", c(combs, sep=":"))
K <-t(K)
K <- K[rnK, , drop=FALSE]
return(list(K=K, fctnames=fctnames, response=yname, preddf=preddf))
}
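# Example usage (sketch, not part of the package): for a linear model with two factors,
# Kmatrix() returns the linear-combination matrix for the means of their interaction:
#   fm <- lm(yield ~ block + N * P, data = npk)
#   Km <- Kmatrix(fm, "N:P", prtnum = TRUE)
#   Km$K   # one row per N:P combination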
|
/predictmeans/R/Kmatrix.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 5,071 |
r
|
Kmatrix <- function(model, modelterm, covariate=NULL, prtnum=FALSE)
{
if (inherits(model, "mer") || inherits(model, "merMod")) {
if(!lme4::isLMM(model) && !lme4::isGLMM(model))
stop("Can't handle a nonlinear mixed model")
thecall <- slot(model, "call")
contrasts <- attr(model.matrix(model), "contrasts")
}else if (inherits(model, "lme")) {
thecall <- model$call
contrasts <- model$contrasts
}else if (inherits(model, "gls")) {
thecall <- model$call
contrasts <- model$contrasts
}else if (inherits(model, "lm")) {
thecall <- model$call
contrasts <- attr(model.matrix(model), "contrasts")
}else stop(paste("Can't handle an model of class", class(model)[1]))
cov.reduce <- function(x, name) mean(x)
fac.reduce <- function(coefs, lev) apply(coefs, 2, mean)
# Get model formula and index of response
Terms <- terms(model)
yname <- as.character(attr(Terms, "variables"))[[2]]
Terms <- delete.response(Terms)
# get the pure formula w/o extra stuff
formrhs <- formula(Terms)
# All the variables in the model
nm <- all.vars(formrhs)
nm <- nm[nm!="pi"]
# Figure out if any are coerced to factor or ordered
anm <- all.names(formrhs)
coerced <- anm[1 + grep("factor|as.factor|ordered|as.ordered", anm)]
# Obtain a simplified formula -- needed to recover the data in the model
form <- as.formula(paste("~", paste(nm, collapse = "+")))
envir <- attr(Terms, ".Environment")
X <- model.frame(form, eval(thecall$data, envir=envir),
subset = eval(thecall$subset, enclos=envir),
na.action = na.omit, drop.unused.levels = TRUE)
preddf <- X
baselevs <- xlev <- matdat <- list()
all.var.names <- names(X)
for (xname in all.var.names) {
obj <- X[[xname]]
if (is.factor(obj)) {
xlev[[xname]] <- levels(obj)
baselevs[[xname]] <- levels(obj)
}
else if (is.matrix(obj)) {
# Matrices -- reduce columns thereof, but don't add to baselevs
matdat[[xname]] <- apply(obj, 2, cov.reduce, xname)
}
else {
# single numeric pred but coerced to a factor - use unique values
if (length(grep(xname, coerced)) > 0)
baselevs[[xname]] <- sort(unique(obj))
# Ordinary covariates - summarize if not in 'at' arg
else baselevs[[xname]] <- cov.reduce(obj, xname)
}
}
covlevname <- setdiff(names(baselevs), c(names(xlev), coerced))
if ((!is.null(covariate) && !covariate%in%c("NULL", "")) && is.numeric(covariate)) baselevs[covlevname] <- as.list(covariate)
if ((!is.null(covariate) && !covariate%in%c("NULL", "")) && is.character(covariate) && covariate%in%covlevname) baselevs[[covariate]] <- seq(min(X[[covariate]]), max(X[[covariate]]), length=50)
if (all(length(covlevname)!=0, prtnum)) {
cat("\n", "The predicted means are estimated at \n\n")
print(round( unlist(baselevs[covlevname]), 4))
cat("\n")
}
# OK. Now make a grid of the factor levels of interest, along w/ covariate "at" values
grid <- do.call("expand.grid", baselevs)
# add any matrices
for (nm in names(matdat))
grid[[nm]] <- matrix(rep(matdat[[nm]], each=nrow(grid)), nrow=nrow(grid))
# Now make a new dataset with just the factor combs and covariate values we want for prediction
# WARNING -- This will overwrite X, so get anything you need from X BEFORE we get here
m <- model.frame(Terms, grid, na.action = na.pass, xlev = xlev)
X <- model.matrix(Terms, m, contrasts.arg = contrasts)
# All factors (excluding covariates)
# version 1.10 - no longer excluding covariates
allFacs <- all.var.names
### Array of indexes for rows of X, organized by dimensions
row.indexes <- array(seq_len(nrow(X)), sapply(baselevs, length))
# convert a string to a formula
form <- as.formula(paste("~",modelterm))
# These are the variables involved; and the label to use in the results
facs <- all.vars(form)
if ((!is.null(covariate) && !covariate%in%c("NULL", "")) && all(is.character(covariate), !covariate%in%facs)) facs <- c(facs, covariate)
if (any(sapply(facs, function(nm) length(grep(nm, allFacs)) == 0)))
stop(paste("Unknown factor(s) in specification:", paste(form, collapse=" ")))
# create the grid of factor combinations
levs <- list()
for (f in facs) levs[[f]] <- baselevs[[f]]
combs <- do.call("expand.grid", levs)
fctnames <- do.call("expand.grid", levs[rev(names(levs))])
fctnames <- fctnames[, rev(names(fctnames)), drop=FALSE]
rnK <- do.call("paste", c(fctnames, sep=":"))
K <- plyr::alply(row.indexes, match(facs, names(baselevs)), function(idx) {
fac.reduce(X[idx, , drop=FALSE], "")
})
K <- as.matrix(as.data.frame(K))
dimnames(K)[[2]] <- do.call("paste", c(combs, sep=":"))
K <-t(K)
K <- K[rnK, , drop=FALSE]
return(list(K=K, fctnames=fctnames, response=yname, preddf=preddf))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/packSearch.R
\name{packSearch}
\alias{packSearch}
\title{packFinder Algorithm Pipeline}
\usage{
packSearch(
tirSeq,
Genome,
mismatch = 0,
elementLength,
tsdLength,
tsdMismatch = 0,
fixed = TRUE
)
}
\arguments{
\item{tirSeq}{A \code{\link[Biostrings:DNAString-class]{DNAString}}
object containing the TIR sequence to be searched for.}
\item{Genome}{A \code{\link[Biostrings:XStringSet-class]{DNAStringSet}}
object to be searched.}
\item{mismatch}{The maximum edit distance to be considered for TIR
matches (indels + substitutions). See
\code{\link[Biostrings]{matchPattern}} for details.}
\item{elementLength}{The maximum element length to be considered, as a vector
of two integers. E.g. \code{c(300, 3500)}}
\item{tsdLength}{Integer referring to the length of the flanking TSD region.}
\item{tsdMismatch}{An integer referring to the allowable mismatch
(substitutions or indels) between a transposon's TSD
sequences. \code{\link[Biostrings]{matchPattern}} from Biostrings
is used for pattern matching.}
\item{fixed}{Logical that will be passed to the `fixed` argument of
\code{\link[Biostrings]{matchPattern}}. Determines the behaviour of IUPAC
ambiguity codes when searching for TIR sequences.}
}
\value{
A dataframe containing elements
identified by the algorithm. These may be autonomous or
pack-TYPE elements. Will contain the following features:
\itemize{
\item start - the predicted element's start base
sequence position.
\item end - the predicted element's end base
sequence position.
\item seqnames - character string referring to the
sequence name in \code{Genome} to which \code{start}
and \code{end} refer.
\item width - the width of the predicted element.
\item strand - the strand direction of the
transposable element. This will be set to "*" as the
\code{packSearch} function does not consider
transposons to have a direction - only TIR sequences.
Passing the \code{packMatches} dataframe to
\code{\link{packClust}} will assign a direction to
each predicted Pack-TYPE element.
}
This dataframe is in the format produced by
coercing a \code{\link[GenomicRanges:GRanges-class]{GRanges}}
object to a dataframe: \code{data.frame(GRanges)}. Downstream
functions, such as \code{\link{packClust}}, use this
dataframe to manipulate predicted transposable elements.
}
\description{
General use pipeline function for the Pack-TYPE transposon
finding algorithm.
}
\details{
Finds potential pack-TYPE elements based on:
\itemize{
\item Similarity of TIR sequence to \code{tirSeq}
\item Proximity of potential TIR sequences
\item Directionality of TIR sequences
\item Similarity of TSD sequences
}
The algorithm finds potential forward and reverse TIR
sequences using \code{\link{identifyTirMatches}} and
their associated TSD sequence via \code{\link{getTsds}}.
The main filtering stage,
\code{\link{identifyPotentialPackElements}}, filters
matches to obtain a dataframe of potential PACK elements.
Note that this pipeline does not consider the
possibility of discovered elements being autonomous
elements, so it is recommended to cluster and/or BLAST
elements for further analysis. Furthermore, only exact TSD
matches are considered, so supplying long sequences for
TSD elements may lead to false-negative results.
}
\note{
This algorithm does not consider:
\itemize{
\item Autonomous elements - autonomous elements will
be predicted by this algorithm as there is no BLAST
step. It is recommended that, after clustering
elements using \code{\link{packClust}}, the user
analyses each group to determine which predicted
elements are autonomous and which are likely
Pack-TYPE elements. Alternatively, databases such as
Repbase (\url{https://www.girinst.org/repbase/})
supply annotations for autonomous transposable
elements that can be used to filter autonomous matches.
\item TSD Mismatches - if two TIRs do not have exact
matches for their target site duplications they
will be ignored. Supplying longer TSD sequences will
likely lead to a lower false-positive rate, but
may also cause a greater rate of false-negative results.
}
Pattern matching is done via \code{\link[Biostrings]{matchPattern}}.
}
\examples{
data(arabidopsisThalianaRefseq)
packMatches <- packSearch(
Biostrings::DNAString("CACTACAA"),
arabidopsisThalianaRefseq,
elementLength = c(300, 3500),
tsdLength = 3
)
}
\seealso{
\code{\link{identifyTirMatches}}, \code{\link{getTsds}},
\code{\link{identifyPotentialPackElements}}, \code{\link{packClust}},
\code{\link{packMatches}},
\code{\link[Biostrings:XStringSet-class]{DNAStringSet}},
\code{\link[Biostrings:DNAString-class]{DNAString}},
\code{\link[Biostrings]{matchPattern}}
}
\author{
Jack Gisby
}
|
/man/packSearch.Rd
|
no_license
|
jackgisby/packFinder
|
R
| false | true | 4,931 |
rd
|
#####################################################################
#
# this program takes the summary data created in
# /groups/brooksgrp/center_for_washington_area_studies/state_of_the_capitol_region/python_output/2019
# by python program
# /groups/brooksgrp/center_for_washington_area_studies/state_of_the_capitol_region/python_programs/2019/acs_county_2010_2017_v02.py
# and tries to make plots from it
# for presentation purposes
#
# March 28, 2019
#
# acs_county_page_plots_2017_v03.R
#
##############################################################################
##############################################################################
# Importing the required packages
library(dplyr)
library(scales)
library(ggplot2)
library(reshape)
library(splitstackshape)
library(RColorBrewer)
library(sf)
library(stringr)
##############################################################################
# todays date
dateo <- paste(substr(Sys.Date(),1,4),substr(Sys.Date(),6,7),substr(Sys.Date(),9,10),sep="")
dateo
groupDir <- "/groups/brooksgrp"
# data and output directories
data_dir <- paste0(groupDir,"/center_for_washington_area_studies/state_of_the_capitol_region/python_output/2019/summary_files_data/")
out_dir <- paste0(groupDir,"/center_for_washington_area_studies/state_of_the_capitol_region/r_output/2019/county_page/")
# load the data
acs_cnt_2013_2017 <- read.csv(paste0(data_dir,"20190303_cnty_acs_2013_2017_absolute_values.csv"),stringsAsFactors = F)
################ write a function to reformat the data to bring it to county level and subset it for required columns ################
reformat_subset_data <- function(df) {
# transpose the data to show attributes as per county level
data <- as.data.frame(t(df))
# remove unnecessary columns
data <- data[-c(1,2),]
# reassign the column names in transposed df with the indexes
colnames(data) <- df$index
# print the df to check if the format is appropriate
print(head(data))
# rename the columns to avoid column duplicates error
colnames(data) <- make.unique(names(data))
#names of columns in data frame
cols <- colnames(data)
# character variables
cols.char <- c("FILEID","FILETYPE","STATE","GEO_NAME","GEO_ID")
#numeric variables
cols.num <- cols[!cols %in% cols.char]
# write a function to convert the required columns to numeric
make_num <- function(x)
{
return(as.numeric(as.character(x)))
}
# make all the required columns numeric
data[cols.num] <- lapply(data[cols.num],make_num)
# print the dataframe to check the data types
print(str(data))
# create column state_county_code
data["state_county_code"] <- rownames(data)
# split the column GEO NAME to extract county and state name
data <- as.data.frame(cSplit(data,c("GEO_NAME"),',',drop = F))
# split the state_county_code column to get state and county codes
data <- as.data.frame(cSplit(data,c("state_county_code"),'_',drop = F,type.convert = FALSE))
# rename the split columns
names(data)[names(data)=="GEO_NAME_1"] <- "county_name"
names(data)[names(data)=="GEO_NAME_2"] <- "state_name"
names(data)[names(data)=="state_county_code_1"] <- "state_code"
names(data)[names(data)=="state_county_code_2"] <- "county_code"
data$FILETYPE <- as.character(data$FILETYPE)
# get the year column
data <- data %>% mutate(year=substr(FILETYPE,1,4))
return(data)
}
# get the data for year 2017 with relevant columns by calling the function
acs_cnt_2013_2017_subset <- reformat_subset_data(acs_cnt_2013_2017)
head(acs_cnt_2013_2017_subset)
#save(acs_cnt_2013_2017_subset,file="census_data_2017.RData")
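# For reference, the ACS detailed tables behind the column prefixes used below
# appear to be: B25034 = year structure built, B25127 = tenure by year built by
# units in structure, B25075 = value of owner-occupied units, B19001 = household
# income, B03002 = Hispanic or Latino origin by race, and B25063 = gross rent.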
################ create columns for structure type -- Graph 1 ################
acs_cnt_2013_2017_subset %>% select(matches('^B25034_([4-7]|1[2-9])_')) %>% head(2)
#acs_cnt_2013_2017_subset %>% select(matches('^B25034_2|B25034_3')) %>% head(2)
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25034_12_built_2010_to_2017'=select(.,matches('^B25034_2|B25034_3')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25034_13_built_1949_or_earlier'=select(.,matches('^B25034_10|B25034_11')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25034_14_built_1950_to_1969'=select(.,matches('^B25034_8|B25034_9')) %>%
apply(1, sum, na.rm=TRUE))
################# create columns for structure type -- Graph 2 ################
#acs_cnt_2013_2017_subset %>% select(intersect(starts_with("B25127"),contains("2_to_4"))) %>% head(2)
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('1_Units'=select(.,intersect(starts_with("B25127"),contains("1,_detached"))) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('2_to_4_Units'=select(.,intersect(starts_with("B25127"),contains("2_to_4"))) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('5_to_19_Units'=select(.,intersect(starts_with("B25127"),contains("5_to_19"))) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('20_to_49_Units'=select(.,intersect(starts_with("B25127"),contains("20_to_49"))) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('50_or_more_Units'=select(.,intersect(starts_with("B25127"),contains("50_or_more"))) %>%
apply(1, sum, na.rm=TRUE))
################# create columns for owner occupied homes -- Graph 3 ################
acs_cnt_2013_2017_subset %>% select(matches('^B25075')) %>% head(2)
acs_cnt_2013_2017_subset %>% select(matches('^B25075_([2-9]|[1][0-8])_')) %>% head(2)
acs_cnt_2013_2017_subset %>% select(matches('^B25075_([1][9]|[2][0])_')) %>% head(2)
acs_cnt_2013_2017_subset %>% select(matches('^B25075_([2][4-7])_')) %>% head(2)
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25075_28_less_than_$200,000'=select(.,matches('^B25075_([2-9]|[1][0-8])_')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25075_29_$200,000_to_$299,999'=select(.,matches('^B25075_([1][9]|[2][0])_')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25075_30_$750,000_or_more'=select(.,matches('^B25075_([2][4-7])_')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset %>% select(matches('^B25075_([2][1-3]|[2][8-9]|[3][0])_')) %>% colnames()
################# create columns for structure type -- Graph 4 ################
acs_cnt_2013_2017_subset %>% select(matches('^B19001')) %>% colnames()
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B19001_18_less_than_$40,000'=select(.,matches('^B19001_([2-8])_')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B19001_19_$40,000_to_$74,999'=select(.,matches('^B19001_([9]|[1][0-2])_')) %>%
apply(1, sum, na.rm=TRUE))
# acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
# mutate('B19001_20_$30,000_to_$39,999'=select(.,matches('^B19001_7|B19001_8')) %>%
# apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B19001_20_$100,000_to_$149,999'=select(.,matches('^B19001_14|B19001_15')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset %>% select(matches('^B19001_([1][3]|[1][6-9]|[2][0])')) %>% colnames()
################ create columns for structure type -- Graph 5 ################
acs_cnt_2013_2017_subset %>% select(matches('^B03002_3_|B03002_4_|B03002_6_|B03002_12_'))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('total_white_hispanic_black_asian'=select(.,matches('^B03002_3_|B03002_4_|B03002_6_|B03002_12_')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>% mutate('All_Others'=B03002_1_total-total_white_hispanic_black_asian)
################# create columns for structure type -- Graph 6 ################
acs_cnt_2013_2017_subset %>% select(matches('^B25063')) %>% colnames()
acs_cnt_2013_2017_subset %>% select(matches('^B25063_([3-9]|[1][0-7])')) %>% colnames()
#acs_cnt_2013_2017_subset %>% select(matches('^B25063_([1][8-9])')) %>% colnames()
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25063_28_less_than_$1,000'=select(.,matches('^B25063_([3-9]|[1][0-9])')) %>%
apply(1, sum, na.rm=TRUE))
#acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
# mutate('B25063_29_$600_to_$799'=select(.,matches('^B25063_([1][4-7])')) %>%
# apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25063_29_$1,000_to_$1,499'=select(.,matches('^B25063_([2][0-1])')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset <- acs_cnt_2013_2017_subset %>%
mutate('B25063_30_$2,500_or_more'=select(.,matches('^B25063_([2][4-6])')) %>%
apply(1, sum, na.rm=TRUE))
acs_cnt_2013_2017_subset %>% select(matches('^B25063_([2][2-3]|[2][7-9]|[3][0-9])_')) %>% colnames()
#save(acs_cnt_2013_2017_subset,file="census_data_2017.RData")
#############################################################################################################
geom_text_col <- "#737373"
axis_labs_col <- "#737373"
axis_text_size <- 50
#https://stackoverflow.com/questions/6461209/how-to-round-up-to-the-nearest-10-or-100-or-x
roundUpNice <- function(x, nice=c(1,2,4,5,6,8,10)) {
if(length(x) != 1) stop("'x' must be of length 1")
10^floor(log10(x)) * nice[[which(x <= 10^floor(log10(x)) * nice)[[1]]]]
}
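# Quick illustration of roundUpNice() (values worked out by hand):
# roundUpNice(7.3)  # -> 8    (nice multiples of 10^0 are 1, 2, 4, 5, 6, 8, 10)
# roundUpNice(34)   # -> 40   (nice multiples of 10^1 are 10, 20, 40, ...)
# roundUpNice(0.62) # -> 0.8  (nice multiples of 10^-1 are 0.1, 0.2, 0.4, ...)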
dpi_val = 300
width_val = 16
height_val = 11
############################################### 1 ############################################################
# filter for the relevant columns
acs_cnt_2013_2017_subset_built_year <- acs_cnt_2013_2017_subset %>%
select(STATE_FIPS,county_code,county_name,matches('^B25034_([4-7]|1[2-9])_')) %>%
as.data.frame()
#built_cols <- colnames(acs_cnt_2013_2017_subset_built_year)[4:ncol(acs_cnt_2013_2017_subset_built_year)]
built_cols <- c("2000 to 2009","1990 to 1999" ,"1980 to 1989", "1970 to 1979", "2010 to 2017", "before 1949" , "1950 to 1969")
colnames(acs_cnt_2013_2017_subset_built_year)[4:ncol(acs_cnt_2013_2017_subset_built_year)] <- built_cols
# melt the dataframe
acs_cnt_2013_2017_subset_built_year_melt <- melt(acs_cnt_2013_2017_subset_built_year, id.var=c("STATE_FIPS","county_name","county_code"))
acs_cnt_2013_2017_subset_built_year_melt$variable <- factor(acs_cnt_2013_2017_subset_built_year_melt$variable,
c("before 1949", "1950 to 1969", "1970 to 1979", "1980 to 1989",
"1990 to 1999", "2000 to 2009","2010 to 2017"))
df_counties_1 <- acs_cnt_2013_2017_subset_built_year_melt %>%
group_by(STATE_FIPS,county_code,county_name,variable) %>%
summarise(value_metro=sum(value)) %>% mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
#df <- acs_cnt_2013_2017_subset_built_year_melt
#statefips <- "00"
#countyfips <- "000"
# plot the graph
plot_c1_g1 <- function(df,statefips,countyfips){
if(statefips!="00"){
df <- df %>% filter(STATE_FIPS==statefips & county_code==countyfips)
df <- df %>% mutate(value_prop=round(value/sum(value, na.rm = T)*100,2)) %>% as.data.frame()
y_max <- roundUpNice(max(df_counties_1$value_prop))
}else{
df <- df %>%
group_by(variable) %>%
summarise(value_metro=sum(value))
df <- df %>%
mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
y_max <- max(df$value_prop)
}
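# The fill colours below appear to encode geography: purple (#810f7c) for the
# metro-wide aggregate (state FIPS "00"), dark blue (#045a8d) for the core
# jurisdictions (DC 11001, Arlington 51013, Alexandria city 51510), lighter
# blue (#2b8cbe) for the inner suburbs (Montgomery 24031, Prince George's 24033,
# Fairfax 51059, Fairfax city 51600, Falls Church city 51610), and green
# (#74c476) for the remaining counties.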
if(statefips=="00"){
fill_col <- "#810f7c"
}
else if((countyfips %in% c("001") & statefips %in% c("11"))| (countyfips %in% c("013","510") & statefips %in% c("51"))){
fill_col <- "#045a8d"
}else if((countyfips %in% c("033","031") & statefips %in% c("24"))|(countyfips %in% c("059","600","610") & statefips %in% c("51"))){
fill_col <- "#2b8cbe"
}
else{
fill_col <- "#74c476"
}
print(fill_col)
p1 <- ggplot(df, aes(x = variable, y = value_prop)) +
geom_bar(stat = "identity", fill=fill_col)+
scale_y_continuous(labels = scales::comma, breaks = trans_breaks(identity, identity, n = 5))+
ylim(0,y_max)+
#scale_x_continuous(limits= c(1950, 2017), breaks = c(seq(1950,2017,10))) +
#scale_colour_manual(values = c("orange","green"))+
#labs(x = "", y = "num of existing housing units", colour = "Parameter")+
#labs(x = "", y = "number of housing units", colour = "Parameter")+
labs(x = "", y = "", colour = "Parameter")+
scale_shape_manual(values = c(16, 21)) +
# geom_text(aes(label=value_prop), vjust=-0.5, color=geom_text_col, size=5)+
#labs(x="", y="") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.grid.major.x = element_line(color="gray"),
axis.title.x = element_text(colour = axis_labs_col),
axis.title.y = element_text(colour = axis_labs_col),
#axis.line.x = element_line(color = "black"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text = element_text(size = axis_text_size),
axis.title = element_text(size = 25),
plot.title = element_text(size=25),
legend.title = element_blank(),
legend.position="none",
legend.justification = c(1,1),
legend.text = element_text(size=25),
legend.key.size = unit(0.8,"line"),
legend.key = element_rect(fill = "white"),
legend.spacing = unit(0.45,"cm"))+
guides(colour = guide_legend(override.aes = list(size=10),reverse=F), size=FALSE)
# Here we define spaces as the big separator
#point <- format_format(big.mark = ",", decimal.mark = ".", scientific = FALSE)
p1 <- p1+coord_flip()
#print(p1)
# save the graph
ggsave(paste0(out_dir,"c1.g1.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_age_of_housing_type.jpg"),
plot = p1, dpi = dpi_val, width = width_val, height = height_val, units = c("in"))
if(statefips!="00"){
colnames(df)[2] <- "value"
}
write.csv(df,paste0(out_dir,"c1.g1.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_age_of_housing_type.csv"),row.names = F)
}
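# Example one-off calls (same pattern as the commented-out loops further down;
# the FIPS codes are illustrative):
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt, "00", "000") # whole metro
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt, "11", "001") # DC only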
# provide column names for which we want absolute value plot on county level
#state_fips <- c("00","11", "54","51","24")
#state_fips <- c("11","00")
#state_fips <- c("24", "51")
# call the funtion to create plot for each variable
# for (sfips in state_fips){
# #print(sfips)
# if(sfips=="00"){
# cfips <- c("000")
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
# } else if(sfips=="11"){
# cfips <- c("001")
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
# } else if (sfips=="51"){
# cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
# for(cfips in cfips_list){
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
# }
# }else if (sfips=="24"){
# cfips_list <- c("009","017","021","031","033")
# for(cfips in cfips_list){
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
# }
# } else{
# cfips <- c("037")
# plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
# }
# }
############################################### 2 ############################################################
# filter for the relevant columns
acs_cnt_2013_2017_subset_structure_type <- acs_cnt_2013_2017_subset %>%
select(STATE_FIPS,county_code,county_name,contains("_Units")) %>%
as.data.frame()
built_cols <- colnames(acs_cnt_2013_2017_subset_structure_type)[4:ncol(acs_cnt_2013_2017_subset_structure_type)]
built_cols <- gsub("_Units","",built_cols)
built_cols <- gsub("_"," ",built_cols)
colnames(acs_cnt_2013_2017_subset_structure_type)[4:ncol(acs_cnt_2013_2017_subset_structure_type)] <- built_cols
# melt the dataframe
acs_cnt_2013_2017_subset_structure_type_melt <- melt(acs_cnt_2013_2017_subset_structure_type, id.var=c("STATE_FIPS","county_name","county_code"))
df_counties_2 <- acs_cnt_2013_2017_subset_structure_type_melt %>%
group_by(STATE_FIPS,county_code,county_name,variable) %>%
summarise(value_metro=sum(value)) %>% mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
#df <- acs_cnt_2013_2017_subset_built_year_melt
#statefips <- "00"
#countyfips <- "000"
plot_c2_g2 <- function(df,statefips,countyfips){
if(statefips!="00"){
df <- df %>% filter(STATE_FIPS==statefips & county_code==countyfips)
df <- df %>% mutate(value_prop=round(value/sum(value, na.rm = T)*100,2)) %>% as.data.frame()
y_max <- roundUpNice(max(df_counties_2$value_prop))
}else{
df <- df %>%
group_by(variable) %>%
summarise(value_metro=sum(value))
df <- df %>%
mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
y_max <- max(df$value_prop)
}
if(statefips=="00"){
fill_col <- "#810f7c"
}
else if((countyfips %in% c("001") & statefips %in% c("11"))| (countyfips %in% c("013","510") & statefips %in% c("51"))){
fill_col <- "#045a8d"
}else if((countyfips %in% c("033","031") & statefips %in% c("24"))|(countyfips %in% c("059","600","610") & statefips %in% c("51"))){
fill_col <- "#2b8cbe"
}
else{
fill_col <- "#74c476"
}
print(fill_col)
# plot the graph
p1 <- ggplot(df, aes(x = variable, y = value_prop)) +
geom_bar(stat = "identity", fill=fill_col)+
scale_y_continuous(labels = scales::comma, breaks = trans_breaks(identity, identity, n = 5))+
ylim(0,y_max)+
#scale_x_continuous(limits= c(1950, 2017), breaks = c(seq(1950,2017,10))) +
#scale_colour_manual(values = c("orange","green"))+
labs(x = "", y = "", colour = "Parameter")+
scale_shape_manual(values = c(16, 21)) +
#geom_text(aes(label=value_prop), vjust=-0.5, color=geom_text_col, size=5)+
#labs(x="", y="") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.grid.major.x = element_line(color="gray"),
axis.title.x = element_text(colour = axis_labs_col),
axis.title.y = element_text(colour = axis_labs_col),
#axis.line.x = element_line(color = "black"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text = element_text(size = axis_text_size),
axis.title = element_text(size = 25),
plot.title = element_text(size=25),
legend.title = element_blank(),
legend.position="none",
legend.justification = c(1,1),
legend.text = element_text(size=25),
legend.key.size = unit(0.8,"line"),
legend.key = element_rect(fill = "white"),
legend.spacing = unit(0.45,"cm"))+
guides(colour = guide_legend(override.aes = list(size=10),reverse=T), size=FALSE)
# Here we define spaces as the big separator
#point <- format_format(big.mark = ",", decimal.mark = ".", scientific = FALSE)
p1 <- p1+coord_flip()
#print(p1)
# save the graph
ggsave(paste0(out_dir,"c1.g2.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_structure_type.jpg"),
plot = p1, dpi = dpi_val, width = width_val, height = height_val, units = c("in"))
if(statefips!="00"){
colnames(df)[2] <- "value"
}
write.csv(df,paste0(out_dir,"c1.g2.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_structure_type.csv"),row.names = F)
}
#state_fips <- c("11","00")
# state_fips <- c("00","11", "24", "51", "54")
# # call the funtion to create plot for each variable
# for (sfips in state_fips){
# #print(sfips)
# if(sfips=="00"){
# cfips <- c("000")
# plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
# } else if(sfips=="11"){
# cfips <- c("001")
# plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
# } else if (sfips=="51"){
# cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
# for(cfips in cfips_list){
# plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
# }
# }else if (sfips=="24"){
# cfips_list <- c("009","017","021","031","033")
# for(cfips in cfips_list){
# plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
# }
# } else{
# cfips <- c("037")
# plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
# }
# }
################################################ 3 ############################################################
#
# filter for the relevant columns
acs_cnt_2013_2017_subset_owner_occupied <- acs_cnt_2013_2017_subset %>%
select(STATE_FIPS,county_code,county_name,matches("^B25075_([2][1-3]|[2][8-9]|[3][0])_")) %>%
as.data.frame()
built_cols <- colnames(acs_cnt_2013_2017_subset_owner_occupied)[4:ncol(acs_cnt_2013_2017_subset_owner_occupied)]
built_cols <- gsub("B25075_([0-9]|[1-3][0-9])_","",built_cols)
built_cols <- gsub("_"," ",built_cols)
colnames(acs_cnt_2013_2017_subset_owner_occupied)[4:ncol(acs_cnt_2013_2017_subset_owner_occupied)] <- built_cols
# melt the dataframe
acs_cnt_2013_2017_subset_owner_occupied_melt <- melt(acs_cnt_2013_2017_subset_owner_occupied, id.var=c("STATE_FIPS","county_name","county_code"))
new_levels <- c("less than $200,000", "$200,000 to $299,999", "$300,000 to $399,999",
"$400,000 to $499,999", "$500,000 to $749,999", "$750,000 or more")
acs_cnt_2013_2017_subset_owner_occupied_melt$variable <- factor(acs_cnt_2013_2017_subset_owner_occupied_melt$variable,new_levels)
# new_levels <- c("less than $100,000","$100,000 to $124,999", "$125,000 to $149,999","$150,000 to $174,999",
# "$175,000 to $199,999", "$200,000 to $249,999", "$250,000 to $299,999", "$300,000 to $399,999",
# "$400,000 to $499,999", "$500,000 to $749,999", "$750,000 to $999,999" ,
# "$1,000,000 to $1,499,999", "$1,500,000 to $1,999,999", "$2,000,000 or more")
df_counties_3 <- acs_cnt_2013_2017_subset_owner_occupied_melt %>%
group_by(STATE_FIPS,county_code,county_name,variable) %>%
summarise(value_metro=sum(value)) %>% mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
plot_c3_g3 <- function(df,statefips,countyfips){
if(statefips!="00"){
df <- df %>% filter(STATE_FIPS==statefips & county_code==countyfips)
df <- df %>% mutate(value_prop=round(value/sum(value, na.rm = T)*100,2)) %>% as.data.frame()
y_max <- roundUpNice(max(df_counties_3$value_prop))
}else{
df <- df %>%
group_by(variable) %>%
summarise(value_metro=sum(value))
df <- df %>%
mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
y_max <- max(df$value_prop)
}
if(statefips=="00"){
fill_col <- "#810f7c"
}
else if((countyfips %in% c("001") & statefips %in% c("11"))| (countyfips %in% c("013","510") & statefips %in% c("51"))){
fill_col <- "#045a8d"
}else if((countyfips %in% c("033","031") & statefips %in% c("24"))|(countyfips %in% c("059","600","610") & statefips %in% c("51"))){
fill_col <- "#2b8cbe"
}
else{
fill_col <- "#74c476"
}
print(fill_col)
# plot the graph
p1 <- ggplot(df, aes(x = variable, y = value_prop)) +
geom_bar(stat = "identity", fill=fill_col)+
scale_y_continuous(labels = scales::comma, breaks = trans_breaks(identity, identity, n = 5))+
ylim(0,y_max)+
#scale_x_continuous(limits= c(1950, 2017), breaks = c(seq(1950,2017,10))) +
#scale_colour_manual(values = c("orange","green"))+
#labs(x = "value of owner occupied homes - 2017", y = "", colour = "Parameter")+
labs(x = "", y = "", colour = "Parameter")+
scale_shape_manual(values = c(16, 21)) +
#geom_text(aes(label=value_prop), hjust=-0.5, color=geom_text_col, size=5)+
#labs(x="", y="") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.grid.major.x = element_line(color="gray"),
axis.title.x = element_text(colour = axis_labs_col),
axis.title.y = element_text(colour = axis_labs_col),
#axis.line.x = element_line(color = "black"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text = element_text(size = axis_text_size),
axis.title = element_text(size = 25),
plot.title = element_text(size=25),
legend.title = element_blank(),
legend.position="none",
legend.justification = c(1,1),
legend.text = element_text(size=25),
legend.key.size = unit(0.8,"line"),
legend.key = element_rect(fill = "white"),
legend.spacing = unit(0.45,"cm"))+
guides(colour = guide_legend(override.aes = list(size=10),reverse=T), size=FALSE)
# Here we define spaces as the big separator
#point <- format_format(big.mark = ",", decimal.mark = ".", scientific = FALSE)
# make the barplot horizontal
p1 <-p1+coord_flip()
#print(p1)
# save the graph
ggsave(paste0(out_dir,"c1.g3.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_owner_occupied.jpg"),
plot = p1, dpi = dpi_val, width = width_val, height = height_val, units = c("in"))
if(statefips!="00"){
colnames(df)[2] <- "value"
}
write.csv(df,paste0(out_dir,"c1.g3.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_owner_occupied.csv"), row.names = F)
}
#state_fips <- c("11","00")
# state_fips <- c("00","11", "24", "51", "54")
# # call the funtion to create plot for each variable
# for (sfips in state_fips){
# #print(sfips)
# if(sfips=="00"){
# cfips <- c("000")
# plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
# } else if(sfips=="11"){
# cfips <- c("001")
# plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
# } else if (sfips=="51"){
# cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
# for(cfips in cfips_list){
# plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
# }
# }else if (sfips=="24"){
# cfips_list <- c("009","017","021","031","033")
# for(cfips in cfips_list){
# plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
# }
# } else{
# cfips <- c("037")
# plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
# }
# }
################################################ 4 ############################################################
#
# # filter for the relevant columns
acs_cnt_2013_2017_subset_household_income <- acs_cnt_2013_2017_subset %>%
select(STATE_FIPS,county_code,county_name,matches('^B19001_([1][3]|[1][6-9]|[2][0])')) %>%
as.data.frame()
#
built_cols <- colnames(acs_cnt_2013_2017_subset_household_income)[4:ncol(acs_cnt_2013_2017_subset_household_income)]
built_cols <- gsub("B19001_([0-9]|[1-2][0-9])_","",built_cols)
built_cols <- gsub("_"," ",built_cols)
colnames(acs_cnt_2013_2017_subset_household_income)[4:ncol(acs_cnt_2013_2017_subset_household_income)] <- built_cols
# melt the dataframe
acs_cnt_2013_2017_subset_household_income_melt <- melt(acs_cnt_2013_2017_subset_household_income, id.var=c("STATE_FIPS","county_name","county_code"))
new_levels <- c("less than $40,000" , "$40,000 to $74,999", "$75,000 to $99,999", "$100,000 to $149,999",
"$150,000 to $199,999", "$200,000 or more")
acs_cnt_2013_2017_subset_household_income_melt$variable <- factor(acs_cnt_2013_2017_subset_household_income_melt$variable,new_levels)
df_counties_4 <- acs_cnt_2013_2017_subset_household_income_melt %>%
group_by(STATE_FIPS,county_code,county_name,variable) %>%
summarise(value_metro=sum(value)) %>% mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
plot_c4_g4 <- function(df,statefips,countyfips){
if(statefips!="00"){
df <- df %>% filter(STATE_FIPS==statefips & county_code==countyfips)
df <- df %>% mutate(value_prop=round(value/sum(value, na.rm = T)*100,2)) %>% as.data.frame()
y_max <- roundUpNice(max(df_counties_4$value_prop))
}else{
df <- df %>%
group_by(variable) %>%
summarise(value_metro=sum(value))
df <- df %>%
mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
y_max <- max(df$value_prop)
}
if(statefips=="00"){
fill_col <- "#810f7c"
}
else if((countyfips %in% c("001") & statefips %in% c("11"))| (countyfips %in% c("013","510") & statefips %in% c("51"))){
fill_col <- "#045a8d"
}else if((countyfips %in% c("033","031") & statefips %in% c("24"))|(countyfips %in% c("059","600","610") & statefips %in% c("51"))){
fill_col <- "#2b8cbe"
}
else{
fill_col <- "#74c476"
}
print(fill_col)
# plot the graph
p1 <- ggplot(df, aes(x = variable, y = value_prop)) +
geom_bar(stat = "identity", fill=fill_col)+
scale_y_continuous(labels = scales::comma, breaks = trans_breaks(identity, identity, n = 5))+
ylim(0,y_max)+
#scale_x_continuous(limits= c(1950, 2017), breaks = c(seq(1950,2017,10))) +
#scale_colour_manual(values = c("orange","green"))+
# labs(x = "household income", y = "number of households", colour = "Parameter")+
labs(x = "", y = "", colour = "Parameter")+
scale_shape_manual(values = c(16, 21)) +
#geom_text(aes(label=value_prop), hjust=-0.5, color=geom_text_col, size=5)+
#labs(x="", y="") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.grid.major.x = element_line(color="gray"),
axis.title.x = element_text(colour = axis_labs_col),
axis.title.y = element_text(colour = axis_labs_col),
#axis.line.x = element_line(color = "black"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text = element_text(size = axis_text_size),
axis.title = element_text(size = 25),
plot.title = element_text(size=25),
#axis.text.x = element_text(angle = 90, hjust = 1),
legend.title = element_blank(),
legend.position="none",
legend.justification = c(1,1),
legend.text = element_text(size=25),
legend.key.size = unit(0.8,"line"),
legend.key = element_rect(fill = "white"),
legend.spacing = unit(0.45,"cm"))+
guides(colour = guide_legend(override.aes = list(size=10),reverse=T), size=FALSE)
# Here we define spaces as the big separator
#point <- format_format(big.mark = ",", decimal.mark = ".", scientific = FALSE)
# make the barplot horizontal
p1 <-p1+coord_flip()
#print(p1)
# save the graph
ggsave(paste0(out_dir,"c1.g4.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_household_income_type.jpg"),
plot = p1, dpi = dpi_val, width = width_val, height = height_val, units = c("in"))
if(statefips!="00"){
colnames(df)[2] <- "value"
}
write.csv(df,paste0(out_dir,"c1.g4.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_household_income_type.csv"),row.names = F)
}
#state_fips <- c("11","00")
# state_fips <- c("00","11", "24", "51", "54")
# # call the funtion to create plot for each variable
# for (sfips in state_fips){
# #print(sfips)
# if(sfips=="00"){
# cfips <- c("000")
# plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
# } else if(sfips=="11"){
# cfips <- c("001")
# plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
# } else if (sfips=="51"){
# cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
# for(cfips in cfips_list){
# plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
# }
# }else if (sfips=="24"){
# cfips_list <- c("009","017","021","031","033")
# for(cfips in cfips_list){
# plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
# }
# } else{
# cfips <- c("037")
# plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
# }
# }
# ############################################### 5 ############################################################
#
# filter for the relevant columns
acs_cnt_2013_2017_subset_race <- acs_cnt_2013_2017_subset %>%
select("STATE_FIPS","county_code","county_name",matches('^B03002_3_|B03002_4_|B03002_6_|B03002_12_'),
"All_Others") %>%
as.data.frame()
#built_cols <- colnames(acs_cnt_2013_2017_subset_race)[4:ncol(acs_cnt_2013_2017_subset_race)]
# built_cols <- gsub("B03002_([0-9]|[1-2][0-9])_","",built_cols)
#
# built_cols <- gsub("_"," ",built_cols)
#
# built_cols <- gsub("Black or ","",built_cols)
built_cols <- c("White Alone" , "African-American Alone" ,"Asian Alone" , "Hispanic or Latino", "All Others")
colnames(acs_cnt_2013_2017_subset_race)[4:ncol(acs_cnt_2013_2017_subset_race)] <- built_cols
# melt the dataframe
acs_cnt_2013_2017_subset_race_melt <- melt(acs_cnt_2013_2017_subset_race, id.var=c("STATE_FIPS","county_name","county_code"))
acs_cnt_2013_2017_subset_race_melt$variable <- factor(acs_cnt_2013_2017_subset_race_melt$variable,
rev(levels(acs_cnt_2013_2017_subset_race_melt$variable)))
df_counties_5 <- acs_cnt_2013_2017_subset_race_melt %>%
group_by(STATE_FIPS,county_code,county_name,variable) %>%
summarise(value_metro=sum(value)) %>% mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
plot_c5_g5 <- function(df,statefips,countyfips){
if(statefips!="00"){
df <- df %>% filter(STATE_FIPS==statefips & county_code==countyfips)
df <- df %>% mutate(value_prop=round(value/sum(value, na.rm = T)*100,2)) %>% as.data.frame()
y_max <- roundUpNice(max(df_counties_5$value_prop))
}else{
df <- df %>%
group_by(variable) %>%
summarise(value_metro=sum(value))
df <- df %>%
mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
y_max <- max(df$value_prop)
}
if(statefips=="00"){
fill_col <- "#810f7c"
}
else if((countyfips %in% c("001") & statefips %in% c("11"))| (countyfips %in% c("013","510") & statefips %in% c("51"))){
fill_col <- "#045a8d"
}else if((countyfips %in% c("033","031") & statefips %in% c("24"))|(countyfips %in% c("059","600","610") & statefips %in% c("51"))){
fill_col <- "#2b8cbe"
}
else{
fill_col <- "#74c476"
}
print(fill_col)
# plot the graph
p1 <- ggplot(df, aes(x = variable, y = value_prop)) +
geom_bar(stat = "identity", fill=fill_col)+
scale_y_continuous(labels = scales::comma, breaks = trans_breaks(identity, identity, n = 3))+
ylim(0,y_max)+
#scale_x_continuous(limits= c(1950, 2017), breaks = c(seq(1950,2017,10))) +
#scale_colour_manual(values = c("orange","green"))+
labs(x = "", y = "", colour = "Parameter")+
scale_shape_manual(values = c(16, 21)) +
#geom_text(aes(label=value_prop), hjust=-0.5, color=geom_text_col, size=5)+
#labs(x="", y="") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.grid.major.x = element_line(color="gray"),
axis.title.x = element_text(colour = axis_labs_col),
axis.title.y = element_text(colour = axis_labs_col),
#axis.line.x = element_line(color = "black"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text = element_text(size = axis_text_size),
axis.title = element_text(size = 25),
plot.title = element_text(size=25),
legend.title = element_blank(),
legend.position="none",
legend.justification = c(1,1),
legend.text = element_text(size=25),
legend.key.size = unit(0.8,"line"),
legend.key = element_rect(fill = "white"),
legend.spacing = unit(0.45,"cm"))+
guides(colour = guide_legend(override.aes = list(size=10),reverse=T), size=FALSE)
# Here we define spaces as the big separator
#point <- format_format(big.mark = ",", decimal.mark = ".", scientific = FALSE)
p1 <-p1+coord_flip()
#print(p1)
# save the graph
ggsave(paste0(out_dir,"c1.g5.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_race_type.jpg"),
plot = p1, dpi = dpi_val, width = width_val, height = height_val, units = c("in"))
if(statefips!="00"){
colnames(df)[2] <- "value"
}
write.csv(df,paste0(out_dir,"c1.g5.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_race_type.csv"),row.names = F)
}
state_fips <- c("11","00")
# state_fips <- c("00","11", "24", "51", "54")
# # call the function to create plot for each variable
for (sfips in state_fips){
#print(sfips)
if(sfips=="00"){
cfips <- c("000")
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
} else if(sfips=="11"){
cfips <- c("001")
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
} else if (sfips=="51"){
cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
for(cfips in cfips_list){
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
}
}else if (sfips=="24"){
cfips_list <- c("009","017","021","031","033")
for(cfips in cfips_list){
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
}
} else{
cfips <- c("037")
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
}
}
# ############################################### 6 ############################################################
#
# # filter for the relevant columns
acs_cnt_2013_2017_subset_household_type <- acs_cnt_2013_2017_subset %>%
select(STATE_FIPS,county_code,county_name,matches('^B25063_([2][2-3]|[2][7-9]|[3][0-9])_')) %>%
as.data.frame()
built_cols <- colnames(acs_cnt_2013_2017_subset_household_type)[4:ncol(acs_cnt_2013_2017_subset_household_type)]
built_cols <- gsub("B25063_([0-9]|[1-3][0-9])_","",built_cols)
built_cols <- gsub("_"," ",built_cols)
colnames(acs_cnt_2013_2017_subset_household_type)[4:ncol(acs_cnt_2013_2017_subset_household_type)] <- built_cols
colnames(acs_cnt_2013_2017_subset_household_type)[6:7] <- c("No cash rent", "Less than $1,000")
#colnames(acs_cnt_2013_2017_subset_household_type)[10] <- c("$3,500 or More")
# melt the dataframe
acs_cnt_2013_2017_subset_household_type_melt <- melt(acs_cnt_2013_2017_subset_household_type, id.var=c("STATE_FIPS","county_name","county_code"))
new_levels <- c("No cash rent", "Less than $1,000",
"$1,000 to $1,499" ,"$1,500 to $1,999", "$2,000 to $2,499",
"$2,500 or more")
acs_cnt_2013_2017_subset_household_type_melt$variable <- factor(acs_cnt_2013_2017_subset_household_type_melt$variable,new_levels)
df_counties_6 <- acs_cnt_2013_2017_subset_household_type_melt %>%
group_by(STATE_FIPS,county_code,county_name,variable) %>%
summarise(value_metro=sum(value)) %>% mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
plot_c6_g6 <- function(df,statefips,countyfips){
if(statefips!="00"){
df <- df %>% filter(STATE_FIPS==statefips & county_code==countyfips)
df <- df %>% mutate(value_prop=round(value/sum(value, na.rm = T)*100,2)) %>% as.data.frame()
y_max <- roundUpNice(max(df_counties_6$value_prop))
}else{
df <- df %>%
group_by(variable) %>%
summarise(value_metro=sum(value))
df <- df %>%
mutate(value_prop=round(value_metro/sum(value_metro, na.rm = T)*100,2)) %>%
as.data.frame()
y_max <- max(df$value_prop)
}
if(statefips=="00"){
fill_col <- "#810f7c"
}
else if((countyfips %in% c("001") & statefips %in% c("11"))| (countyfips %in% c("013","510") & statefips %in% c("51"))){
fill_col <- "#045a8d"
}else if((countyfips %in% c("033","031") & statefips %in% c("24"))|(countyfips %in% c("059","600","610") & statefips %in% c("51"))){
fill_col <- "#2b8cbe"
}
else{
fill_col <- "#74c476"
}
print(fill_col)
# plot the graph
p1 <- ggplot(df, aes(x = variable, y = value_prop)) +
geom_bar(stat = "identity", fill=fill_col)+
scale_y_continuous(labels = scales::comma, breaks = trans_breaks(identity, identity, n = 3))+
ylim(0,y_max)+
#scale_x_continuous(limits= c(1950, 2017), breaks = c(seq(1950,2017,10))) +
#scale_colour_manual(values = c("orange","green"))+
#labs(x = "", y = "number of rental units", colour = "Parameter")+
labs(x = "", y = "", colour = "Parameter")+
scale_shape_manual(values = c(16, 21)) +
# geom_text(aes(label=value_prop, size=value), hjust=-0.5, color=geom_text_col, size=5)+
#labs(x="", y="") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
#panel.grid.major.y = element_line(color="gray"),
panel.grid.major.x = element_line(color="gray"),
axis.title.x = element_text(colour = axis_labs_col),
axis.title.y = element_text(colour = axis_labs_col),
#axis.line.x = element_line(color = "black"),
#axis.line.y = element_line(color = "black"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text = element_text(size = axis_text_size),
axis.title = element_text(size = 25),
plot.title = element_text(size=25),
#axis.text.x = element_text(angle = 90, hjust = 1),
legend.title = element_blank(),
legend.position="none",
legend.justification = c(1,1),
legend.text = element_text(size=25),
legend.key.size = unit(0.8,"line"),
legend.key = element_rect(fill = "white"),
legend.spacing = unit(0.45,"cm"))+
guides(colour = guide_legend(override.aes = list(size=10),reverse=T), size=FALSE)
# Here we define spaces as the big separator
#point <- format_format(big.mark = ",", decimal.mark = ".", scientific = FALSE)
# make the barplot horizontal
p1 <-p1+coord_flip()
#print(p1)
# save the graph
ggsave(paste0(out_dir,"c1.g6.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_rent_type.jpg"),
plot = p1, dpi = dpi_val, width = width_val, height = height_val, units = c("in"))
if(statefips!="00"){
colnames(df)[2] <- "value"
}
write.csv(df,paste0(out_dir,"c1.g6.",statefips,countyfips,"_",dateo,"_acs_cnt_2017_housing_units_by_rent_type.csv"),row.names = F)
}
# state_fips <- c("00","11", "24", "51", "54")
# # call the funtion to create plot for each variable
# for (sfips in state_fips){
# #print(sfips)
# if(sfips=="00"){
# cfips <- c("000")
# plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
# } else if(sfips=="11"){
# cfips <- c("001")
# plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
# } else if (sfips=="51"){
# cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
# for(cfips in cfips_list){
# plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
# }
# }else if (sfips=="24"){
# cfips_list <- c("009","017","021","031","033")
# for(cfips in cfips_list){
# plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
# }
# } else{
# cfips <- c("037")
# plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
# }
# }
state_fips <- c("00","11", "24", "51", "54")
for (sfips in state_fips){
#print(sfips)
if(sfips=="00"){
cfips <- c("000")
plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
} else if(sfips=="11"){
cfips <- c("001")
plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
} else if (sfips=="51"){
cfips_list <- c("013","043","047","059","061","107","153","157","177","179","187","510","600","610","630","683","685")
#cfips <- c("013")
for(cfips in cfips_list){
plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
}
}else if (sfips=="24"){
cfips_list <- c("009","017","021","031","033")
#cfips <- c("033")
for(cfips in cfips_list){
plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
}
} else{
cfips <- c("037")
plot_c1_g1(acs_cnt_2013_2017_subset_built_year_melt,sfips,cfips)
plot_c2_g2(acs_cnt_2013_2017_subset_structure_type_melt,sfips,cfips)
plot_c3_g3(acs_cnt_2013_2017_subset_owner_occupied_melt,sfips,cfips)
plot_c4_g4(acs_cnt_2013_2017_subset_household_income_melt,sfips,cfips)
plot_c5_g5(acs_cnt_2013_2017_subset_race_melt,sfips,cfips)
plot_c6_g6(acs_cnt_2013_2017_subset_household_type_melt,sfips,cfips)
}
}
|
/r_programs/2019/old/acs_county_page_plots_2017_v03.R
|
no_license
|
chirag-jhamb/us_census_data_study
|
R
| false | false | 47,848 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_general.R
\name{clear_env}
\alias{clear_env}
\title{Function to clear all elements within environment}
\usage{
clear_env(env, all.names = T)
}
\arguments{
\item{env}{environment to clean}
\item{all.names}{logical; if \code{TRUE} (the default), objects whose names begin with a dot are removed as well}
}
\description{
Function to clear all elements within environment
}
\examples{
\dontrun{
env = new.env()
env$a = 1
print(as.list(env))
clear_env(env)
print(as.list(env))
}
}
|
/man/clear_env.Rd
|
no_license
|
dipterix/rave
|
R
| false | true | 487 |
rd
|
pdf("discovery_by_sample_size.pdf",13,12)
library(data.table)
library(ggplot2)
statsData = fread("cod_analysis/GTEx/tissue_conservation/COP_specificity_stats.txt", stringsAsFactors = FALSE, header = TRUE, sep="\t")
copData = fread("zcat cod_identification/GTEx/CODer_final_dataset_cops_merged.bed.gz", stringsAsFactors = FALSE, header = TRUE, sep="\t")
coderStats = fread("cod_identification/GTEx/coder_stats.txt", stringsAsFactors = FALSE, header = F, sep="\t")
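# keep only the 'Total samples' rows of coder_stats.txt and rename the remaining columns to tissue / sample_size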
coderStats = coderStats[V2 == "Total samples"][,.(V1,V3)]
colnames(coderStats) = c("tissue","sample_size")
colorCode = fread( "raw_input/GTEx/v8/other/color_code_computer.txt", stringsAsFactors = FALSE, header = FALSE, sep="\t")
mergedData = unique(merge(statsData,copData,by="pairID"))
mergedData = unique(merge(mergedData,coderStats,by="tissue"))
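# per tissue: count COPs observed in exactly one tissue (copFreq == 1) and all COPs, then express the unique COPs as a percentage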
data = data.table(table(mergedData[copFreq == 1]$tissue))
others = data.table(table(mergedData$tissue))
data = merge(data, others, by = "V1")
colnames(data) = c("tissue","uniques","total")
data$perc = data$uniques * 100.0 / data$total
data = merge(data,coderStats,by = "tissue")
correlation = cor.test(data$perc, data$sample_size, method = "spearman")
correlationText = paste("Spearman R:",round(correlation$estimate,2), "P-value:",format.pval(pv = c(correlation$p.value), digits = 2), sep = " ")
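# ordinary least-squares fit, used only to draw the trend line (geom_abline) in the plot below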
d = lm(data$perc ~ data$sample_size)
ggplot( data, aes(x = sample_size, y = perc, color = tissue, fill = tissue ) ) +
geom_abline(slope = d$coefficients[2], intercept = d$coefficients[1], color = "#de2d26") +
geom_point( show.legend = FALSE, alpha = 0.9, size = 4, shape = 21, color = "black") +
geom_text( aes(label = tissue), size = 7, vjust = 1.5, check_overlap = TRUE, show.legend = FALSE) +
annotate("text", x = 420, y = 38, label = correlationText, hjust = 0, vjust =1, size = 8.5 ) +
# ggtitle("% unique COPs per tissue sample size") +
ylab("% unique COPs") +
xlab("Tissue sample size") +
scale_color_manual(values = colorCode$V2) +
scale_fill_manual(values = colorCode$V2) +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5), text = element_text(size=28), axis.text.x = element_text(angle = 0, vjust=0.6),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", fill = "white", size = 2))
dev.off()
|
/manuscript_figures/section2/discovery_by_sample_size.R
|
permissive
|
diogomribeiro/LoCOP
|
R
| false | false | 2,369 |
r
|
pdf("discovery_by_sample_size.pdf",13,12)
library(data.table)
library(ggplot2)
statsData = fread("cod_analysis/GTEx/tissue_conservation/COP_specificity_stats.txt", stringsAsFactors = FALSE, header = TRUE, sep="\t")
copData = fread("zcat cod_identification/GTEx/CODer_final_dataset_cops_merged.bed.gz", stringsAsFactors = FALSE, header = TRUE, sep="\t")
coderStats = fread("cod_identification/GTEx/coder_stats.txt", stringsAsFactors = FALSE, header = F, sep="\t")
coderStats = coderStats[V2 == "Total samples"][,.(V1,V3)]
colnames(coderStats) = c("tissue","sample_size")
colorCode = fread( "raw_input/GTEx/v8/other/color_code_computer.txt", stringsAsFactors = FALSE, header = FALSE, sep="\t")
mergedData = unique(merge(statsData,copData,by="pairID"))
mergedData = unique(merge(mergedData,coderStats,by="tissue"))
data = data.table(table(mergedData[copFreq == 1]$tissue)) #ratio > 0.2][ratio < 0.5
others = data.table(table(mergedData$tissue))
data = merge(data, others, by = "V1")
colnames(data) = c("tissue","uniques","total")
data$perc = data$uniques * 100.0 / data$total
data = merge(data,coderStats,by = "tissue")
correlation = cor.test(data$perc, data$sample_size, method = "spearman")
correlationText = paste("Spearman R:",round(correlation$estimate,2), "P-value:",format.pval(pv = c(correlation$p.value), digits = 2), sep = " ")
d = lm(data$perc ~ data$sample_size)
ggplot( data, aes(x = sample_size, y = perc, color = tissue, fill = tissue ) ) +
geom_abline(slope = d$coefficients[2], intercept = d$coefficients[1], color = "#de2d26") +
geom_point( show.legend = FALSE, alpha = 0.9, size = 4, shape = 21, color = "black") +
geom_text( aes(label = tissue), size = 7, vjust = 1.5, check_overlap = TRUE, show.legend = FALSE) +
annotate("text", x = 420, y = 38, label = correlationText, hjust = 0, vjust =1, size = 8.5 ) +
# ggtitle("% unique COPs per tissue sample size") +
ylab("% unique COPs") +
xlab("Tissue sample size") +
scale_color_manual(values = colorCode$V2) +
scale_fill_manual(values = colorCode$V2) +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5), text = element_text(size=28), axis.text.x = element_text(angle = 0, vjust=0.6),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_rect(colour = "black", fill = "white", size = 2))
dev.off()
|
\name{fmem}
\alias{fmem}
\title{Flexible Measurement Error Models}
\description{
\bold{fmem} is used to obtain statistical inference, based on the Bayesian approach, for the structural version
of flexible measurement error models in the presence of homoscedastic and heteroscedastic random errors.
These models admit vectors of explanatory variables with and without measurement error, as well as nonlinear effects, which are approximated
by using B-splines. The error-prone variables and the random error follow scale mixtures of normal distributions.}
\usage{
fmem(formula, data, omeg, family, eta, burn.in, post.sam.s, thin, heter)
}
\arguments{
\item{formula}{a symbolic description of the systematic component of the model to be fitted. See details for further information.}
\item{data}{an optional data frame, list or environment containing the variables in the model.}
\item{omeg}{(optional) the ratio \eqn{\omega=\sigma_{\epsilon}^2/\sigma_{\xi}^2}. This value must be specified when the model of interest is the homoscedastic
flexible measurement error model. If it is not specified, it is assumed to be 1, that is, \eqn{\sigma_{\epsilon}^2=\sigma_{\xi}^2}}
\item{family}{a description of the error-prone variables and the random error distributions to be used in the model.
Supported distributions include \emph{Normal}, \emph{Student-t}, \emph{Slash},\emph{Hyperbolic},
\emph{Laplace} and \emph{ContNormal}, which correspond to normal, Student-t, slash, symmetric hyperbolic, Laplace and
contaminated normal distributions, respectively.}
\item{eta}{(optional) a numeric value or numeric vector that represents the extra parameter of the specified error distribution. This parameter can be assumed known or unknown.}
\item{burn.in}{the number of burn-in iterations for the MCMC algorithm.}
\item{post.sam.s}{the required size for the posterior sample of interest parameters.}
\item{thin}{(optional) the thinning interval used in the simulation to obtain the required size for the posterior sample.}
\item{heter}{(optional) a list that contains the values \eqn{\sigma_{\epsilon_i}^2} and \eqn{\Sigma_{\xi_i}} for all \eqn{i} (\eqn{i=1,\ldots,n}).
The objects have to be specified as \code{sigma2y} and \code{sigma2xi}, i.e., \code{heter <- list(sigma2y, sigma2xi)}.
If this argument is not specified, the homoscedastic version of the model is fitted.}
}
\details{
The argument \emph{formula} consists of three parts, namely: \emph{(i)} the observed response variable;
\emph{(ii)} covariates with measurement error; and \emph{(iii)} covariates without measurement error including the non-parametric
components, which can be specified by using the function \code{bsp()}.
The first two parts are separated by the symbol "~" and the second and third parts are separated by the symbol "|".
This function allows the measurement error model to be fitted in the presence of homoscedastic and heteroscedastic random errors. These models
admit vectors of explanatory variables with and without measurement error, as well as nonlinear effects
approximated by using B-splines. The model investigated is the structural
version, as the error-prone variables follow scale mixtures of normal distributions.
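For instance, a call of the form \code{fmem(y ~ x1 + x2 | w + bsp(z), data = mydata, family = "Normal", burn.in = 1000, post.sam.s = 2000)}
(a purely illustrative sketch, not one of the examples below) declares \code{x1} and \code{x2} as error-prone covariates, \code{w} as a covariate
measured without error, and a nonlinear effect of \code{z} approximated by B-splines.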
}
\value{
\item{chains}{A matrix that contains the posterior sample of interest parameters. Each column represents the marginal posterior sample of each parameter.}
\item{res}{a vector of quantile residuals, proposed by Dunn and Smyth (1996) in the context of classical inference, but suited here to the Bayesian case.}
\item{K-L}{a vector of case-deletion influence measures based on the Kullback-Leibler divergence.}
\item{X_2}{a vector of case-deletion influence measures based on the X2-Distance divergence.}
\item{DIC}{DIC criterion for model selection.}
\item{LMPL}{Log-marginal pseudo-likelihood for model selection.}
}
\references{Rondon, L.M. and Bolfarine, H. (2015). Bayesian analysis of flexible measurement error models. (submitted)}
\author{Luz Marina Rondon <lumarp@gmail.com> and Heleno Bolfarine}
\seealso{
\code{\link{bsp}}
\code{\link{bsp.graph.fmem}}
}
\examples{
#library(SemiPar)
#### Ragweed Pollen ####
#data(ragweed)
#attach(ragweed)
############ Example ragweed data
#ragweed2 <- ragweed[year==1993]
#day.in.seas <- day.in.seas[year==1993]
#temperature <- temperature[year==1993]
#rain <- rain[year==1993]
#wind.speed <- wind.speed[year==1993]
#ragweedn <- data.frame(ragweed2,day.in.seas,temperature,rain,wind.speed)
#model <- fmem(sqrt(ragweed2) ~ wind.speed | rain + temperature + bsp(day.in.seas),
# data=ragweedn,family="Normal", burn.in=500, post.sam.s=2000,
# thin=10, omeg=1)
#summary(model)
#
### Plot non-parametric component
#bsp.graph.fmem(model, which=1, xlab="Day", ylab="f(Day)")
############ Example Boston data
#library(MASS)
#data(Boston)
#attach(Boston)
#model <- fmem(log(medv) ~ nox | crim + rm + bsp(lstat) + bsp(dis), data=Boston,
# family="ContNormal", burn.in=10000, post.sam.s=5000, omeg=4, thin=10)
#summary(model)
#
### Plot non-parametric components
#bsp.graph.fmem(model, which=1, xlab="lstat", ylab="f(lstat)") ### for variable lstat
#bsp.graph.fmem(model, which=2, xlab="dis", ylab="f(dis)") ### for variable dis
#
}
|
/man/fmem.Rd
|
no_license
|
cran/BayesGESM
|
R
| false | false | 5,388 |
rd
|
\name{fmem}
\alias{fmem}
\title{Flexible Measurement Error Models}
\description{
\bold{fmem} is used to obtain the statistical inference based on the Bayesian approach for the structural version
of the flexible measurement error models under the presence of homoscedastic and heteroscedastic random errors.
These models admits vectors of explanatory variables with and without measurement error as well as the presence of nonlinear effects, which is approximated
by using B-splines. The error-prone variables and the random error follow scale mixtures of normal distributions.}
\usage{
fmem(formula, data, omeg, family, eta, burn.in, post.sam.s, thin, heter)
}
\arguments{
\item{formula}{a symbolic description of the systematic component of the model to be fitted. See details for further information.}
\item{data}{an optional data frame, list or environment containing the variables in the model.}
\item{omeg}{(optional) the ratio \eqn{\omega=\sigma_{\epsilon}^2/\sigma_{\xi}^2}, this value must be specified when the model of interest is the homocedastic
flexible measurement model. If this value is not specified is assumed to be 1, that is, \eqn{\sigma_y^2=\sigma_{\xi}^2}}
\item{family}{a description of the error-prone variables and the random error distributions to be used in the model.
Supported distributions include \emph{Normal}, \emph{Student-t}, \emph{Slash},\emph{Hyperbolic},
\emph{Laplace} and \emph{ContNormal}, which correspond to normal, Student-t, slash, symmetric hyperbolic, Laplace and
contaminated normal distributions, respectively.}
\item{eta}{(optional) a numeric value or numeric vector that represents the extra parameter of the specified error distribution. This parameter can be assumed known or unknown.}
\item{burn.in}{the number of burn-in iterations for the MCMC algorithm.}
\item{post.sam.s}{the required size for the posterior sample of interest parameters.}
\item{thin}{(optional) the thinning interval used in the simulation to obtain the required size for the posterior sample.}
\item{heter}{(optional) An object type list that contains the values \eqn{\sigma_{\epsilon_i}^2} and \eqn{\Sigma_{\xi_i}} for all \eqn{i} (\eqn{i=1,ldots,n}).
The objects have to be specified as \code{sigma2y} and \code{sigma2xi}, i.e. \code{heter <- list(sigma2y, sigma2xi)}.
If this argument is not specified the adjusted model is the version homocedastic.}
}
\details{
The argument \emph{formula} comprises of three parts, namely: \emph{(i)} observed response variable;
\emph{(ii)} covariates with measurement error; and \emph{(iii)} covariates without measurement error including the non-parametric
components, which can be specified by using the function \code{bsp()}.
The first two parts are separated by the symbol "~" and the second and third parts are separated by the symbol "|".
This function allows fitting the measurement error model under the presence of homoscedastic and heteroscedastic random errors. These models
admit vectors of explanatory variables with and without measurement error as well as the presence of nonlinear effects
approximated by using B-splines. The model investigated is the structural
version, as the error-prone variables follow scale mixtures of normal distributions.
}
\value{
\item{chains}{A matrix that contains the posterior sample of interest parameters. Each column represents the marginal posterior sample of each parameter.}
\item{res}{a vector of quantile residuals, proposed by Dunn and Smyth (1996) in the context of classical inference, but suited here to the Bayesian case.}
\item{K-L}{a vector of case-deletion influence measures based on the Kullback-Leibler divergence.}
\item{X_2}{a vector of case-deletion influence measures based on the X2-Distance divergence.}
\item{DIC}{DIC criterion for model selection.}
\item{LMPL}{Log-marginal pseudo-likelihood for model selection.}
}
\references{Rondon, L.M. and Bolfarine, H. (2015). Bayesian analysis of flexible measurement error models. (submitted)}
\author{Luz Marina Rondon <lumarp@gmail.com> and Heleno Bolfarine}
\seealso{
\code{\link{bsp}}
\code{\link{bsp.graph.fmem}}
}
\examples{
#library(SemiPar)
#### Ragweed Pollen ####
#data(ragweed)
#attach(ragweed)
############ Example ragweed data
#ragweed2 <- ragweed[year==1993]
#day.in.seas <- day.in.seas[year==1993]
#temperature <- temperature[year==1993]
#rain <- rain[year==1993]
#wind.speed <- wind.speed[year==1993]
#ragweedn <- data.frame(ragweed2,day.in.seas,temperature,rain,wind.speed)
#model <- fmem(sqrt(ragweed2) ~ wind.speed | rain + temperature + bsp(day.in.seas),
# data=ragweedn,family="Normal", burn.in=500, post.sam.s=2000,
# thin=10, omeg=1)
#summary(model)
#
### Plot non-parametric component
#bsp.graph.fmem(model, which=1, xlab="Day", ylab="f(Day)")
############ Example Boston data
#library(MASS)
#data(Boston)
#attach(Boston)
#model <- fmem(log(medv) ~ nox | crim + rm + bsp(lstat) + bsp(dis), data=Boston,
# family="ContNormal", burn.in=10000, post.sam.s=5000, omeg=4, thin=10)
#summary(model)
#
### Plot non-parametric components
#bsp.graph.fmem(model, which=1, xlab="lstat", ylab="f(lstat)") ### for variable lstat
#bsp.graph.fmem(model, which=2, xlab="dis", ylab="f(dis)") ### for variable dis
#
}
|
#' Title
#'
#' @param mat a numeric matrix
#' @param threshold
#' @param maxpoints
#' @param cex either "absolut" (default), "increasing" or "decreasing". Gives the size of the bubbles.
#' @param attributes a list with two elements "row" and "col". Each element should be a data.frame with the same number of rows as ncol(mat) or nrow(mat), respectively. If not given and the matrix has colnames and rownames these are taken.
#' @param ...
#'
#' @return
#' @export
#'
#' @examples
#'
IOvisualize <- function (mat, threshold, maxpoints = 10000, cex = "absolut",
attributes = NULL
, ...) {
# TODO: what to do when mat contains Inf-values? --> error atm: "Error in leaflet::colorNumeric(palette = palette, domain = domain, na.color = na.color, : Wasn't able to determine range of domain "
# maybe mat is a sparse matrix?
if (!is.matrix(mat)) mat <- as.dense.matrix(mat)
# threshold argument not needed if matrix is (i) not big enough or (ii) does not have enough non-NA values
if (maxpoints > (ncol(mat) * nrow(mat)) | length(mat[is.na(mat)]) < maxpoints) {
min_threshold <- 0
} else {
suppressWarnings(min_threshold <- mat %>% abs %>%
fsort(.,decreasing = TRUE, na.last = TRUE) %>%
.[maxpoints])
}
if (missing(threshold)) {
threshold <- min_threshold
}
if (min_threshold > threshold) {
warning(paste0("maxpoints = ", maxpoints, " reached: ",
"threshold taken to ", min_threshold))
threshold <- min_threshold
}
mat[mat < threshold & mat > -threshold] <- NA
mat[mat == 0] <- NA
res <- mat %>% as.sparse.matrix
  # Adding additional attributes (optional)
if (is.null(attributes)) {
attributes <- list("row" = data.table(rownames = rownames(mat)),
"col" = data.table(colnames = colnames(mat)))
}
if (!(exists("row", attributes) & exists("col", attributes))) {
warning("attributes needs to have both arguments col and row!")
} else if (nrow(attributes$row) == nrow(mat) &
nrow(attributes$col) == ncol(mat)) {
# either: attributes are given, or matrix has both row- and colnames
attributes <- lapply(attributes, function(x) {
x[,"id" := 1:.N]
})
res <- merge(res, attributes$row,
by.x = "row", by.y = "id",
suffixes = c(".row",".col")) %>%
merge(., attributes$col, by.x = "col",
by.y = "id", suffixes = c(".row", ".col"))
}
res <- res %>% .[, `:=`(row, -row)] %>% sf::st_as_sf(coords = c("col",
"row"),
remove = FALSE)
res$row <- -res$row
if (cex == "absolut") {
res[["abs_value"]] <- abs(res$value)
cex <- "abs_value"
} else if (cex == "increasing") {
cex <- "value"
} else if (cex == "decreasing") {
res[["dec_value"]] <- -(res$value)
cex <- "dec_value"
}
mapview::mapview(res, alpha = 0.3, lwd = 0, cex = cex,
color = viridis::viridis,
zcol = "value", layer.name = "value")
}
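# Illustrative call (a sketch, not run here): `mat` is assumed to be any numeric matrix with
# row and column names, e.g. a Leontief inverse or transaction matrix; at most the 5000 largest
# absolute entries are drawn, with bubble size proportional to |value|.
# IOvisualize(mat, maxpoints = 5000, cex = "absolut")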
#' Title
#'
#' @param mat
#'
#' @return
#' @export
#'
#' @examples
#'
#'
#'
as.sparse.matrix <- function(mat, rownames = NULL, colnames = NULL,
na.rm = FALSE,
suffices = c('.row', '.col')) {
mat <- reshape2::melt(mat, na.rm = na.rm)
mat <- as.data.table(mat)
setnames(mat, c('row', 'col', 'value'))
if (is.factor(mat$row)) mat$row <- as.character(mat$row)
if (is.factor(mat$col)) mat$col <- as.character(mat$col)
if (!(is.null(rownames) | is.null(colnames))) {
# check for duplicates
dup_rows <- colnames(rownames) %in% colnames(colnames)
dup_cols <- colnames(colnames) %in% colnames(rownames)
colnames(rownames)[dup_rows] <- paste0(colnames(rownames)[dup_rows], suffices[1])
colnames(colnames)[dup_cols] <- paste0(colnames(colnames)[dup_cols], suffices[2])
}
if (!is.null(colnames)) {
mat <- merge(mat, cbind(colnames, col = (1:nrow(colnames))),
by = 'col')
#mat[, col := NULL]
}
if (!is.null(rownames)) {
mat <- merge(mat, cbind(rownames, row = (1:nrow(rownames))),
by = 'row')
#mat[, row := NULL]
}
setcolorder(mat, c('row', 'col', colnames(rownames), colnames(colnames)))
return(mat[])
}
# mat <- Z
# colnames <- data.table(country = c(LETTERS[1:ncol(mat)]),
# industry = letters[1:ncol(mat)])
# rownames <- data.table(country = c(LETTERS[4 + (1:ncol(mat))]),
# industry = letters[4 + (1:ncol(mat))])
#
#
# as.sparse.matrix(Z, colnames = colnames, rownames = rownames, suffices = c('x', 'y'))
# as.sparse.matrix(Z, colnames = colnames[,1], rownames = rownames)
#' Title
#'
#' @param x a sparse matrix in the form of either a data.frame or a data.table. Needs to have 3 columns. Default order: row | col | value. If the order differs, please specify the `row`, `col` and `value` arguments. Row and col columns can be either integers (representing the location in the matrix) or character.
#' @param row which column of x represent the row-index? default 1
#' @param col which column of x represent the column-index? default 2
#' @param value which column of x represent the value? default 3
#' @param keep.names only considered if the `row` and `col` columns of `x` are of type character.
#'
#' @return
#' @export
#'
#' @examples
as.dense.matrix <- function(x, row = 1, col = 2, value = 3,
keep.names = TRUE) {
if (mode(x) != "data.frame") x <- as.data.frame(x)
mat <- matrix(NA, ncol = length(unique(x[,col])),
nrow = length(unique(x[,row])))
mat[cbind(as.factor(x[,row]), as.factor(x[,col]))] <- x[,value] # as.factor needed to also work with non-integers row/col IDs
if(isTRUE(keep.names) & (is.character(x[,row]) | is.character(x[,col]))) {
rownames(mat) <- levels(as.factor(x[,row]))
colnames(mat) <- levels(as.factor(x[,col]))
}
return(mat)
}
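# Round-trip sketch (toy data, kept as a comment so nothing runs on load):
# m  <- matrix(c(1, NA, 0, 2), nrow = 2, dimnames = list(c("r1", "r2"), c("c1", "c2")))
# sp <- as.sparse.matrix(m, na.rm = TRUE)   # long (row, col, value) data.table
# as.dense.matrix(sp)                       # rebuilds the dense matrix (the dropped NA cell stays NA)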
#' Title
#'
#' @param xy
#'
#' @return
#' @export
#'
#' @examples
point2polygon <- function (xy) {
x <- c(xy[1]-1, xy[1])
y <- c(xy[2]-1, xy[2])
poly <- matrix(c(x[1],y[1],
x[1],y[2],
x[2],y[2],
x[2],y[1],
x[1],y[1]),
nrow = 5, byrow = TRUE)
return(st_polygon(list(poly)))
}
#' Title
#'
#' @param Z
#' @param Y
#' @param va
#' @param L
#'
#' @return
#' @export
#'
#' @examples
calculate_x <- function(Z, Y, va, L) {
if(!is.null(dim(Y))) Y <- apply(Y, 1, sum) # if Y is matrix
if(missing(L)) {
# check if mass balanced
    # all.equal() returns a character string (not FALSE) when the check fails, hence isTRUE()
    if(!isTRUE(all.equal(apply(Z, 1, sum) + Y, apply(Z, 2, sum) + va))) {
stop("IO system is not mass balanced !!")
}
# calculate output
x <- apply(Z, 1, sum) + Y
} else {
x <- L %*% Y
}
return(x)
}
#' Title
#'
#' @param Z
#' @param x
#'
#' @return
#' @export
#'
#' @examples
calculate_A <- function(Z, x) {
# calculate A-matrix
# A <- Z/x[col(Z)]
A <- eachrow(Z, x,'/')
A[is.na(A)] <- 0
return(A)
}
#' Title
#'
#' @param A
#'
#' @return
#' @export
#'
#' @examples
calculate_L <- function(A) {
# calculate Leontief inverse
L <- solve(diag(nrow(A)) - A)
return(L)
}
#' Title
#' #todo> check
#' @param Z
#'
#' @return
#' @export
#'
#' @examples
calculate_B <- function(Z, x) {
B <- Z / x
B[is.na(B)] <- 0
return(B)
}
#' Title
#' #TODO: check
#' @param Z
#'
#' @return
#' @export
#'
#' @examples
calculate_G <- function(B) {
return(solve(diag(nrow(B)) -(B)))
}
#' Title
#'
#' @param E
#' @param x
#'
#' @return
#' @export
#'
#' @examples
calculate_S <- function(E, x) {
# calculate Stressor matrix
x_hat <- diag(1/x)
x_hat[is.infinite(x_hat)] <- 0
S <- E %*% x_hat
return(S)
}
#' Title
#'
#' @param Z
#' @param Y
#' @param va
#' @param E
#'
#' @return
#' @export
#'
#' @examples
IO_creator <- function(Z, Y, va, E) {
x <- calculate_x(Z, Y, va)
A <- calculate_A(Z, x)
S <- calculate_S(E, x)
L <- calculate_L(A)
return(list("A" = A, "L" = L, "S" = S))
}
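# Toy end-to-end sketch (invented numbers, chosen so the system is mass balanced):
# Z  <- matrix(c(10, 20, 30, 5), 2, 2)   # inter-industry flows
# Y  <- c(60, 65)                        # final demand
# va <- c(70, 55)                        # value added = total output minus column sums of Z
# E  <- matrix(c(1, 2), 1, 2)            # one stressor (e.g. emissions) per sector
# io <- IO_creator(Z, Y, va, E)          # returns the A, L and S matrices
# IO_calculator(io$S, io$L, Y)           # stressor footprint attributed to each sector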
#' Title
#'
#' @param S
#' @param L
#' @param Y
#' @param B
#' @param d
#' @param f
#' @param detailed
#'
#' @return
#' @export
#'
#' @examples
IO_calculator <- function(S, L, Y, B, d, f, detailed = TRUE) {
if(missing(Y)) Y <- (B %*% d) * as.numeric(f)
x <- as.numeric(L %*% Y)
if(detailed) B <- S %*% diag(x)
else B <- S %*% x
return(B)
}
#' Title
#'
#' @param n.industries
#' @param n.emissions
#' @param n.fdcats
#' @param A
#'
#' @return
#' @export
#'
#' @examples
create_random_IOtable <- function(n.industries, n.emissions, n.fdcats, A = FALSE) {
x0 <- list("S" = matrix(runif(n.industries*n.emissions), n.emissions, n.industries),
"L" = matrix(runif(n.industries^2), n.industries, n.industries),
"Y" = matrix(runif(n.industries * n.fdcats), n.industries, n.fdcats))
if(A) x0[["A"]] <- matrix(runif(n.industries^2), n.industries, n.industries)
return(x0)
}
#' Title
#'
#' @param A_mat
#' @param n
#'
#' @return
#' @export
#'
#' @examples
leontief_series_expansion <- function(A_mat, n) {
list <- vector(mode = "list", length = n)
list[[1]] <- diag(1, nrow = nrow(A_mat), ncol = ncol(A_mat))
for(i in 2:n) {
list[[i]] <- list[[i-1]] %*% A_mat
}
return(list)
}
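# Note (sketch): the layers returned are I, A, A^2, ..., A^(n-1), so for a productive A
# (spectral radius below 1) Reduce(`+`, leontief_series_expansion(A, 50)) approximates calculate_L(A).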
#' Aggregates the Y matrix for specific columns.
#' e.g. by country, final demand category
#'
#' @param Y the final demand matrix
#' @param groupings a vector with the groupings, same length as ncol(Y)
#'
#' @return
#' @export
#'
#' @examples
aggregate_Y <- function(Y, groupings) {
  if (length(groupings) != ncol(Y)) stop('groupings needs to have the same length as ncol(Y)')
grouping_levels <- unique(groupings)
n_groups <- length(grouping_levels)
Ynew <- matrix(0, nrow = nrow(Y), ncol = n_groups)
colnames(Ynew) <- grouping_levels
for (i in 1:n_groups) {
    Ynew[,i] <- rowsums(Y[, groupings == grouping_levels[i], drop = FALSE]) # drop = FALSE keeps single-column groups as a matrix
}
return(Ynew)
}
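# Usage sketch (hypothetical grouping): collapse a final-demand matrix with one column per
# country/category pair into one column per country.
# Y <- matrix(runif(4 * 6), nrow = 4)
# aggregate_Y(Y, groupings = rep(c("AT", "DE", "FR"), each = 2))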
# 2. Sectoral Footprint Functions ----------------------------------------------
#' Title
#'
#' @param S_mat
#' @param L_mat
#' @param y_vec
#' @param index
#'
#' @return
#' @export
#'
#' @examples
.calc.sector.fp.direct <- function(S_mat, L_mat, y_vec, index) {
if(missing(index)) {
fp <- IO_calculator(S_mat, L_mat, y_vec)
} else {
# only for 1 sector
fp <- S_mat[,index] %*% (L_mat[index,] %*% y_vec)
}
return(fp)
}
#' Title
#'
#' @param S_mat
#' @param L_mat
#' @param x
#' @param index
#'
#' @return
#' @export
#'
#' @examples
.calc.sector.fp.indirect <- function(S_mat, L_mat, x, index) {
diag(L_mat) <- 0 # all diagonal entries (e.g. input of cars into car industry) are already considered in the direct footprint calculations
if(missing(index)) {
fp <- S_mat %*% L_mat %*% diag(as.numeric(x))
} else {
fp <- S_mat %*% L_mat[,index] %*% x[index]
}
return(fp)
}
#' Title
#'
#' @param L_mat
#' @param S_mat
#' @param y_vec
#' @param index the index of the sector for which footprints are to be calculated. If missing, results for ALL sectors are returned (higher computational expense)
#' @param detailed shall footprints be returned split up by direct + indirect emissions?
#'
#' @return
#' @export
#'
#' @examples
calc_footprint_sector <- function(L_mat, S_mat, y_vec, index,
detailed = FALSE) {
direct <- .calc.sector.fp.direct(S_mat = S_mat, L_mat = L_mat,
y_vec = y_vec, index = index)
x <- calculate_x(Y = y_vec, L = L_mat)
indirect <- .calc.sector.fp.indirect(S_mat = S_mat, L_mat = L_mat,
x = x, index = index)
if(detailed) {
fp <- list("direct" = direct, "indirect" = indirect)
} else {
fp <- direct + indirect
}
return(fp)
}
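# Sketch, reusing the toy objects from the IO_creator example above: footprint of sector 1,
# optionally split into its direct and upstream (indirect) parts.
# calc_footprint_sector(L_mat = io$L, S_mat = io$S, y_vec = Y, index = 1, detailed = TRUE)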
#' Title
#'
#' @param n number of layers. recommendation >= 8
#' @param L_mat
#' @param A_mat
#' @param y_vec
#' @param S_mat
#' @param index see ?calc_footprint_sector
#'
#' @return
#' @export
#'
#' @examples
SPA_footprint_sector <- function(n = 8, L_mat, A_mat, y_vec, S_mat, index) {
L_series <- leontief_series_expansion(A_mat, n)
fp <- vector(mode = "list", length = n)
fp[[1]] <- .calc.sector.fp.direct(index = index, S_mat = S_mat,
L_mat = L_mat, y_vec = y_vec)
if(missing(index)) {
# total output
x <- calculate_x(L = L_mat, Y = y_vec) %>% as.numeric
for(i in 2:n) {
fp[[i]] <- S_mat %*% L_series[[i]] %*% diag(x)
}
} else {
# output of sector i
x <- L_mat[index,] %*% y_vec
for(i in 2:n) {
fp[[i]] <- S_mat %*% L_series[[i]][,index] %*% x
}
}
return(fp)
}
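# Sketch (same toy objects as above): footprint contributions of sector 1 by production layer;
# summing the returned list with Reduce(`+`, .) gives the total captured by the first n layers.
# SPA_footprint_sector(n = 8, L_mat = io$L, A_mat = io$A, y_vec = Y, S_mat = io$S, index = 1)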
|
/R/MRIO_tools.R
|
no_license
|
simschul/my.utils
|
R
| false | false | 12,804 |
r
|
#Explore the classification efficiency
library(clusterCrit)
source("./source/functions.R")
source("./source/graphics.R")
load("./results/soms_sp_6x6.Rdata") #created in build maps
load("./results/soms_sp_10x10.Rdata") #created in build maps
load("./results/soms_sp_14x14.Rdata") #created in build maps
load("./data/owda_for_som.rdata") #created in import_data
x <- som_sp_10x10$codes[[1]]
som_hc <- cutree(hclust(dist(x), method = "ward.D2"), 2)
cluster_comparison <- as.matrix(intCriteria(x, som_hc, c("all")))
for(i in 3:30){ #Takes some time: you can load below
som_hc <- cutree(hclust(dist(x), method = "ward.D2"), i)
cluster_comparison <- cbind(cluster_comparison,
intCriteria(x, som_hc, c("all")))
print(i)
}
cluster_comparison <- apply(cluster_comparison, 2, unlist)
colnames(cluster_comparison) <- 2:30
cluster_comparison = data.frame(t(cluster_comparison))
save(cluster_comparison, file = "./results/cluster_comp.rdata")
load("./results/cluster_comp.rdata")
#all plots
for(i in 1:31){
aa = parse(text = colnames(cluster_comparison)[i])
print(xyplot(eval(aa)~2:30, cluster_comparison))
}
#some plots
xyplot(davies_bouldin ~ 2:30, cluster_comparison)
xyplot(c_index ~ 2:30, cluster_comparison)
xyplot(calinski_harabasz ~ 2:30, cluster_comparison)
xyplot(sd_scat ~ 2:30, cluster_comparison)
xyplot(sd_dis ~ 2:30, cluster_comparison)
xyplot(wemmert_gancarski ~ 2:30, cluster_comparison)
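# Sketch: clusterCrit also provides bestCriterion() to read off the suggested k directly, e.g.
# (2:30)[bestCriterion(cluster_comparison$davies_bouldin, "Davies_Bouldin")]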
#merge clusters with owda data
som_sp_6x6_map_31 <- create.som.clusters(som_sp_6x6, som_sp_6x6_map, 31)
owda_sp_6x6 <- make.classif(som_sp_6x6_map_31, owda_raw, 31)
save(som_sp_6x6, som_sp_6x6_map, som_sp_6x6_map_17, owda_sp_6x6, file = "results/soms_sp_6x6.Rdata")
som_sp_10x10_map_31 <- create.som.clusters(som_sp_10x10, som_sp_10x10_map, 31)
owda_sp_10x10 <- make.classif(som_sp_10x10_map_31, owda_raw, 31)
save(som_sp_10x10, som_sp_10x10_map, som_sp_10x10_map_17, owda_sp_10x10, file = "results/soms_sp_10x10.Rdata")
som_sp_14x14_map_31 <- create.som.clusters(som_sp_14x14, som_sp_14x14_map, 31)
owda_sp_14x14 <- make.classif(som_sp_14x14_map_31, owda_raw, 31)
save(som_sp_14x14, som_sp_14x14_map, som_sp_14x14_map_17, owda_sp_14x14, file = "results/soms_sp_14x14.Rdata")
#Examine maximum correlation between clusters and variance: make it a single function
cor_som_sp <- data.table(rbind(data.table(som = as.factor("6x6"), clusters = 2:31, max_cor = som.cor(owda_sp_6x6)),
data.table(som = as.factor("10x10"), clusters = 2:31, max_cor = som.cor(owda_sp_10x10)),
data.table(som = as.factor("14x14"), clusters = 2:31, max_cor = som.cor(owda_sp_14x14))))
sd_som_sp <- data.table(rbind(data.table(som = as.factor("6x6"), clusters = 2:31, sd = som.sd(owda_sp_6x6, nclusters = 31)),
data.table(som = as.factor("10x10"), clusters = 2:31, sd = som.sd(owda_sp_10x10, nclusters = 31)),
data.table(som = as.factor("14x14"), clusters = 2:31, sd = som.sd(owda_sp_14x14, nclusters = 31))))
g2 <- ggplot(cor_som_sp, aes(clusters, max_cor, color = som)) +
geom_point() +
geom_smooth(span = 0.3, se = F) +
labs(x = "Number of clusters", y = "Maximum Correlation") +
scale_color_manual(values = colset_light[c(8, 11, 4)]) +
theme_bw()
g1 <- ggplot(sd_som_sp, aes(clusters, sd, col = som)) +
geom_point() +
geom_smooth(span = 0.3, se = F) +
labs(x = "Number of clusters", y = "Stand. Dev.") +
scale_color_manual(values = colset_light[c(8, 11, 4)]) +
theme_bw()
gg_all <- ggarrange(g1 + rremove("legend"),
g2 + theme(legend.position = c(0.76, 0.33),
legend.background = element_rect(fill = NA)),
labels = c("a", "b"),
nrow = 1, ncol = 2)
ggsave("./results/figs/clustering_comp.png", gg_all, units = "cm", width = 20, height = 10)
|
/bin/compare_clustering.R
|
no_license
|
imarkonis/owda_som
|
R
| false | false | 3,909 |
r
|
# Drawing graphs with the iris data
#1. Scatter plots
library(dplyr)
View(iris)
iris_setosa <- filter(iris, Species == 'setosa')
iris_versicolor <- filter(iris, Species == 'versicolor')
iris_virginica <- filter(iris, Species == 'virginica')
par(mfrow=c(3,2))
plot(iris_setosa$Sepal.Length, iris_setosa$Sepal.Width,
xlab='Length', ylab='Width',
     xlim=c(4, 8.1), ylim=c(1.9, 4.5), main='Sepal of the Setosa species')
plot(iris_setosa$Petal.Length, iris_setosa$Petal.Width,
xlab='Length', ylab='Width',
     xlim=c(0.8, 7), ylim=c(0, 2.6), main='Petal of the Setosa species')
plot(iris_versicolor$Sepal.Length, iris_versicolor$Sepal.Width,
xlab='Length', ylab='Width',
     xlim=c(4, 8.1), ylim=c(1.9, 4.5), main='Sepal of the Versicolor species')
plot(iris_versicolor$Petal.Length, iris_versicolor$Petal.Width,
xlab='Length', ylab='Width',
     xlim=c(0.8, 7), ylim=c(0, 2.6), main='Petal of the Versicolor species')
plot(iris_virginica$Sepal.Length, iris_virginica$Sepal.Width,
xlab='Length', ylab='Width',
     xlim=c(4, 8.1), ylim=c(1.9, 4.5), main='Sepal of the Virginica species')
plot(iris_virginica$Petal.Length, iris_virginica$Petal.Width,
xlab='Length', ylab='Width',
     xlim=c(0.8, 7), ylim=c(0, 2.6), main='Petal of the Virginica species')
#2. Comparing means
par(mfrow=c(1,2))
mean_of_iris_setosa <- iris_setosa %>%
summarise_each(list(mean), Sepal.Length, Sepal.Width,
Petal.Length, Petal.Width)
mean_of_iris_versicolor <- iris_versicolor %>%
summarise_each(list(mean), Sepal.Length, Sepal.Width,
Petal.Length, Petal.Width)
mean_of_iris_virginica <- iris_virginica %>%
summarise_each(list(mean), Sepal.Length, Sepal.Width,
Petal.Length, Petal.Width)
# barplot(as.matrix(mean_of_iris_setosa))
mean_of_iris <- t(rbind(mean_of_iris_setosa,
mean_of_iris_versicolor,
mean_of_iris_virginica))
colnames(mean_of_iris) <- c('Setosa', 'Versicolor', 'Virginica')
barplot(as.matrix(mean_of_iris), beside=T, main='Means by species',
ylim=c(0,10),
col=c('red','yellow','green','blue'))
legend(1, 10,
c("Sepal.length","Sepal.width","Petal.length","Petal.width"), cex=0.8,
fill=c('red','yellow','green','blue'))
barplot(as.matrix(mean_of_iris), main='Means by species',
ylim=c(0,25),
col=c('red','yellow','green','blue'))
legend(0.2, 25,
c("Sepal.length","Sepal.width","Petal.length","Petal.width"), cex=0.8,
fill=c('red','yellow','green','blue'))
#3. Boxplot
par(mfrow=c(3,1))
boxplot(iris_setosa$Sepal.Length, iris_setosa$Sepal.Width,
iris_setosa$Petal.Length, iris_setosa$Petal.Width,
col=c('red','yellow','green','blue'),
names=c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width'),
main='Setosa')
boxplot(iris_versicolor$Sepal.Length, iris_versicolor$Sepal.Width,
iris_versicolor$Petal.Length, iris_versicolor$Petal.Width,
col=c('red','yellow','green','blue'),
names=c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width'),
main='Versicolor')
boxplot(iris_virginica$Sepal.Length, iris_virginica$Sepal.Width,
iris_virginica$Petal.Length, iris_virginica$Petal.Width,
col=c('red','yellow','green','blue'),
names=c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width'),
main='Virginica')
par(mfrow=c(1,1))
|
/Part4/iris.R
|
no_license
|
ckiekim/R-Lecture
|
R
| false | false | 3,386 |
r
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/immunogen_decision_rules.r
\name{get_mode}
\alias{get_mode}
\title{Estimate sample mode}
\usage{
get_mode(sample)
}
\arguments{
\item{sample}{Sample to get an estimated mode of}
}
\description{
This function is a wrapper for the mlv function from the modeest package
}
|
/man/get_mode.Rd
|
no_license
|
liesb/BIITE
|
R
| false | false | 361 |
rd
|
epc = read.table("household_power_consumption.txt", header = TRUE, sep = ";")
epc2 = with(epc,subset(epc,Date == "1/2/2007" | Date == "2/2/2007"))
datetime <- paste(epc2$Date, epc2$Time)
datetime <- strptime(datetime, "%d/%m/%Y %H:%M:%S")
epc2$DateTime <- datetime
epc2 <- epc2[,c(1,2,10,3:9)]
cols <- 4:9
epc2[cols] <- lapply(epc2[cols], as.numeric)
with(epc2, plot(DateTime,Sub_metering_1,ylab = "Energy sub metering", type = "n", xlab = ""))
with(epc2,lines(DateTime,Sub_metering_1, col = "black"))
with(epc2,lines(DateTime,Sub_metering_2, col = "red"))
with(epc2,lines(DateTime,Sub_metering_3, col = "blue"))
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), pch = "-")
dev.copy(png, file = "plot3.png")
dev.off()
|
/plot3.R
|
no_license
|
ankushkh/ExData_Plotting1
|
R
| false | false | 787 |
r
|
# Script to undertake analysis of Hattah Floodplains vegetation data using boosted generalized additive models
# Adapted by C James from sample code provided by Maloney et al. 2012 (Applying additive modelling, Methods in Ecology and Evolution vol 3, 116-128, Appendix E)
# 19th August 2016
#
# Load data and libraries
library(mboost)
library(MASS)
library(PerformanceAnalytics)
data.dir="C:/Users/jc246980/Documents/Documents (2)/Current projects/MD Vegetation/Hattah_data_csvs/"; setwd(data.dir)
data.matrix=read.csv("Final_Metrics_Hattah_FP.csv") # load data
image.dir="C:/Users/jc246980/Documents/Documents (2)/Current projects/MD Vegetation/Plots/";
# sort out environmental data - centre and logarithmize
envdata=data.matrix[,c("d30", "d90", "d180", "d365", "Inundated","Flood_frequency","TSLF", "Easting", "Northing","H_index")]
envdata_backup=envdata
rownames(envdata)=(data.matrix$Row.names)
# Note: rainfall variables are (predictably) highly correlated so I am dropping the most correlated (d90 and d180) and am left with d30 and d365 which still have correlation coeff of 0.88!
# highly skewed distributions were log10 transformed before analysis
envdata$d30=as.numeric(scale(log10(envdata$d30+1),center=TRUE, scale=FALSE)) # added one to avoid return of infinity values due to low mean rainfall over shorter time periods
envdata$d90=as.numeric(scale(log10(envdata$d90+1),center=TRUE, scale=FALSE))
envdata$d180=as.numeric(scale(log10(envdata$d180+1),center=TRUE, scale=FALSE))
envdata$d365=as.numeric(scale(log10(envdata$d365+1),center=TRUE, scale=FALSE))
envdata$Flood_frequency=as.numeric(scale(envdata$Flood_frequency,center=TRUE, scale=FALSE))
envdata$TSLF=as.numeric(scale(log10(envdata$TSLF+1),center=TRUE, scale=FALSE))
envdata$Easting=as.numeric(scale(envdata$Easting^2,center=FALSE, scale=TRUE))
envdata$Northing=as.numeric(scale(envdata$Northing^2,center=FALSE, scale=TRUE))
envdata$INT <- rep(1,nrow(envdata)) # provide intercept variable
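# Optional sanity check (sketch): the centred predictors should now have (near-)zero means
# round(colMeans(envdata[, c("d30", "d90", "d180", "d365", "Flood_frequency", "TSLF")]), 10)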
####################################################DIVERSITY analysis
# NOTE: 'daten' is assumed to be the analysis data frame (H_index response plus the centred
# predictors and Site.ID/Row.names identifiers) assembled in a step not shown in this script
n <- nrow(daten)
set.seed(806)
indvecL <-sample(1:n,n,replace=FALSE) # create a random set of numbers
datenSmall <- daten[indvecL,][1:(n/2),] # create a subset of half the original data using the random numbers
#specify model formula - kept relatively simple for the time being (bols are linear effects, bbs are smoothed effects and bspatial are spatial effects)
formulaB <- H_index ~ bols(Easting, intercept=FALSE)+bols(Northing, intercept=FALSE)+brandom(Site.ID,df=1)+
bspatial(Easting, Northing, knots=20, center=TRUE, df=1, differences=1)+
bols(d30, intercept=FALSE)+bbs(d30, center=TRUE, df=1)+
bols(d365, intercept=FALSE)+bbs(d365, center=TRUE, df=1)+
bols(Flood_frequency, intercept=FALSE)+bbs(Flood_frequency, center=TRUE, df=1)+
bols(TSLF, intercept=FALSE)+bbs(TSLF, center=TRUE, df=1)+
bols(Inundated.y, intercept=FALSE)
predicted<-list() # creates a whole bunch of empty lists
predicted.insample <-list()
nuvec<-list()
null.model.coef<-list()
null.model.nu<-list()
# specify training set
set.seed(1)
n<-nrow(datenSmall)
indvec<-sample(1:n,n,replace=TRUE)
traindata<-datenSmall[indvec,]
traindatav2=traindata[,-1] # remove site names from data frame
# Run Full model for diversity
Full.model <-gamboost(formulaB,data=daten, family=Gaussian(), control=boost_control(mstop=1000,trace=TRUE)) # originally 10000 but reduced down the mstop value for trial runs as it takes a while
# This churns out a couple of warnings that I don't fully understand regarding the linear effects - covariates should be (mean-) centered if intercept =False for Easting, Northing and d30
mopt <- mstop(aic <- AIC(Full.model)) # also suggests that mstop is 10000 during initial run
# Carry out 5 fold cross validation to determine optimal stopping iteration - this seems to still be 10000 - increase cross validation for proper runs?
cv5f <- cv(model.weights(Full.model), type='kfold', B=5)
cvm <- cvrisk(Full.model, folds=cv5f)
#plot(cvm)
st<-(mstop(cvm))
Full.model[st]
coef(Full.model)
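# Optional (sketch): how often each base-learner was selected across boosting iterations
# table(selected(Full.model))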
# create new data frame with residuals for plotting and also for use in t+1
newdat <- cbind(traindatav2$Site.ID, as.data.frame(residuals(Full.model))) # extract residuals
newdat=cbind(newdat, traindata$Row.names)
colnames(newdat)=c("Site.ID", "resid", "Site.year")
substrRight <- function(x, n){ # script to grab year off row names
substr(x, nchar(x)-n+1, nchar(x))
}
Site.year=newdat$Site.year
year=sapply(Site.year, function (x) substrRight(x, 2))
year=as.data.frame(year)
newdat=cbind(newdat, year)
newdat$year=as.numeric(newdat$year)
sitelist=unique(newdat$Site.ID)
newdat$newresid=NA # creates new column into which the residuals from the last time period will be added
for(s in sitelist) { # loop through each site - because some sites don't have year 4 I have had to create two sets of rules
yoi=unique(newdat[which(newdat$Site.ID==s),c("year")])
if("4" %in% yoi){
newdat[which(newdat$Site.ID==s & newdat$year==1),c("newresid")] = 0.1
roi_t2=newdat[which(newdat$Site.ID==s & newdat$year==1),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==2),c("newresid")] = roi_t2
roi_t3=newdat[which(newdat$Site.ID==s & newdat$year==2),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==3),c("newresid")] = roi_t3
roi_t4=newdat[which(newdat$Site.ID==s & newdat$year==3),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==4),c("newresid")] = roi_t4
roi_t5=newdat[which(newdat$Site.ID==s & newdat$year==4),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==5),c("newresid")] = roi_t5
roi_t6=newdat[which(newdat$Site.ID==s & newdat$year==5),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==6),c("newresid")] = roi_t6
roi_t7=newdat[which(newdat$Site.ID==s & newdat$year==6),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==7),c("newresid")] = roi_t7
roi_t8=newdat[which(newdat$Site.ID==s & newdat$year==7),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==8),c("newresid")] = roi_t8}
if(!"4" %in% yoi){
newdat[which(newdat$Site.ID==s & newdat$year==1),c("newresid")] = 0.1
roi_t2=newdat[which(newdat$Site.ID==s & newdat$year==1),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==2),c("newresid")] = roi_t2
roi_t3=newdat[which(newdat$Site.ID==s & newdat$year==2),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==3),c("newresid")] = roi_t3
roi_t5=newdat[which(newdat$Site.ID==s & newdat$year==3),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==5),c("newresid")] = roi_t5
roi_t6=newdat[which(newdat$Site.ID==s & newdat$year==5),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==6),c("newresid")] = roi_t6
roi_t7=newdat[which(newdat$Site.ID==s & newdat$year==6),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==7),c("newresid")] = roi_t7
roi_t8=newdat[which(newdat$Site.ID==s & newdat$year==7),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==8),c("newresid")] = roi_t8}
}
newdat$Row.names=rownames(newdat)
daten_resid=merge(daten,newdat[,c("newresid", "Row.names")],by="Row.names") # merge data with new residuals to create the data dataframe with the residuals added as a predictor
daten_resid$newresid=as.numeric(scale(daten_resid$newresid, scale=TRUE))
# Example of marginal functional estimates of boosted additive models for flood frequency, time since last flood and rainfall in the 365 days prior to sampling
# rem that data has been centred so when looking at the plots it's helpful to 'uncentre' the data
mFf<-mean(envdata_backup$Flood_frequency)
xmatLin <- extract(Full.model, which=8)
xmatSmooth <- extract(Full.model, which=9)
par(mfrow = c(3,2))
# the line below had to be adapted from the paper to correct for the type of speech marks, the order of the components and the spacing around the equals signs: see coef(Full.model)
yvalues=xmatSmooth[[1]]%*%coef(Full.model)$`bbs(Flood_frequency, df = 1, center = TRUE)` + xmatLin[[1]]*coef(Full.model)$`bols(Flood_frequency, intercept = FALSE)`
plot(sort(traindata$Flood_frequency+mFf),yvalues[order(traindata$Flood_frequency+mFf)], type="l",xlab='Flood frequency', ylab='f(Flood frequency)')
rug(sort(traindata$Flood_frequency+mFf))
# plot using Time Since Last Flood (TSLF)
mTSLF<-mean(log10(envdata_backup$TSLF+1))
xmatSmooth <- extract(Full.model, which=11)
# the line below had to be adapted from the paper to correct for the type of speech marks, the order and the spacing around the equals signs: see coef(Full.model)
yvalues=xmatSmooth[[1]]%*%coef(Full.model)$`bbs(TSLF, df = 1, center = TRUE)`
plot(sort(traindata$TSLF+mTSLF),yvalues[order(traindata$TSLF+mTSLF)], type="l",xlab='TSLF', ylab='f(log(TSLF+1))')
rug(sort(traindata$TSLF+mTSLF))
# plot using d365 (accumulated rainfall in the 365 days prior to sampling)
md365<-mean(log10(envdata_backup$d365+1))
xmatSmooth <- extract(Full.model, which=7)
# the line below had to be adapted from the paper to correct for the type of speech marks, the order and the spacing around the equals signs: see coef(Full.model)
yvalues=xmatSmooth[[1]]%*%coef(Full.model)$`bbs(d365, df = 1, center = TRUE)`
plot(sort(traindata$d365+md365),yvalues[order(traindata$d365+md365)], type="l",xlab='d365', ylab='f(log(d365+1))')
rug(sort(traindata$d365+md365))
# Predictions for out-of-bootstrap data
testdata <- datenSmall[-indvec,]
predictions<-predict(Full.model,newdata=testdata)
plot(exp(predictions),testdata$H_index)
abline(1,0)
# Compute pseudo r^2 (a Nagelkerke-style measure built from Poisson log-likelihoods)
m1 <-glm(H_index~1,data=traindata, family=gaussian)
null.model.coef <- coef(m1)
y<-testdata$H_index
lambda<-exp(predictions)
L1<-y*log(lambda)-lgamma(y+1)-lambda
c0<-exp(null.model.coef)
L0<-y*log(c0)-lgamma(y+1)-c0
n<-length(y)
r2<-(1-exp(-2/n*(sum(L1)-sum(L0))))/(1-exp(sum(L0))^{2/n})
# Compute out-of-bootstrap r^2 - NOT SURE I have done this correctly - I want to measure the predictive accuracy but I am not sure what I am doing here!
m1 <-glm(H_index~1,data=testdata, family=gaussian)
null.model.coef <- coef(m1)
y<-testdata$H_index
lambda<-exp(predictions)
L1<-y*log(lambda)-lgamma(y+1)-lambda
c0<-exp(null.model.coef)
L0<-y*log(c0)-lgamma(y+1)-c0
n<-length(y)
r2<-(1-exp(-2/n*(sum(L1)-sum(L0))))/(1-exp(sum(L0))^{2/n})
####################################################RICHNESS analysis
n<- nrow(daten.rich)
set.seed(806)
indvecL <-sample(1:n,n,replace=FALSE) # create a random set of numbers
datenSmall.rich <- daten.rich[indvecL,][1:(n/2),] # create a subset of half the original data using the random numbers
datenSmall.rich$Inundated.y=as.factor(datenSmall.rich$Inundated.y)
#specify model formula - kept relatively simple for the time being (bols are linear effects, bbs are smoothed effects and bspatial are spatial effects)
formulaB <- Richness ~ bols(Easting, intercept=FALSE)+bols(Northing, intercept=FALSE)+
bspatial(Easting, Northing, knots=20, center=TRUE, df=1, differences=1)+
bols(d30, intercept=FALSE)+bbs(d30, center=TRUE, df=1)+
bols(d365, intercept=FALSE)+bbs(d365, center=TRUE, df=1)+
bols(Flood_frequency, intercept=FALSE)+bbs(Flood_frequency, center=TRUE, df=1)+
bols(TSLF, intercept=FALSE)+bbs(TSLF, center=TRUE, df=1)+
bols(Inundated.y, intercept=FALSE)
predicted<-list() # creates a whole bunch of empty lists
predicted.insample <-list()
nuvec<-list()
null.model.coef<-list()
null.model.nu<-list()
# specify training set
set.seed(1)
n<-nrow(datenSmall.rich)
indvec<-sample(1:n,n,replace=TRUE)
traindata.rich<-datenSmall.rich[indvec,]
traindatav2.rich=traindata.rich[,-1] # remove site names from training data set
# Run model - FAILS
Full.model.rich <-gamboost(formulaB,data=traindatav2.rich, family=Poisson(), control=boost_control(mstop=2000,trace=TRUE)) #
# This model FAILS with an error around singularity which I think means that there are responses that are NOT unique combinations of the predictors - this did not happen for the diversity index
# because this index takes into account abundance as well as p/a
mopt <- mstop(aic <- AIC(Full.model.rich))
#########################Further plotting of model and partial effects
# Plot partial effects all together
# plot just smoothed effects
par(mfrow = c(3,2))
plot(Full.model, which = "bbs")
# plot just linear effects
par(mfrow = c(3,3))
plot(Full.model, which = "bols")
# Plot spatial effects
plot(Full.model, which = "bspatial")
|
/Development/mboost.script.r
|
no_license
|
CassieJames/EWKR-scripts
|
R
| false | false | 12,289 |
r
|
# Script to undertake analysis of Hattah Floodplains vegetation data using boosted generalized additive models
# Adapted by C James from sample code provided by Maloney et al. 2012 (Applying additive modelling, Methods in Ecology and Evolution vol 3, 116-128, Appendix E)
# 19th August 2016
#
# Load data and libraries
library(mboost)
library(MASS)
library(PerformanceAnalytics)
data.dir="C:/Users/jc246980/Documents/Documents (2)/Current projects/MD Vegetation/Hattah_data_csvs/"; setwd(data.dir)
data.matrix=read.csv("Final_Metrics_Hattah_FP.csv") # load data
image.dir="C:/Users/jc246980/Documents/Documents (2)/Current projects/MD Vegetation/Plots/";
# sort out environmental data - centre and logarithmize
envdata=data.matrix[,c("d30", "d90", "d180", "d365", "Inundated","Flood_frequency","TSLF", "Easting", "Northing","H_index")]
envdata_backup=envdata
rownames(envdata)=(data.matrix$Row.names)
# Note: rainfall variables are (predictably) highly correlated so I am dropping the most correlated (d90 and d180) and am left with d30 and d365 which still have correlation coeff of 0.88!
# highly skewed distributions were log10 transformed before analysis
envdata$d30=as.numeric(scale(log10(envdata$d30+1),center=TRUE, scale=FALSE)) # added one to avoid return of infinity values due to low mean rainfall over shorter time periods
envdata$d90=as.numeric(scale(log10(envdata$d90+1),center=TRUE, scale=FALSE))
envdata$d180=as.numeric(scale(log10(envdata$d180+1),center=TRUE, scale=FALSE))
envdata$d365=as.numeric(scale(log10(envdata$d365+1),center=TRUE, scale=FALSE))
envdata$Flood_frequency=as.numeric(scale(envdata$Flood_frequency,center=TRUE, scale=FALSE))
envdata$TSLF=as.numeric(scale(log10(envdata$TSLF+1),center=TRUE, scale=FALSE))
envdata$Easting=as.numeric(scale(envdata$Easting^2,center=FALSE, scale=TRUE))
envdata$Northing=as.numeric(scale(envdata$Northing^2,center=FALSE, scale=TRUE))
envdata$INT <- rep(1,nrow(envdata)) # provide intercept variable
####################################################DIVERSITY analysis
# NOTE: 'daten' is assumed to be the analysis data frame (H_index response plus the centred
# predictors and Site.ID/Row.names identifiers) assembled in a step not shown in this script
n <- nrow(daten)
set.seed(806)
indvecL <-sample(1:n,n,replace=FALSE) # create a random set of numbers
datenSmall <- daten[indvecL,][1:(n/2),] # create a subset of half the original data using the random numbers
#specify model formula - kept relatively simple for the time being (bols are linear effects, bbs are smoothed effects and bspatial are spatial effects)
formulaB <- H_index ~ bols(Easting, intercept=FALSE)+bols(Northing, intercept=FALSE)+brandom(Site.ID,df=1)+
bspatial(Easting, Northing, knots=20, center=TRUE, df=1, differences=1)+
bols(d30, intercept=FALSE)+bbs(d30, center=TRUE, df=1)+
bols(d365, intercept=FALSE)+bbs(d365, center=TRUE, df=1)+
bols(Flood_frequency, intercept=FALSE)+bbs(Flood_frequency, center=TRUE, df=1)+
bols(TSLF, intercept=FALSE)+bbs(TSLF, center=TRUE, df=1)+
bols(Inundated.y, intercept=FALSE)
predicted<-list() # creates a whole bunch of empty lists
predicted.insample <-list()
nuvec<-list()
null.model.coef<-list()
null.model.nu<-list()
# specify training set
set.seed(1)
n<-nrow(datenSmall)
indvec<-sample(1:n,n,replace=TRUE)
traindata<-datenSmall[indvec,]
traindatav2=traindata[,-1] # remove site names from data frame
# Run Full model for diversity
Full.model <-gamboost(formulaB,data=daten, family=Gaussian(), control=boost_control(mstop=1000,trace=TRUE)) # originally 10000 but reduced down the mstop value for trial runs as it takes a while
# This churns out a couple of warnings that I don't fully understand regarding the linear effects - covariates should be (mean-) centered if intercept =False for Easting, Northing and d30
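# One way to address those warnings (a sketch, not applied here): Easting and Northing
# were scaled but not mean-centred above, and mboost expects covariates entering
# bols(..., intercept = FALSE) to be centred, e.g.
# envdata$Easting <- as.numeric(scale(envdata$Easting^2, center = TRUE, scale = TRUE))
# envdata$Northing <- as.numeric(scale(envdata$Northing^2, center = TRUE, scale = TRUE))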
mopt <- mstop(aic <- AIC(Full.model)) # also suggests that mstop is 10000 during initial run
# Carry out 5 fold cross validation to determine optimal stopping iteration - this seems to still be 10000 - increase cross validation for proper runs?
cv5f <- cv(model.weights(Full.model), type='kfold', B=5)
cvm <- cvrisk(Full.model, folds=cv5f)
#plot(cvm)
st<-(mstop(cvm))
Full.model[st]
coef(Full.model)
# create new data frame with residuals for plotting and also for use in t+1
newdat <- cbind(traindatav2$Site.ID, as.data.frame(residuals(Full.model))) # extract residuals
newdat=cbind(newdat, traindata$Row.names)
colnames(newdat)=c("Site.ID", "resid", "Site.year")
substrRight <- function(x, n){ # script to grab year off row names
substr(x, nchar(x)-n+1, nchar(x))
}
Site.year=newdat$Site.year
year=sapply(Site.year, function (x) substrRight(x, 2))
year=as.data.frame(year)
newdat=cbind(newdat, year)
newdat$year=as.numeric(newdat$year)
sitelist=unique(newdat$Site.ID)
newdat$newresid=NA # creates new column into which the residuals from the last time period will be added
for(s in sitelist) { # loop through each site - because some sites don't have year 4 I have had to create two sets of rules
yoi=unique(newdat[which(newdat$Site.ID==s),c("year")])
if("4" %in% yoi){
newdat[which(newdat$Site.ID==s & newdat$year==1),c("newresid")] = 0.1
roi_t2=newdat[which(newdat$Site.ID==s & newdat$year==1),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==2),c("newresid")] = roi_t2
roi_t3=newdat[which(newdat$Site.ID==s & newdat$year==2),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==3),c("newresid")] = roi_t3
roi_t4=newdat[which(newdat$Site.ID==s & newdat$year==3),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==4),c("newresid")] = roi_t4
roi_t5=newdat[which(newdat$Site.ID==s & newdat$year==4),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==5),c("newresid")] = roi_t5
roi_t6=newdat[which(newdat$Site.ID==s & newdat$year==5),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==6),c("newresid")] = roi_t6
roi_t7=newdat[which(newdat$Site.ID==s & newdat$year==6),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==7),c("newresid")] = roi_t7
roi_t8=newdat[which(newdat$Site.ID==s & newdat$year==7),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==8),c("newresid")] = roi_t8}
if(!"4" %in% yoi){
newdat[which(newdat$Site.ID==s & newdat$year==1),c("newresid")] = 0.1
roi_t2=newdat[which(newdat$Site.ID==s & newdat$year==1),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==2),c("newresid")] = roi_t2
roi_t3=newdat[which(newdat$Site.ID==s & newdat$year==2),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==3),c("newresid")] = roi_t3
roi_t5=newdat[which(newdat$Site.ID==s & newdat$year==3),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==5),c("newresid")] = roi_t5
roi_t6=newdat[which(newdat$Site.ID==s & newdat$year==5),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==6),c("newresid")] = roi_t6
roi_t7=newdat[which(newdat$Site.ID==s & newdat$year==6),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==7),c("newresid")] = roi_t7
roi_t8=newdat[which(newdat$Site.ID==s & newdat$year==7),c("resid")]
newdat[which(newdat$Site.ID==s & newdat$year==8),c("newresid")] = roi_t8}
}
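# A more compact equivalent of the loop above (a sketch; assumes dplyr is available and
# that 'year' orders the surveys within each site): take the previous survey's residual
# within each site, defaulting to 0.1 for the first survey. Written to a separate column
# so it can be checked against 'newresid' without altering the results used below.
library(dplyr)
newdat_check <- newdat %>%
  group_by(Site.ID) %>%
  arrange(year, .by_group = TRUE) %>%
  mutate(newresid_check = lag(resid, default = 0.1)) %>%
  ungroup()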
newdat$Row.names=rownames(newdat)
daten_resid=merge(daten,newdat[,c("newresid", "Row.names")],by="Row.names") # merge data with new residuals to create the data dataframe with the residuals added as a predictor
daten_resid$newresid=as.numeric(scale(daten_resid$newresid, scale=TRUE))
# Example of marginal functional estimates of boosted additive models for flood frequency, time since last flood and rainfall in the 365 days prior to sampling
# rem that data has been centred so when looking at the plots its helpful to 'uncentre' the data
mFf<-mean(envdata_backup$Flood_frequency)
xmatLin <- extract(Full.model, which=8)
xmatSmooth <- extract(Full.model, which=9)
par(mfrow = c(3,2))
# the below line had to be adapted from the paper to correct for the type of speech marks, the order of the components and the spacing around the equals signs: see coef(Full.model)
yvalues=xmatSmooth[[1]]%*%coef(Full.model)$`bbs(Flood_frequency, df = 1, center = TRUE)` + xmatLin[[1]]*coef(Full.model)$`bols(Flood_frequency, intercept = FALSE)`
plot(sort(traindata$Flood_frequency+mFf),yvalues[order(traindata$Flood_frequency+mFf)], type="l",xlab='Flood frequency', ylab='f(Flood frequency)')
rug(sort(traindata$Flood_frequency+mFf))
# plot using Time Since Last Flood (TSLF)
mTSLF<-mean(log10(envdata_backup$TSLF+1))
xmatSmooth <- extract(Full.model, which=11)
# the below line had to be adapted from the paper to correct for the type of speech marks, the order and the spacing around the equals signs: see coef(Full.model)
yvalues=xmatSmooth[[1]]%*%coef(Full.model)$`bbs(TSLF, df = 1, center = TRUE)`
plot(sort(traindata$TSLF+mTSLF),yvalues[order(traindata$TSLF+mTSLF)], type="l",xlab='TSLF', ylab='f(log(TSLF+1))')
rug(sort(traindata$TSLF+mTSLF))
# plot using d365 (accumulated rainfall in the 365 days prior to sampling)
md365<-mean(log10(envdata_backup$d365+1))
xmatSmooth <- extract(Full.model, which=7)
# the below line had to be adapted from the paper to correct for the type of speech marks, the order and the spacing around the equals signs: see coef(Full.model)
yvalues=xmatSmooth[[1]]%*%coef(Full.model)$`bbs(d365, df = 1, center = TRUE)`
plot(sort(traindata$d365+md365),yvalues[order(traindata$d365+md365)], type="l",xlab='d365', ylab='f(log(d365+1))')
rug(sort(traindata$d365+md365))
# Predictions for out-of-bootstrap data
testdata <- datenSmall[-indvec,]
predictions<-predict(Full.model,newdata=testdata)
plot(exp(predictions),testdata$H_index)
abline(1,0)
# Compute pseudo r^2
m1 <-glm(H_index~1,data=traindata, family=gaussian)
null.model.coef <- coef(m1)
y<-testdata$H_index
lambda<-exp(predictions)
L1<-y*log(lambda)-lgamma(y+1)-lambda
c0<-exp(null.model.coef)
L0<-y*log(c0)-lgamma(y+1)-c0
n<-length(y)
r2<-(1-exp(-2/n*(sum(L1)-sum(L0))))/(1-exp(sum(L0))^{2/n})
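# The expression above is the likelihood-ratio pseudo r^2 (Nagelkerke/Cragg-Uhler form),
#   r2 = (1 - exp(-(2/n) * (logL1 - logL0))) / (1 - exp(logL0)^(2/n)),
# with L1 the fitted-model and L0 the intercept-only log-likelihood; note that the
# log-likelihood terms above are Poisson-type even though the models are Gaussian.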
# Compute out-of-bootstrap r^2 - NOT SURE I have done this correctly - I want to measure the predictive accuracy but I am not sure what I am doing here!
m1 <-glm(H_index~1,data=testdata, family=gaussian)
null.model.coef <- coef(m1)
y<-testdata$H_index
lambda<-exp(predictions)
L1<-y*log(lambda)-lgamma(y+1)-lambda
c0<-exp(null.model.coef)
L0<-y*log(c0)-lgamma(y+1)-c0
n<-length(y)
r2<-(1-exp(-2/n*(sum(L1)-sum(L0))))/(1-exp(sum(L0))^{2/n})
####################################################RICHNESS analysis
n<- nrow(daten.rich)
set.seed(806)
indvecL <-sample(1:n,n,replace=FALSE) # create a random set of numbers
datenSmall.rich <- daten.rich[indvecL,][1:(n/2),] # create a subset of half the original data using the random numbers
datenSmall.rich$Inundated.y=as.factor(datenSmall.rich$Inundated.y)
#specify model formula - kept relatively simple for the time being (bols are linear effects, bbs are smoothed effects and bspatial are spatial effects)
formulaB <- Richness ~ bols(Easting, intercept=FALSE)+bols(Northing, intercept=FALSE)+
bspatial(Easting, Northing, knots=20, center=TRUE, df=1, differences=1)+
bols(d30, intercept=FALSE)+bbs(d30, center=TRUE, df=1)+
bols(d365, intercept=FALSE)+bbs(d365, center=TRUE, df=1)+
bols(Flood_frequency, intercept=FALSE)+bbs(Flood_frequency, center=TRUE, df=1)+
bols(TSLF, intercept=FALSE)+bbs(TSLF, center=TRUE, df=1)+
bols(Inundated.y, intercept=FALSE)
predicted<-list() # creates a whole bunch of empty lists
predicted.insample <-list()
nuvec<-list()
null.model.coef<-list()
null.model.nu<-list()
# specify training set
set.seed(1)
n<-nrow(datenSmall.rich)
indvec<-sample(1:n,n,replace=TRUE)
traindata.rich<-datenSmall.rich[indvec,]
traindatav2.rich=traindata.rich[,-1] # remove site names from training data set
# Run model - FAILS
Full.model.rich <-gamboost(formulaB,data=traindatav2.rich, family=Poisson(), control=boost_control(mstop=2000,trace=TRUE)) #
# This model FAILS with an error around singularity which I think means that there are responses that are NOT unique combinations of the predictors - this did not happen for the diversity index
# because this index takes into account abundance as well as p/a
mopt <- mstop(aic <- AIC(Full.model.rich))
#########################Further plotting of model and partial effects
# Plot partial effects all together
# plot just smoothed effects
par(mfrow = c(3,2))
plot(Full.model, which = "bbs")
# plot just linear effects
par(mfrow = c(3,3))
plot(Full.model, which = "bols")
# Plot spatial effects
plot(Full.model, which = "bspatial")
|
# recreating the visualization at: https://www.cbsnews.com/news/amazon-hq2-cities-location-choices-new-second-headquarters/
#==============
# LOAD PACKAGES
#==============
library(rvest)
library(tidyverse)
library(stringr)
library(ggmap)
#=======
# SCRAPE
#=======
html.amzn_cities <- read_html("https://www.cbsnews.com/news/amazon-hq2-cities-location-choices-new-second-headquarters/")
df.amzn_cities <- html.amzn_cities %>%
html_nodes("table") %>%
.[[1]] %>%
html_table()
# inspect
head(df.amzn_cities)
#====================
# CHANGE COLUMN NAMES
#====================
# inspect col names
names(df.amzn_cities)
# assign new col names
names(df.amzn_cities) <- c("metro_area", "state", "population", "bachelors_degree_pct")
# inspect
head(df.amzn_cities)
# delete unneeded first row
df.amzn_cities <- df.amzn_cities[-1, ]
# inspect
head(df.amzn_cities)
#====================
# CHANGE COLUMN TYPES
#====================
str(df.amzn_cities)
# coerce population from chr to numeric by parsing out the number
df.amzn_cities <- mutate(df.amzn_cities, population = parse_number(population))
# inspect
head(df.amzn_cities)
typeof(df.amzn_cities$population)
# coerce bachelors_degree_pct to double
df.amzn_cities <- mutate(df.amzn_cities, bachelors_degree_pct = parse_number(bachelors_degree_pct))
# inspect
head(df.amzn_cities)
typeof(df.amzn_cities$bachelors_degree_pct)
#====================
# EXTRACT CITY NAMES
#====================
# Extract first city name from metro area, e.g. Los Angeles-Long Beach-Anaheim ==> Los Angeles
# -get the first city before a hyphen; add city column
df.amzn_cities <- df.amzn_cities %>%
mutate(city = str_extract(metro_area, "^[^-]*")) # get the first city before a hyphen
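# e.g. str_extract("Los Angeles-Long Beach-Anaheim", "^[^-]*") returns "Los Angeles":
# the pattern keeps everything from the start of the string up to (not including) the first hyphen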
# inspect
head(df.amzn_cities)
#========
# GEOCODE
#========
geocodes <- geocode(df.amzn_cities$city)
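# Note: with recent versions of ggmap, geocode() needs a registered Google API key
# before this call will run, e.g. ggmap::register_google(key = "YOUR_KEY")
# ("YOUR_KEY" is a placeholder, not a real key).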
# inspect
head(geocodes)
# merge geocodes into amazn df
df.amzn_cities <- cbind(df.amzn_cities, geocodes)
# inspect
head(df.amzn_cities)
#====================
# REORDER COL NAMES
#====================
df.amzn_cities <- select(df.amzn_cities, city, state, metro_area, population, bachelors_degree_pct, lon, lat)
# inspect
head(df.amzn_cities)
# rename lon to long
df.amzn_cities <- rename(df.amzn_cities, long = lon)
# inspect
head(df.amzn_cities)
#============
# GET USA MAP
#============
map.states <- map_data("state")
# plot first iteration
ggplot() +
geom_polygon(data = map.states, aes(x = long, y = lat, group = group)) + # plots map of USA state outlines
geom_point(data = df.amzn_cities, aes(x = long, y = lat,
size = population,
color = bachelors_degree_pct))
# plot final version
ggplot() +
geom_polygon(data = map.states, aes(x = long, y = lat, group = group)) + # plots map of USA state outlines
geom_point(data = df.amzn_cities, aes(x = long, y = lat,
size = population,
color = bachelors_degree_pct * .01),
alpha = .5) +
geom_point(data = df.amzn_cities, aes(x = long, y = lat,
size = population,
color = bachelors_degree_pct * .01),
shape = 1) +
coord_map(projection = "albers", lat0 = 30, lat1 = 40, xlim = c(-121, -73), ylim = c(25, 51)) +
scale_color_gradient2(low = "red", mid = "yellow", high = "green", midpoint = .41,
labels = scales::percent_format()) +
scale_size_continuous(range = c(.9, 11), breaks = c(2000000, 10000000, 20000000),
labels = scales::comma_format()) +
guides(color = guide_legend(reverse = T,
override.aes = list(alpha = 1, size = 4))) +
labs(color = "Bachelor's Degree \nPercent",
size = "Total Population \n(metro area)",
title = "Possible cities for new Amazon Headquarters",
subtitle = "Based on population % percent of people with college degrees") +
theme(text = element_text(color = "#464646", family = "American Typewriter"),
panel.background = element_blank(),
axis.title = element_blank(),
axis.ticks = element_blank(),
axis.text = element_blank(),
plot.title = element_text(size = 20),
plot.subtitle = element_text(size = 11),
legend.key = element_rect(fill = "white")
)
|
/possible-cities-amazon-new-headquarters.R
|
no_license
|
fiddlesleet/Maps
|
R
| false | false | 4,475 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runClusterExperiment.R
\name{runClusterExperiment}
\alias{runClusterExperiment}
\title{Run the shiny app for clusterExperiment package}
\usage{
runClusterExperiment()
}
\description{
This function runs the shiny app that corresponds to the clusterExperiment
package.
}
\details{
Typing \code{runClusterExperiment()} at the command prompt (after
loading the \code{clusterExpShiny} library) will cause a browser to open
with the shiny app.
}
|
/man/runClusterExperiment.Rd
|
no_license
|
epurdom/clusterExperimentShiny
|
R
| false | true | 523 |
rd
|
# Define fibonacci(N) as the sequence using sum of last N numbers.
# Fibonacci(2) is the classic 1, 1, 2, 3, 5, 8...
# Write function to compute Nth fibonacci(3) number
# 1, 1, 1, 3, 5, 9, 17,...
fib3 <- function(n) {
vec=rep(1, n)
if(n > 3) {
for(i in 4:n) vec[i] = vec[i-1] + vec[i-2] + vec[i-3]
}
return(vec[n])
}
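# Quick check against the sequence in the comment above (fib3(7) should be 17):
fib3(7) # 1, 1, 1, 3, 5, 9, 17 -> returns 17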
|
/fib3.R
|
no_license
|
CodyStumpo/InterviewCode
|
R
| false | false | 332 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gmxOnline.R
\name{scrapegmxHeadlines}
\alias{scrapegmxHeadlines}
\title{Scrape homepage of GMX}
\usage{
scrapegmxHeadlines(path)
}
\arguments{
\item{path}{Path where file with homepage in html format will be stored}
}
\description{
This function takes the headlines off of GMX and returns a dataFrame with two
columns: titles and URLs.
}
\author{
Simon Munzert, Pablo Barbera, Joshua Timm
}
|
/SCRAP/man/scrapegmxHeadlines.Rd
|
no_license
|
NetDem-USC/scraping
|
R
| false | true | 469 |
rd
|
library('quadprog')
data = read.csv('/Users/hutch/OneDrive - 중앙대학교/RinEcometrics/PS2/data_port_PS2.csv', header=T)
data = ts(data,start = c(1990,01),end = c(2018,09), frequency = 12)
head(data)
meanlist = NULL#variable holding the in-sample mean of each asset
phi1 = NULL
phi2 = NULL
phi3 = NULL
phi4 = NULL
phi5 = c(rep(1/5,5))
r1 = NULL
r2 = NULL
r3 = NULL
r4 = NULL
r5 = NULL
c = mean(data[,2:6])#c in constraint (2) is set to the global mean over all assets
data[121,]
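# Reading aid for the solve.QP() calls below: quadprog::solve.QP(Dmat, dvec, Amat, bvec, meq)
# minimises -dvec'phi + 0.5 * phi' Dmat phi subject to t(Amat) %*% phi >= bvec, with only the
# first `meq` constraints treated as equalities (meq defaults to 0). With Dmat = 2*covmatrix and
# dvec = 0 the objective is the portfolio variance phi' Sigma phi. Because meq is left at its
# default below, the budget and mean-return constraints act as ">=" rather than "=="; passing
# meq = 1 or meq = 2 (or adding the mirrored [-1] column as in the "completed code" section
# further down) may be what is intended.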
#implementation of the rolling-window procedure
for (i in 1:(dim(data)[1]-120)){#repeat for i = 1, 2, ..., 225
	#define the in-sample and out-of-sample sets
	insample = data[(i:(i+120-1)),]#the 120-row in-sample window
	outsample = data[(i+120),]#the out-of-sample observation, row t+W
	#define the variance-covariance matrix
covmatrix = cov(insample[,2:6])
	#define meanlist - the mean of each asset (column)
for (j in 2:dim(insample)[2]){
meanlist[j-1] = mean(insample[,j])}
	#strategy 1 (mean-variance portfolio, short selling allowed)
Q1=solve.QP( Dmat=2*covmatrix, dvec=c(0,0,0,0,0),
Amat=cbind(meanlist,rep(1,5)), bvec=c(c,1) )
phi1 = cbind(phi1,Q1$solution)
r1[i] = Q1$solution%*%outsample[2:6]
	#strategy 2 (minimum-variance portfolio)
Q2=solve.QP( Dmat=2*covmatrix, dvec=c(0,0,0,0,0),
Amat=cbind(rep(1,5)), bvec=c(1) )
phi2 = cbind(phi2,Q2$solution)
r2[i] = Q2$solution%*%outsample[2:6]
	#strategy 3 (mean-variance portfolio, no short selling)
Q3=solve.QP( Dmat=2*covmatrix, dvec=c(0,0,0,0,0),
Amat=cbind(meanlist,rep(1,5),diag(1,5)), bvec=c(c,1,rep(0,5)) )
phi3 = cbind(phi3, Q3$solution)
r3[i] = Q3$solution%*%outsample[2:6]
	#strategy 4 (minimum-variance portfolio, no short selling)
Q4= solve.QP( Dmat=2*covmatrix, dvec=c(0,0,0,0,0),
Amat=cbind(rep(1,5),diag(1,5)), bvec=c(1,rep(0,5)) )
phi4 = cbind(phi4,Q4$solution)
r4[i] = Q4$solution%*%outsample[2:6]
	#strategy 5 (equally weighted portfolio)
r5[i] = phi5%*%outsample[2:6] }
r = rbind(r1,r2,r3,r4,r5)
#compute the Sharpe ratio of each strategy
sharpe=NULL
for (i in 1:dim(r)[1]) {
m = mean(r[i,]); std = var(r[i,])^0.5; sharpe[i] = m/std }
sharpe
names(sharpe) = c(1,2,3,4,5)
match(min(sharpe), sharpe) #strategy with the smallest Sharpe ratio
match(max(sharpe),sharpe) #strategy with the largest Sharpe ratio
barplot(sharpe, main='Sharpe ratio', col=c(5,0,0,4,0))
#Q1 If estimation uses 120 observations and the 121st value gives the out-of-sample return,
#and the full data set has 345 observations, should the rolling window only run up to the 344th?
#Q2 In constraint (2), phi'*mu = c, the problem converges for some values of c and fails to
#converge for others - what value should c be set to?
#since t(A)%*%b = bvec
#define Amat
t(cbind(meanlist,rep(1,5),rep(-1,5),diag(1,5)))
#combine the identity matrix with the [1] and [-1] columns
#define bvec
constant=1
c(constant,1, -1,rep(0, 5)) # 0 0 0 0 0 0 0 0 0 0 1 -1
meanlist = c(1,1,1,1,1)
#completed code
# case1
2*covmatrix
cbind(meanlist,rep(1,5))
c(constant,1)
Q1=solve.QP(Dmat=2*covmatrix,dvec=c(0,0,0,0,0),Amat=cbind(meanlist,rep(1,5),rep(-1,5)),bvec=c(constant,1,-1))
sol1[i] = Q1$solution%*%x[2:6]
# case2
cbind(rep(1,5))
c(1)
Q2=solve.QP(Dmat=2*covmatrix,dvec=c(0,0,0,0,0),Amat=cbind(rep(1,5),rep(-1,5)),bvec=c(1,-1))
sol2[i] = Q2$solution%*%x[2:6]
# case3
cbind(meanlist,rep(1,5),diag(1,5))
c(constant,1,rep(0,5))
Q3=solve.QP(Dmat=2*covmatrix,dvec=c(0,0,0,0,0),Amat=cbind(meanlist,rep(1,5),rep(-1,5),diag(1,5)),bvec=c(constant,1,-1,rep(0,5)))
sol3[i] = Q3$solution%*%x[2:6]
# case4
cbind(rep(1,5),diag(1,5))
c(1,rep(0,5))
Q4= solve.QP(Dmat=2*covmatrix,dvec=c(0,0,0,0,0),Amat=cbind(rep(1,5),rep(-1,5),diag(1,5)),bvec=c(1,-1,rep(0,5)))
sol4[i] = Q4$solution%*%x[2:6]
# case5
phi = c(rep(1/5,5))
sol5[i] = phi%*%x[2:6]
#backup
QP1 = solve.QP(Dmat=2*covmatrix,dvec=c(0,0,0,0,0),Amat=cbind(matrix(c(1,1,1,1,1),5,1), meanlist), bvec=c(1,1) )
sol1[i] = QP$solution%*%x[2:6]
QP2 = solve.QP(Dmat=2*covmatrix,dvec=c(0,0,0,0,0),Amat=matrix(c(1,1,1,1,1),5,1), bvec=c(1) )
sol2[i] = QP2$solution%*%x[2:6]
mmat = NULL
stdmat = NULL
for (j in 1:dim(data)[1]){
mmat[j] = mean(data[j,2:6])
stdmat[j] = var(data[j,2:6])^0.5}
plot(y=mmat, x=stdmat, xlab='std.err',ylab='mean')
for (k in 1:5){
lines(phi1[k,],col=k)
}
lines(phi1[,2])
#can the standard error be obtained?
phi1[,2]
phi41 = ts(phi4[1,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi42 = ts(phi4[2,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi43 = ts(phi4[3,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi44 = ts(phi4[4,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi45 = ts(phi4[5,],start = c(2000,01),end = c(2018,09), frequency = 12)
par(mfrow=c(3,2))
plot(phi41, type='l', main='phi1 of Portfolio 4', xlab='', ylab='')
abline(h=0)
plot(phi42, type='l', main='phi2 of Portfolio 4', xlab='', ylab='')
abline(h=0)
plot(phi43, type='l', main='phi3 of Portfolio 4', xlab='', ylab='')
abline(h=0)
plot(phi44, type='l', main='phi4 of Portfolio 4', xlab='', ylab='')
abline(h=0)
plot(phi45, type='l', main='phi5 of Portfolio 4', xlab='', ylab='')
abline(h=0)
mr=NULL
sr=NULL
for (i in 1:5){
mr[i] = mean(r[i,])
sr[i] = var(r[i,])^0.5
}
totmat = t(rbind(mr,sr))
plot(totmat)
r1
plot(density(r1))
plot(density(phi1[1,]))
plot(density(phi1[2,]))
plot(density(phi2))
plot(density(phi3))
plot(density(phi4))
plot(density(phi5))
phi11 = ts(phi1[1,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi12 = ts(phi1[2,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi13 = ts(phi1[3,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi14 = ts(phi1[4,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi15 = ts(phi1[5,],start = c(2000,01),end = c(2018,09), frequency = 12)
par(mfrow=c(2,3))
plot(phi11, type='l', main='phi of Cnsmr - Portfolio 1', xlab='', ylab='')
abline(h=0)
plot(phi12, type='l', main='phi of Manuf - Portfolio 1', xlab='', ylab='')
abline(h=0)
plot(phi13, type='l', main='phi of HiTec - Portfolio 1', xlab='', ylab='')
abline(h=0)
plot(phi14, type='l', main='phi of Hlth - Portfolio 1', xlab='', ylab='')
abline(h=0)
plot(phi15, type='l', main='phi of Other - Portfolio 1', xlab='', ylab='')
abline(h=0)
phi21 = ts(phi2[1,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi22 = ts(phi2[2,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi23 = ts(phi2[3,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi24 = ts(phi2[4,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi25 = ts(phi2[5,],start = c(2000,01),end = c(2018,09), frequency = 12)
par(mfrow=c(2,3))
plot(phi21, type='l', main='phi of Cnsmr - Portfolio 2', xlab='', ylab='')
abline(h=0)
plot(phi22, type='l', main='phi of Manuf - Portfolio 2', xlab='', ylab='')
abline(h=0)
plot(phi23, type='l', main='phi of HiTec - Portfolio 2', xlab='', ylab='')
abline(h=0)
plot(phi24, type='l', main='phi of Hlth - Portfolio 2', xlab='', ylab='')
abline(h=0)
plot(phi25, type='l', main='phi of Other - Portfolio 2', xlab='', ylab='')
abline(h=0)
phi31 = ts(phi3[1,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi32 = ts(phi3[2,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi33 = ts(phi3[3,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi34 = ts(phi3[4,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi35 = ts(phi3[5,],start = c(2000,01),end = c(2018,09), frequency = 12)
par(mfrow=c(2,3))
plot(phi31, type='l', main='phi of Cnsmr - Portfolio 3', xlab='', ylab='')
plot(phi32, type='l', main='phi of Manuf - Portfolio 3', xlab='', ylab='')
plot(phi33, type='l', main='phi of HiTec - Portfolio 3', xlab='', ylab='')
plot(phi34, type='l', main='phi of Hlth - Portfolio 3', xlab='', ylab='')
plot(phi35, type='l', main='phi of Other - Portfolio 3', xlab='', ylab='')
phi41 = ts(phi4[1,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi42 = ts(phi4[2,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi43 = ts(phi4[3,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi44 = ts(phi4[4,],start = c(2000,01),end = c(2018,09), frequency = 12)
phi45 = ts(phi4[5,],start = c(2000,01),end = c(2018,09), frequency = 12)
par(mfrow=c(2,3))
plot(phi41, type='l', main='phi of Cnsmr - Portfolio 4', xlab='', ylab='')
plot(phi42, type='l', main='phi of Manuf - Portfolio 4', xlab='', ylab='')
plot(phi43, type='l', main='phi of HiTec - Portfolio 4', xlab='', ylab='')
plot(phi44, type='l', main='phi of Hlth - Portfolio 4', xlab='', ylab='')
plot(phi45, type='l', main='phi of Other - Portfolio 4', xlab='', ylab='')
|
/Portfolio Optimization.R
|
no_license
|
hutch24/Econometrics_with_R
|
R
| false | false | 8,608 |
r
|
#' Reads a data file and creates a dataframe with dplyr.
#' @param filename input character vector
#' @return dplyr::tbl_df by reading the whole file
#' @import readr
#' @import dplyr
fars_read <- function(filename) {
if(!file.exists(filename))
stop("file '", filename, "' does not exist")
data <- suppressMessages({
readr::read_csv(filename, progress = FALSE)
})
dplyr::tbl_df(data)
}
#' Returns a string with a file name made from a year like
#' "accident_1987.csv.bz2"
#' @param year input numeric
#' @return character vector
#' @examples
#' make_filename(1987)
make_filename <- function(year) {
year <- as.integer(year)
fname <- sprintf("accident_%d.csv.bz2", year)
system.file("extdata", fname, package = "fars")
}
#' The input is a vector of years and for each year
#' it reads a file and selects the columns Month and Year.
#' It adds the year as a new column to the data read in for that year.
#' We will end up with repeated rows (one per observation)
#' of what happened in a given month and year.
#' @param years input numeric vector
#' @return tibble with columns for all the year files
#' @import dplyr
#' @export
#' @examples
#' fars_read_years(c(2014,2015))
fars_read_years <- function(years) {
lapply(years, function(year) {
file <- make_filename(year)
tryCatch({
dat <- fars_read(file)
dplyr::mutate(dat, year = year) %>%
dplyr::select(MONTH, year)
}, error = function(e) {
warning("invalid year: ", year)
return(NULL)
})
})
}
#' It starts with having one row per observation
#' and columns for Month and years, what this
#' function does is to count how many observations
#' happen by Month and Year
#' @param years input numeric vector
#' @return tibble
#' @import dplyr
#' @import tidyr
#' @import magrittr
#' @export
#' @examples
#' fars_summarize_years(c(2014,2015))
fars_summarize_years <- function(years) {
dat_list <- fars_read_years(years)
dplyr::bind_rows(dat_list) %>%
dplyr::group_by(year, MONTH) %>%
dplyr::summarize(n = n()) %>%
tidyr::spread(year, n)
}
#' The final function produces a picture plotting
#' the observations (accidents) that happen in a state
#' in a given year.
#' It takes only the observation that belong in the state (using filter)
#'
#' @param state.num input character
#' @param year input numeric
#' @return plot of observations
#' @export
#' @import dplyr
#' @import maps
#' @examples
#' fars_map_state("21",2014)
#' fars_map_state("30",2015)
fars_map_state <- function(state.num, year) {
filename <- make_filename(year)
data <- fars_read(filename)
state.num <- as.integer(state.num)
if(!(state.num %in% unique(data$STATE)))
stop("invalid STATE number: ", state.num)
data.sub <- dplyr::filter(data, STATE == state.num)
if(nrow(data.sub) == 0L) {
message("no accidents to plot")
return(invisible(NULL))
}
is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
with(data.sub, {
maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
xlim = range(LONGITUD, na.rm = TRUE))
graphics::points(LONGITUD, LATITUDE, pch = 46)
})
}
|
/R/fars_functions.R
|
no_license
|
leofranco/fars
|
R
| false | false | 3,552 |
r
|
library(shiny)
renderInputs <- function(prefix) {
wellPanel(
fluidRow(
column(6,
sliderInput(paste0(prefix, "_", "n_obs"),
"Number of observations (in Years):",
min = 0, max = 20, value = 10),
sliderInput(paste0(prefix, "_", "start_capital"),
"Initial capital invested :",
min =500000, max = 5000000,
value = 1000000, step = 500000,
pre = "$", sep = ","),
sliderInput(paste0(prefix, "_", "annual_mean_return"),
"Annual investment return (n %)", min =0.0,
max = 15.0, value = 5.0, step = 0.5),
sliderInput(paste0(prefix, "_", "annual_ret_std_dev"),
"Annual investment volatility (in %):",
min = 0.0, max = 25.0, value = 7.0,
step = 0.1)
),
column(6,
sliderInput(paste0(prefix, "_", "annual_inflation"),
"Annual inflation (in %):",
min = 0, max = 5, value = 2.5,
step =0.5),
# "Annual inflation volatility (in %):",
# min= 0.0, max = 5.0, value =1.5,
# step = 0.05),
sliderInput(paste0(prefix, "_", "monthly_withdrawals"), "Monthly capital
withdrawals:" , min = 0, max = 50000,
value = 25000,step = 10000,
pre = "$", sep = ","),
sliderInput(paste0(prefix, "_", "n_sim"), "Number of simulations:",
min = 0, max = 100, value = 20)
)),
p(actionButton(paste0(prefix, "_", "recalc"),"Re-run simulation", icon("random")
))
)}
# Define UI for application that plots random distributions
fluidPage(theme="simplex.min.css",
tags$style(type="text/css",
"label {font-size: 12px;}",
".recalculating {opacity: 1.0;}"
),
# Application title
tags$h2("Wealth Returns, Inflation and Withdrawals"),
p("Adapted from retirement_simulation app in Github's shiny-examples
depository"),
#p("An adaptation of the",
#tags$a(href="http://glimmer.rstudio.com/systematicin/retirement.withdrawal/",
#"retirement app")
# "from",
# tags$a(href="http://systematicinvestor.wordpress.com/ , "Systematic Investor"),
# "to demonstrate the use of Shiny's new grid options."),
#hr(),
fluidRow(
column(6, tags$h3("Scenario A")),
column(6, tags$h3("Scenario B"))
),
fluidRow(
column(6, renderInputs("a")),
column(6, renderInputs("b"))
),
fluidRow(
column(6,
plotOutput("a_distPlot", height = "600px")
),
column(6,
plotOutput("b_distPlot", height = "600px")
)))
|
/investmentUi.R
|
no_license
|
wenlarry/DevelopingDataProducts
|
R
| false | false | 3,703 |
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(reactor)
# Define UI for application that draws a histogram
ui <- navbarPage(title = 'Reactor Test',
tabPanel('Old Faithful',
# Application title
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("bins",
"Number of bins:",
min = 1,
max = 50,
value = 30)
),
# Show a plot of the generated distribution
mainPanel(
titlePanel("Old Faithful Geyser Data"),
plotOutput("distPlot")
)
)
),
tabPanel('Reactor', reactorUI('faithful'))
)
# Define server logic required to draw a histogram
server <- function(input, output) {
data <- reactive({ faithful })
output$distPlot <- renderPlot({
# generate bins based on input$bins from ui.R
x <- data()[, 2]
bins <- seq(min(x), max(x), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
hist(x, breaks = bins, col = 'darkgray', border = 'white')
})
# add the reactor module
r <- reactorModule('faithful')
}
# Run the application
shinyApp(ui = ui, server = server)
|
/inst/example/app.R
|
no_license
|
joe-chelladurai/reactor
|
R
| false | false | 1,596 |
r
|
context("Namespace")
roc <- namespace_roclet()
test_that("export detects object name", {
out <- roc_proc_text(roc, "#' @export\na <- function(){}")
expect_equal(out, 'export(a)')
})
test_that("export parameter overrides default", {
out <- roc_proc_text(roc, "#' @export b\na <- function(){}")
expect_equal(out, 'export(b)')
})
test_that("export detects S4 class", {
out <- roc_proc_text(roc, "#' @export\nsetClass('a')")
expect_equal(out, 'exportClasses(a)')
})
test_that("exportClass overrides default class name", {
out <- roc_proc_text(roc, "#' @exportClass b\nsetClass('a')")
expect_equal(out, 'exportClasses(b)')
})
test_that("export detects method name", {
out <- roc_proc_text(roc, "
#' @export\n
setMethod('max', 'a', function(x, ...) x[1])")
expect_equal(out, 'exportMethods(max)')
})
test_that("exportMethod overrides default method name", {
out <- roc_proc_text(roc, "
#' @exportMethod c
setMethod('max', 'a', function(x, ...) x[1])")
expect_equal(out, 'exportMethods(c)')
})
test_that("other namespace tags produce correct output", {
out <- roc_proc_text(roc, "
#' @exportPattern test
#' @S3method test test
#' @import test
#' @importFrom test test
#' @importClassesFrom test test
#' @importMethodsFrom test test
NULL")
expect_equal(sort(out), sort(c("exportPattern(test)", "S3method(test,test)",
"import(test)", "importFrom(test,test)", "importClassesFrom(test,test)",
"importMethodsFrom(test,test)")))
})
|
/inst/tests/test-namespace.R
|
no_license
|
gthb/roxygen
|
R
| false | false | 1,511 |
r
|
# Fill in the template below to create your help
# Do not edit the "^#.*####$" lines
# Code sections should have the name Item_1, Item_2, etc
# Name ####
# Tags ####
# Description ####
# Packages ####
# Item_1 ####
|
/inst/templates/snip_skeleton.R
|
no_license
|
bobbeltje/snippie
|
R
| false | false | 221 |
r
|
################################
#LOOP OVER "CLOSE" GROUPS:
################################
close.analysis_svm.pvalues<-function(unk.obs.vec, close.groups, within.aligned.close.list, var.tol, unk.maxlag.within, maxlag.between)
{
#im.mat<-NULL
pvalue.vec<-NULL
for(i in 1:length(close.groups))
{
#Now we have to check the classification results for the unknown aligned against EACH of the "close" groups
	specific.grp<-close.groups[i]
print("*********************************************************")
print(paste("Iteration: ",i," Alignment with group: ", close.groups[i]))
print("*********************************************************")
	specific.grp.idx<-which(close.groups==specific.grp)
tmp<-align.unknown.to.group(unk.obs.vec, specific.grp.idx, within.aligned.close.list, lagmax=unk.maxlag.within, printQ=FALSE, plotQ=FALSE)
unk.obs.mov<-tmp[[1]] #The unknown, aligned to one of the "close" groups
within.aligned.close.list.tmp<-tmp[[2]] #The within group aligned data
#Align between groups
within.between.aligned.list.tmp<-align.between.groups.with.unknown.in.a.group(unk.obs.mov, specific.grp.idx, within.aligned.close.list.tmp, lbl.close, maxlag.between, "longest", printQ=FALSE)
Xdat<-within.between.aligned.list.tmp[[2]] #Within and Between group aligned data
wb.aligned.unk.obs.vec<-within.between.aligned.list.tmp[[1]] #Within and Between group aligned unknown, wrt close group i
	num.pts<-dim(Xdat)[2] #number of points in the profiles used, once aligned
Xdat<-scale(Xdat,center=TRUE,scale=FALSE)[,]
pca.model<-prcomp(Xdat,scale=FALSE)
#im.mat<-cbind(im.mat,summary(pca.model)$importance[3,1:50])
Mmax<-which(summary(pca.model)$importance[3,]>=var.tol)[1]
print("")
print(paste(100*var.tol,"% variance occurs at dimension: ", Mmax, sep=""))
print("Begin HOO-CV model dimension determination.")
#Find optimal discrimination dimension with HOO-CV
err.vec<-NULL
ind.mat<-NULL
for(j in 2:Mmax)
{
Z<-predict(pca.model)[,1:j]
ind.vec<-NULL
for(k in 1:nrow(Z))
{
Z.heldout<-t(as.matrix(Z[k,]))
lbl.heldout<-lbl.close[k]
Z.kept<-Z[-k,]
lbl.kept<-lbl.close[-k]
svm.model<-svm(Z.kept,lbl.kept,scale=FALSE,type="C-classification",kernel="linear",cost=0.1,fitted=TRUE,probability=TRUE)
pred<-predict(svm.model,Z.heldout)
#prob.vec<-attr(pred, "probabilities")[,]
ind.vec<-c(ind.vec,pred==lbl.heldout)
} #end for k
ind.mat<-cbind(ind.mat,ind.vec)
ccp<-(sum(ind.vec)/nrow(Z) )
err<-(1-ccp)*100
print(paste(j,err))
err.vec<-c(err.vec,err)
} #end for j
cv.err.mat<-cbind(2:Mmax,err.vec)
colnames(cv.err.mat)<-c("Dimension", "HOO-CV error")
print(cv.err.mat)
print("")
Mmin<-(which(err.vec==min(err.vec))+1)[1]
print(paste("Minimal error dimension is: ", Mmin,"D. Minimum HOO-CV error is: ",min(err.vec),sep=""))
#plot(2:Mmax,err.vec,typ="l")
#Predict unknown with chosen dimension:
Mpred<-Mmin
Z<-predict(pca.model)[,1:Mpred] #Grab PCA scores
#Project unknown into mimimal dimension space:
Apc<-pca.model$rotation[,1:Mpred]
Zunk.vec<-wb.aligned.unk.obs.vec%*%Apc
#Put the unknown into the "bag" with lbl.unk = specific.grp (ie the unk labeled as the group it is aligned on)
Zaug<-rbind(Z,Zunk.vec)
lblaug<-as.factor(c(as.character(lbl.close), as.character(specific.grp)))
#Determine nonconformity measures of each item in the bag, cf. Algorithmic Learning in a Random World and Tutorial on Conformal Prediction
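	#With scores alpha_1,...,alpha_n for the bag items and alpha_new for the augmented unknown,
	#the p-value computed below is p = #{ i : alpha_i >= alpha_new } / (n + 1), where the count
	#runs over all n + 1 scores (including the unknown's own).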
noncf.vec<-NULL
for(kk in 1:nrow(Zaug))
{
Zaug.heldout<-t(as.matrix(Zaug[kk,]))
lblaug.heldout<-lblaug[kk]
Zaug.kept<-Zaug[-kk,]
lblaug.kept<-lblaug[-kk]
svm.model<-svm(Zaug.kept,lblaug.kept,scale=FALSE,type="C-classification",kernel="linear",cost=0.1,fitted=TRUE,probability=TRUE)
#pred<-predict(svm.model,Zaug.heldout)
grp.platt.scores<-attr(predict(svm.model, Zaug.heldout, probability=TRUE), "probabilities")[,]
lblaug.heldout.idx<-which(names(grp.platt.scores)==as.character(lblaug.heldout))
platt.pred.score<-grp.platt.scores[lblaug.heldout.idx]
noncf.vec<-c(noncf.vec,platt.pred.score)
} #end for kk
pvalue<-sum(noncf.vec>=noncf.vec[length(noncf.vec)])/length(noncf.vec)
print(paste("Assignment group",specific.grp,"p-value:",pvalue))
pvalue.vec<-c(pvalue.vec,pvalue)
} #end for i
print(cbind(as.character(close.groups),as.character(pvalue.vec)))
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
}
|
/R/close.analysis_svm_pvalues.R
|
no_license
|
npetraco/profileslib
|
R
| false | false | 4,552 |
r
|
################################
#LOOP OVER "CLOSE" GROUPS:
################################
close.analysis_svm.pvalues<-function(unk.obs.vec, close.groups, within.aligned.close.list, var.tol, unk.maxlag.within, maxlag.between)
{
#im.mat<-NULL
pvalue.vec<-NULL
for(i in 1:length(close.grps))
{
#Now we have to check the classification results for the unknown aligned against EACH of the "close" groups
specific.grp<-close.grps[i]
print("*********************************************************")
print(paste("Iteration: ",i," Alignment with group: ", close.groups[i]))
print("*********************************************************")
specific.grp.idx<-which(close.grps==specific.grp)
tmp<-align.unknown.to.group(unk.obs.vec, specific.grp.idx, within.aligned.close.list, lagmax=unk.maxlag.within, printQ=FALSE, plotQ=FALSE)
unk.obs.mov<-tmp[[1]] #The unknown, aligned to one of the "close" groups
within.aligned.close.list.tmp<-tmp[[2]] #The within group aligned data
#Align between groups
within.between.aligned.list.tmp<-align.between.groups.with.unknown.in.a.group(unk.obs.mov, specific.grp.idx, within.aligned.close.list.tmp, lbl.close, maxlag.between, "longest", printQ=FALSE)
Xdat<-within.between.aligned.list.tmp[[2]] #Within and Between group aligned data
wb.aligned.unk.obs.vec<-within.between.aligned.list.tmp[[1]] #Within and Between group aligned unknown, wrt close group i
num.pts<-dim(Xdat)[2] #number of points in the profiles used, once aligned
Xdat<-scale(Xdat,center=TRUE,scale=FALSE)[,]
pca.model<-prcomp(Xdat,scale=FALSE)
#im.mat<-cbind(im.mat,summary(pca.model)$importance[3,1:50])
Mmax<-which(summary(pca.model)$importance[3,]>=var.tol)[1]
print("")
print(paste(100*var.tol,"% variance occurs at dimension: ", Mmax, sep=""))
print("Begin HOO-CV model dimension determination.")
#Find optimal discrimination dimension with HOO-CV
err.vec<-NULL
ind.mat<-NULL
for(j in 2:Mmax)
{
Z<-predict(pca.model)[,1:j]
ind.vec<-NULL
for(k in 1:nrow(Z))
{
Z.heldout<-t(as.matrix(Z[k,]))
lbl.heldout<-lbl.close[k]
Z.kept<-Z[-k,]
lbl.kept<-lbl.close[-k]
svm.model<-svm(Z.kept,lbl.kept,scale=FALSE,type="C-classification",kernel="linear",cost=0.1,fitted=TRUE,probability=TRUE)
pred<-predict(svm.model,Z.heldout)
#prob.vec<-attr(pred, "probabilities")[,]
ind.vec<-c(ind.vec,pred==lbl.heldout)
} #end for k
ind.mat<-cbind(ind.mat,ind.vec)
ccp<-(sum(ind.vec)/nrow(Z) )
err<-(1-ccp)*100
print(paste(j,err))
err.vec<-c(err.vec,err)
} #end for j
cv.err.mat<-cbind(2:Mmax,err.vec)
colnames(cv.err.mat)<-c("Dimension", "HOO-CV error")
print(cv.err.mat)
print("")
Mmin<-(which(err.vec==min(err.vec))+1)[1]
print(paste("Minimal error dimension is: ", Mmin,"D. Minimum HOO-CV error is: ",min(err.vec),sep=""))
#plot(2:Mmax,err.vec,typ="l")
#Predict unknown with chosen dimension:
Mpred<-Mmin
Z<-predict(pca.model)[,1:Mpred] #Grab PCA scores
#Project unknown into minimal dimension space:
Apc<-pca.model$rotation[,1:Mpred]
Zunk.vec<-wb.aligned.unk.obs.vec%*%Apc
#Put the unknown into the "bag" with lbl.unk = specific.grp (ie the unk labeled as the group it is aligned on)
Zaug<-rbind(Z,Zunk.vec)
lblaug<-as.factor(c(as.character(lbl.close), as.character(specific.grp)))
#Determine nonconformity measures of each item in the bag, cf. Algorithmic Learning in a Random World and Tutorial on Conformal Prediction
noncf.vec<-NULL
for(kk in 1:nrow(Zaug))
{
Zaug.heldout<-t(as.matrix(Zaug[kk,]))
lblaug.heldout<-lblaug[kk]
Zaug.kept<-Zaug[-kk,]
lblaug.kept<-lblaug[-kk]
svm.model<-svm(Zaug.kept,lblaug.kept,scale=FALSE,type="C-classification",kernel="linear",cost=0.1,fitted=TRUE,probability=TRUE)
#pred<-predict(svm.model,Zaug.heldout)
grp.platt.scores<-attr(predict(svm.model, Zaug.heldout, probability=TRUE), "probabilities")[,]
lblaug.heldout.idx<-which(names(grp.platt.scores)==as.character(lblaug.heldout))
platt.pred.score<-grp.platt.scores[lblaug.heldout.idx]
noncf.vec<-c(noncf.vec,platt.pred.score)
} #end for kk
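#p-value for this candidate group: the fraction of items in the bag (the unknown is the last
#element of noncf.vec) whose Platt score for their own label is at least the unknown's score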
pvalue<-sum(noncf.vec>=noncf.vec[length(noncf.vec)])/length(noncf.vec)
print(paste("Assignment group",specific.grp,"p-value:",pvalue))
pvalue.vec<-c(pvalue.vec,pvalue)
} #end for i
print(cbind(as.character(close.groups),as.character(pvalue.vec)))
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
}
|
## These functions cache the inverse of a matrix so that it does not have to be
## recomputed every time it is needed.
## makeCacheMatrix creates a special "matrix" object: a list of functions that
## store a matrix and can cache its inverse (set/get the matrix, set/get the inverse).
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already in the cache it is returned directly; otherwise it is
## computed with solve(), stored in the cache, and then returned.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
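## Illustrative usage (hypothetical values, not part of the assignment functions):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse with solve() and caches it
## cacheSolve(cm)   # second call prints "getting cached data" and returns the cached inverse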
|
/cachematrix.R
|
no_license
|
naveens239/ProgrammingAssignment2
|
R
| false | false | 1,075 |
r
|
## These functions cache the inverse of a matrix so that it does not have to be
## recomputed every time it is needed.
## makeCacheMatrix creates a special "matrix" object: a list of functions that
## store a matrix and can cache its inverse (set/get the matrix, set/get the inverse).
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already in the cache it is returned directly; otherwise it is
## computed with solve(), stored in the cache, and then returned.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
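## Illustrative usage (hypothetical values, not part of the assignment functions):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse with solve() and caches it
## cacheSolve(cm)   # second call prints "getting cached data" and returns the cached inverse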
|
alphabet <- c( "Alpha", "Bravo", "Charlie" )
print( alphabet )
print( paste( "2nd Element: ", alphabet[2] ) )
print( paste( "Vector Length: ", length( alphabet ) ) )
alphabet[ 5 ] <- "Echo"
print( alphabet )
print( paste( "Vector Length Now: ", length( alphabet ) ) )
print( paste( "Is alphabet a Vector: ", is.vector( alphabet ) ) )
|
/SRC/MyRScripts/Multiple.R
|
no_license
|
djohnson67/RScripts
|
R
| false | false | 352 |
r
|
alphabet <- c( "Alpha", "Bravo", "Charlie" )
print( alphabet )
print( paste( "2nd Element: ", alphabet[2] ) )
print( paste( "Vector Length: ", length( alphabet ) ) )
alphabet[ 5 ] <- "Echo"
print( alphabet )
print( paste( "Vector Length Now: ", length( alphabet ) ) )
print( paste( "Is alphabet a Vector: ", is.vector( alphabet ) ) )
|
setwd("~/Desktop/2019_Summer/Iakoucheva_Lab/R_Codes/Cul3_Permutation/CUL3_MICE/data/DE_NULL_DISTRIBUTIONS")
library(tidyverse)
# Data Processing & Null Distribution Generation
deg_files <- list.files(pattern = "deg", full.names = T)
dep_files <- list.files(pattern = "dep", full.names = T)
test <- read.csv("./null_distribution_deg_PERMUTATION_1.csv")
nine_dge_name <- paste(str_split(
str_split(unique(test$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,1],str_split(
str_split(unique(test$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,2],
sep = "_")
test_1 <- read.csv("./null_distribution_dep_PERMUTATION_9.csv")
six_dpe_name <- paste(str_split(
str_split(unique(test_1$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,1],str_split(
str_split(unique(test_1$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,2],
sep = "_")
lists_names <- as.character(unique(test$GENELIST))
input_matrix <- expand_grid(nine_dge_name, lists_names)
null_dist_list <- lapply(seq(length(input_matrix$nine_dge_name)), function(x){
temp_result <- lapply(deg_files, function(fn){
temp_sheet <- read.csv(fn) %>%
filter(grepl(as.character(input_matrix[x,1]), FILE)) %>%
filter(GENELIST == as.character(input_matrix[x,2]))
}) %>%
bind_rows() %>%
arrange(OVERLAP)
}) %>%
set_names(nm = paste(input_matrix$nine_dge_name,input_matrix$lists_names, sep = "_"))
lists_names_dep <- as.character(unique(test_1$GENELIST))
input_matrix_dep <- expand_grid(six_dpe_name, lists_names_dep)
null_dist_list_dep <- lapply(seq(length(input_matrix_dep$six_dpe_name)), function(x){
temp_result <- lapply(dep_files, function(fn){
temp_sheet <- read.csv(fn) %>%
filter(grepl(as.character(input_matrix_dep[x,1]), FILE)) %>%
filter(GENELIST == as.character(input_matrix_dep[x,2]))
}) %>%
bind_rows() %>%
arrange(OVERLAP)
}) %>%
set_names(nm = paste(input_matrix_dep$six_dpe_name,input_matrix_dep$lists_names_dep, sep = "_"))
# P_val & FDR Calculation
setwd("~/Desktop/2019_Summer/Iakoucheva_Lab/R_Codes/Cul3_Permutation/Permutation_Result")
dge_lists <- list.files("./src/DGE", full.names = T)
dpe_lists <- list.files("./src/DPE", full.names = T)
P_val_deg <- lapply(seq(length(input_matrix$nine_dge_name)), function(x){
temp_dge <- read.csv(dge_lists[grepl(as.character(input_matrix[x,1]), dge_lists)]) %>%
filter(FDR <= 0.1) %>%
pull(Gene_name) %>%
toupper()
target_list_genes <- readxl::read_xlsx("./Input_files/ASDRelevantGeneListsFromLiterature.xlsx",
sheet = as.character(input_matrix[x,2])) %>%
pull(gene_symbol)
temp_overlap <- length(intersect(temp_dge, target_list_genes))
null_dist <- null_dist_list[[paste(as.character(input_matrix[x,1]),
as.character(input_matrix[x,2]),
sep = "_")]] %>%
pull(OVERLAP)
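  # Permutation p-value with the usual +1 correction, so the estimate can never be exactly zero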
p_val <- (sum(null_dist > temp_overlap)+1)/(length(null_dist)+1)
data.frame("P_val" = c(p_val),
"Overlap" = c(temp_overlap))
}) %>%
bind_rows()
final_dge_permutation_result <- bind_cols(input_matrix, P_val_deg)
P_val_dep <- lapply(seq(length(input_matrix_dep$six_dpe_name)), function(x){
temp_dpe <- read.csv(dpe_lists[grepl(as.character(input_matrix_dep[x,1]), dpe_lists)]) %>%
filter(adj.P.Val <= 0.15) %>%
pull(GeneSymbol) %>%
toupper()
target_list_genes <- readxl::read_xlsx("./Input_files/ASDRelevantGeneListsFromLiterature.xlsx",
sheet = as.character(input_matrix_dep[x,2])) %>%
pull(gene_symbol)
temp_overlap <- length(intersect(temp_dpe, target_list_genes))
null_dist <- null_dist_list_dep[[paste(as.character(input_matrix_dep[x,1]),
as.character(input_matrix_dep[x,2]),
sep = "_")]] %>%
pull(OVERLAP)
p_val <- (sum(null_dist > temp_overlap)+1)/(length(null_dist)+1)
data.frame("P_val" = c(p_val),
"Overlap" = c(temp_overlap))
}) %>%
bind_rows()
final_dpe_permutation_result <- bind_cols(input_matrix_dep, P_val_dep)
final_dge_permutation_result <- lapply(unique(input_matrix$nine_dge_name), function(x){
temp_result <- final_dge_permutation_result %>%
filter(nine_dge_name == x) %>%
mutate(FDR_BH = p.adjust(P_val, method = "BH"),
FDR_bonferroni = p.adjust(P_val, method = "bonferroni"))
}) %>%
bind_rows()
final_dpe_permutation_result <- lapply(unique(input_matrix_dep$six_dpe_name), function(x){
temp_result <- final_dpe_permutation_result %>%
filter(six_dpe_name == x) %>%
mutate(FDR_BH = p.adjust(P_val, method = "BH"),
FDR_bonferroni = p.adjust(P_val, method = "bonferroni"))
}) %>%
bind_rows()
final_dge_permutation_result <- final_dge_permutation_result %>%
rename("nine_dge_name" = "DGE_List") %>%
mutate(Period = str_split(DGE_List, "_", simplify = T)[,1],
Region = str_split(DGE_List, "_", simplify = T)[,2])
final_dpe_permutation_result <- final_dpe_permutation_result %>%
rename("six_dpe_name" = "DPE_List") %>%
mutate(Period = str_split(DPE_List, "_", simplify = T)[,1],
Region = str_split(DPE_List, "_", simplify = T)[,2])
writexl::write_xlsx(final_dge_permutation_result, "./Output_files/CUL3_DGE_Permutation_Result_015.xlsx")
writexl::write_xlsx(final_dpe_permutation_result, "./Output_files/CUL3_DPE_Permutation_Result_015.xlsx")
final_dge_permutation_result <- final_dge_permutation_result %>%
mutate(Period = str_split(DGE_List, "_", simplify = T)[,1],
Region = str_split(DGE_List, "_", simplify = T)[,2])
|
/P_Val_FDR_after_Permutation.R
|
no_license
|
Pramod2776/Cul3_ASD_Proteomics
|
R
| false | false | 5,790 |
r
|
setwd("~/Desktop/2019_Summer/Iakoucheva_Lab/R_Codes/Cul3_Permutation/CUL3_MICE/data/DE_NULL_DISTRIBUTIONS")
library(tidyverse)
# Data Processing & Null Distribution Generation
deg_files <- list.files(pattern = "deg", full.names = T)
dep_files <- list.files(pattern = "dep", full.names = T)
test <- read.csv("./null_distribution_deg_PERMUTATION_1.csv")
nine_dge_name <- paste(str_split(
str_split(unique(test$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,1],str_split(
str_split(unique(test$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,2],
sep = "_")
test_1 <- read.csv("./null_distribution_dep_PERMUTATION_9.csv")
six_dpe_name <- paste(str_split(
str_split(unique(test_1$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,1],str_split(
str_split(unique(test_1$FILE), pattern = "/", simplify = T)[,2],
pattern = "_",
simplify = T)[,2],
sep = "_")
lists_names <- as.character(unique(test$GENELIST))
input_matrix <- expand_grid(nine_dge_name, lists_names)
null_dist_list <- lapply(seq(length(input_matrix$nine_dge_name)), function(x){
temp_result <- lapply(deg_files, function(fn){
temp_sheet <- read.csv(fn) %>%
filter(grepl(as.character(input_matrix[x,1]), FILE)) %>%
filter(GENELIST == as.character(input_matrix[x,2]))
}) %>%
bind_rows() %>%
arrange(OVERLAP)
}) %>%
set_names(nm = paste(input_matrix$nine_dge_name,input_matrix$lists_names, sep = "_"))
lists_names_dep <- as.character(unique(test_1$GENELIST))
input_matrix_dep <- expand_grid(six_dpe_name, lists_names_dep)
null_dist_list_dep <- lapply(seq(length(input_matrix_dep$six_dpe_name)), function(x){
temp_result <- lapply(dep_files, function(fn){
temp_sheet <- read.csv(fn) %>%
filter(grepl(as.character(input_matrix_dep[x,1]), FILE)) %>%
filter(GENELIST == as.character(input_matrix_dep[x,2]))
}) %>%
bind_rows() %>%
arrange(OVERLAP)
}) %>%
set_names(nm = paste(input_matrix_dep$six_dpe_name,input_matrix_dep$lists_names_dep, sep = "_"))
# P_val & FDR Calculation
setwd("~/Desktop/2019_Summer/Iakoucheva_Lab/R_Codes/Cul3_Permutation/Permutation_Result")
dge_lists <- list.files("./src/DGE", full.names = T)
dpe_lists <- list.files("./src/DPE", full.names = T)
P_val_deg <- lapply(seq(length(input_matrix$nine_dge_name)), function(x){
temp_dge <- read.csv(dge_lists[grepl(as.character(input_matrix[x,1]), dge_lists)]) %>%
filter(FDR <= 0.1) %>%
pull(Gene_name) %>%
toupper()
target_list_genes <- readxl::read_xlsx("./Input_files/ASDRelevantGeneListsFromLiterature.xlsx",
sheet = as.character(input_matrix[x,2])) %>%
pull(gene_symbol)
temp_overlap <- length(intersect(temp_dge, target_list_genes))
null_dist <- null_dist_list[[paste(as.character(input_matrix[x,1]),
as.character(input_matrix[x,2]),
sep = "_")]] %>%
pull(OVERLAP)
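  # Permutation p-value with the usual +1 correction, so the estimate can never be exactly zero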
p_val <- (sum(null_dist > temp_overlap)+1)/(length(null_dist)+1)
data.frame("P_val" = c(p_val),
"Overlap" = c(temp_overlap))
}) %>%
bind_rows()
final_dge_permutation_result <- bind_cols(input_matrix, P_val_deg)
P_val_dep <- lapply(seq(length(input_matrix_dep$six_dpe_name)), function(x){
temp_dpe <- read.csv(dpe_lists[grepl(as.character(input_matrix_dep[x,1]), dpe_lists)]) %>%
filter(adj.P.Val <= 0.15) %>%
pull(GeneSymbol) %>%
toupper()
target_list_genes <- readxl::read_xlsx("./Input_files/ASDRelevantGeneListsFromLiterature.xlsx",
sheet = as.character(input_matrix_dep[x,2])) %>%
pull(gene_symbol)
temp_overlap <- length(intersect(temp_dpe, target_list_genes))
null_dist <- null_dist_list_dep[[paste(as.character(input_matrix_dep[x,1]),
as.character(input_matrix_dep[x,2]),
sep = "_")]] %>%
pull(OVERLAP)
p_val <- (sum(null_dist > temp_overlap)+1)/(length(null_dist)+1)
data.frame("P_val" = c(p_val),
"Overlap" = c(temp_overlap))
}) %>%
bind_rows()
final_dpe_permutation_result <- bind_cols(input_matrix_dep, P_val_dep)
final_dge_permutation_result <- lapply(unique(input_matrix$nine_dge_name), function(x){
temp_result <- final_dge_permutation_result %>%
filter(nine_dge_name == x) %>%
mutate(FDR_BH = p.adjust(P_val, method = "BH"),
FDR_bonferroni = p.adjust(P_val, method = "bonferroni"))
}) %>%
bind_rows()
final_dpe_permutation_result <- lapply(unique(input_matrix_dep$six_dpe_name), function(x){
temp_result <- final_dpe_permutation_result %>%
filter(six_dpe_name == x) %>%
mutate(FDR_BH = p.adjust(P_val, method = "BH"),
FDR_bonferroni = p.adjust(P_val, method = "bonferroni"))
}) %>%
bind_rows()
final_dge_permutation_result <- final_dge_permutation_result %>%
rename("nine_dge_name" = "DGE_List") %>%
mutate(Period = str_split(DGE_List, "_", simplify = T)[,1],
Region = str_split(DGE_List, "_", simplify = T)[,2])
final_dpe_permutation_result <- final_dpe_permutation_result %>%
rename("six_dpe_name" = "DPE_List") %>%
mutate(Period = str_split(DPE_List, "_", simplify = T)[,1],
Region = str_split(DPE_List, "_", simplify = T)[,2])
writexl::write_xlsx(final_dge_permutation_result, "./Output_files/CUL3_DGE_Permutation_Result_015.xlsx")
writexl::write_xlsx(final_dpe_permutation_result, "./Output_files/CUL3_DPE_Permutation_Result_015.xlsx")
final_dge_permutation_result <- final_dge_permutation_result %>%
mutate(Period = str_split(DGE_List, "_", simplify = T)[,1],
Region = str_split(DGE_List, "_", simplify = T)[,2])
|
# Load libraries
library(arules)
library(arulesViz)
library(DT)
library(shiny)
library(shinydashboard)
library(plotly)
# Load Data
data("Groceries")
basket = read.csv('basket.csv',header = TRUE)
item = read.csv('single.csv',header = TRUE)
product_info = read.csv('product_info.csv',header = TRUE)
# Design User Interface
ui = shinyUI(navbarPage("Market Basket Analysis",
navbarMenu("Data",
tabPanel("Product Info",DTOutput('product_info'),downloadButton('Download','download_info')),
tabPanel('Transactions - Basket Format',DTOutput('basket')),
tabPanel('Transactions - Single Format',DTOutput('item'))
),
navbarMenu('Rule',
tabPanel('Visualization',
sidebarLayout(
sidebarPanel(
sliderInput("support", "Support:", min = 0, max = 1, value = 0.05, step = 1/100),
sliderInput("confidence", "Confidence:", min = 0, max = 1, value = 0.2, step = 1/100),
actionButton("rule_button", "Run New Rules")
),
mainPanel(
tabsetPanel(
tabPanel("Rule",DTOutput('rule_table')),
tabPanel("Scatter Plot",plotlyOutput('rule_scatter')),
tabPanel("Matrix Plot",plotlyOutput('rule_matrix_confidence')),
tabPanel('Network Graph',plotOutput('rule_graph'))
)))
))
))
# Server Elements
server = function(input, output) {
# Data - Basket Format
output$basket = renderDT({
datatable(basket, filter = 'top',rownames = FALSE)
})
# Data - Single Format
output$item = renderDT({
datatable(item, filter = 'top',rownames = FALSE)
})
# Data - Product Info
output$product_info = renderDT({
datatable(product_info, filter = 'top',rownames = FALSE)
})
# Reactive Object - event triggered when button clicked
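  # eventReactive reads the support/confidence sliders only when the button is clicked,
  # so apriori() is not re-run on every slider change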
rule = eventReactive(input$rule_button, {
apriori(Groceries, parameter=list(support=input$support,
confidence=input$confidence))
})
# Rule Visualization - Table
output$rule_table = renderDataTable({
p=inspectDT(rule())[[1]]$data
ratio_list = c('support','confidence','lift')
for (i in ratio_list){
p[i] = round(p[i], digits = 2)
}
datatable(p,filter='top', caption = 'Association Rules', rownames = FALSE)
})
# Rule Visualization - Scatter Plot
output$rule_scatter = renderPlotly({
plotly_arules(rule(), measure = c('lift','confidence'), shading = 'support') %>%
layout(title = 'Scatter Plot')
})
# Rule Visualization - Matrix Plot
output$rule_matrix_confidence = renderPlotly({
plotly_arules(rule(),method = 'matrix', measure = 'confidence', shading = 'confidence', max = 20) %>%
layout(title = 'Matrix Plot(Confidence)',
xaxis = list(title = 'Antecedent'),
yaxis = list(title = 'Consequent'))
})
# Rule Visualization - Network Graph
output$rule_graph = renderPlot({
top_lift = sort(rule(), decreasing = TRUE, na.last = NA, by = 'lift')
subrule = head(top_lift,10)
plot(subrule, method = 'graph', main='Network Graph for Top Ten Rules')
})
}
# Run app
shinyApp(ui, server)
|
/app.R
|
no_license
|
LuqiKong/Market-Basket
|
R
| false | false | 3,969 |
r
|
# Load libraries
library(arules)
library(arulesViz)
library(DT)
library(shiny)
library(shinydashboard)
library(plotly)
# Load Data
data("Groceries")
basket = read.csv('basket.csv',header = TRUE)
item = read.csv('single.csv',header = TRUE)
product_info = read.csv('product_info.csv',header = TRUE)
# Design User Interface
ui = shinyUI(navbarPage("Market Basket Analysis",
navbarMenu("Data",
tabPanel("Product Info",DTOutput('product_info'),downloadButton('Download','download_info')),
tabPanel('Transactions - Basket Format',DTOutput('basket')),
tabPanel('Transactions - Single Format',DTOutput('item'))
),
navbarMenu('Rule',
tabPanel('Visualization',
sidebarLayout(
sidebarPanel(
sliderInput("support", "Support:", min = 0, max = 1, value = 0.05, step = 1/100),
sliderInput("confidence", "Confidence:", min = 0, max = 1, value = 0.2, step = 1/100),
actionButton("rule_button", "Run New Rules")
),
mainPanel(
tabsetPanel(
tabPanel("Rule",DTOutput('rule_table')),
tabPanel("Scatter Plot",plotlyOutput('rule_scatter')),
tabPanel("Matrix Plot",plotlyOutput('rule_matrix_confidence')),
tabPanel('Network Graph',plotOutput('rule_graph'))
)))
))
))
# Server Elements
server = function(input, output) {
# Data - Basket Format
output$basket = renderDT({
datatable(basket, filter = 'top',rownames = FALSE)
})
# Data - Single Format
output$item = renderDT({
datatable(item, filter = 'top',rownames = FALSE)
})
# Data - Product Info
output$product_info = renderDT({
datatable(product_info, filter = 'top',rownames = FALSE)
})
# Reactive Object - event triggered when button clicked
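  # eventReactive reads the support/confidence sliders only when the button is clicked,
  # so apriori() is not re-run on every slider change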
rule = eventReactive(input$rule_button, {
apriori(Groceries, parameter=list(support=input$support,
confidence=input$confidence))
})
# Rule Visualization - Table
output$rule_table = renderDataTable({
p=inspectDT(rule())[[1]]$data
ratio_list = c('support','confidence','lift')
for (i in ratio_list){
p[i] = round(p[i], digits = 2)
}
datatable(p,filter='top', caption = 'Association Rules', rownames = FALSE)
})
# Rule Visualization - Scatter Plot
output$rule_scatter = renderPlotly({
plotly_arules(rule(), measure = c('lift','confidence'), shading = 'support') %>%
layout(title = 'Scatter Plot')
})
# Rule Visualization - Matrix Plot
output$rule_matrix_confidence = renderPlotly({
plotly_arules(rule(),method = 'matrix', measure = 'confidence', shading = 'confidence', max = 20) %>%
layout(title = 'Matrix Plot(Confidence)',
xaxis = list(title = 'Antecedent'),
yaxis = list(title = 'Consequent'))
})
# Rule Visualization - Network Graph
output$rule_graph = renderPlot({
top_lift = sort(rule(), decreasing = TRUE, na.last = NA, by = 'lift')
subrule = head(top_lift,10)
plot(subrule, method = 'graph', main='Network Graph for Top Ten Rules')
})
}
# Run app
shinyApp(ui, server)
|
# gcifu$ --- return the current value of Comunit
integer function gcifu$ (funit)
integer funit
include SWT_COMMON
funit = Comunit
return (funit)
end
|
/swt/src/lib/swt/src/gcifu$.r
|
no_license
|
arnoldrobbins/gt-swt
|
R
| false | false | 171 |
r
|
# gcifu$ --- return the current value of Comunit
integer function gcifu$ (funit)
integer funit
include SWT_COMMON
funit = Comunit
return (funit)
end
|
library(rorcid)
library(tidyverse)
edu <- do.call("rbind", rorcid::orcid_educations("0000-0001-9713-2330")$`0000-0001-9713-2330`$`affiliation-group`$summaries)
saveRDS(edu, "./data/edu.rds")
|
/publications/get_education.R
|
permissive
|
jimjunker1/jimjunker1.github.io
|
R
| false | false | 191 |
r
|
library(rorcid)
library(tidyverse)
edu <- do.call("rbind", rorcid::orcid_educations("0000-0001-9713-2330")$`0000-0001-9713-2330`$`affiliation-group`$summaries)
saveRDS(edu, "./data/edu.rds")
|
# Assign some values
A <- 10
B <- 5
C <- A + B
var1 <- 2.5
var2 <- 4
greeting <- "Hello"
name <- "Bobby"
# Some numerical calculations
result <- var1 / var2
result
answer <- sqrt(A)
answer
# Text calculation
message <- paste(greeting, name)
message
|
/Reference Scripts/Assignments.R
|
no_license
|
jimcc333/R_projects
|
R
| false | false | 254 |
r
|
# Assign some values
A <- 10
B <- 5
C <- A + B
var1 <- 2.5
var2 <- 4
greeting <- "Hello"
name <- "Bobby"
# Some numerical calculations
result <- var1 / var2
result
answer <- sqrt(A)
answer
# Text calculation
message <- paste(greeting, name)
message
|
`%>%` <- magrittr::`%>%`
source("R/Remover NA.R")
pokemon <- readr::read_rds("data/pokemon.rds") %>%
tirando_na(id_geracao)
# How many Pokemon from each generation are shown ------------------------
pokemon %>%
dplyr::mutate(id_geracao = forcats::as_factor(id_geracao)) %>%
dplyr::group_by(id_geracao) %>%
dplyr::summarise(quantidade = dplyr::n()) %>%
ggplot2::ggplot(ggplot2::aes(x = id_geracao, y = quantidade))+
ggplot2::geom_col()+
Rokemon::theme_gameboy()+
ggplot2::labs(x = "Geração dos Pokémons",
y = "Quantidade de Pokémons",
title = "Quantidade de Pokémons Por Cada Geração"
)+
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))+
ggplot2::geom_label(ggplot2::aes(x = id_geracao, y = quantidade,label = quantidade))
# Pokemon by type and by generation -----------------------------------------
pokemon %>%
tidyr::pivot_longer(
cols = dplyr::starts_with("tipo"),
names_to = "especificacao",
values_to = "tipos") %>%
tirando_na(tipos) %>%
dplyr::mutate(id_geracao = forcats::as_factor(id_geracao)) %>%
dplyr::group_by(id_geracao, tipos) %>%
dplyr::summarise(quantidade = dplyr::n()) %>%
ggplot2::ggplot(ggplot2::aes(x = quantidade, y = tipos, fill = id_geracao))+
ggplot2::geom_col()+
ggplot2::labs(x = "Quantidade",
y = "Tipos",
title = "Pokémons por Tipo e po Geração"
)+
ggplot2::guides(fill= ggplot2::guide_legend(title = "Gerações"))+
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
|
/data-raw/Características Descritivas.R
|
no_license
|
Flavsou/Projeto_TCC_R4DSII
|
R
| false | false | 1,572 |
r
|
`%>%` <- magrittr::`%>%`
source("R/Remover NA.R")
pokemon <- readr::read_rds("data/pokemon.rds") %>%
tirando_na(id_geracao)
# How many Pokemon from each generation are shown ------------------------
pokemon %>%
dplyr::mutate(id_geracao = forcats::as_factor(id_geracao)) %>%
dplyr::group_by(id_geracao) %>%
dplyr::summarise(quantidade = dplyr::n()) %>%
ggplot2::ggplot(ggplot2::aes(x = id_geracao, y = quantidade))+
ggplot2::geom_col()+
Rokemon::theme_gameboy()+
ggplot2::labs(x = "Geração dos Pokémons",
y = "Quantidade de Pokémons",
title = "Quantidade de Pokémons Por Cada Geração"
)+
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))+
ggplot2::geom_label(ggplot2::aes(x = id_geracao, y = quantidade,label = quantidade))
# Pokemon by type and by generation -----------------------------------------
pokemon %>%
tidyr::pivot_longer(
cols = dplyr::starts_with("tipo"),
names_to = "especificacao",
values_to = "tipos") %>%
tirando_na(tipos) %>%
dplyr::mutate(id_geracao = forcats::as_factor(id_geracao)) %>%
dplyr::group_by(id_geracao, tipos) %>%
dplyr::summarise(quantidade = dplyr::n()) %>%
ggplot2::ggplot(ggplot2::aes(x = quantidade, y = tipos, fill = id_geracao))+
ggplot2::geom_col()+
ggplot2::labs(x = "Quantidade",
y = "Tipos",
title = "Pokémons por Tipo e po Geração"
)+
ggplot2::guides(fill= ggplot2::guide_legend(title = "Gerações"))+
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
|
\name{unsplit}
\alias{unsplit}
\title{Unsplit a list-like object}
\description{
Given a list-like object \code{value} and grouping \code{f},
\code{unsplit} produces a vector-like object \code{x} by conceptually
reversing the split operation \code{value <- split(x, f)}.
NOTE: This man page is for the \code{unsplit}
\emph{S4 generic function} defined in the \pkg{BiocGenerics} package.
See \code{?base::\link[base]{unsplit}} for the default method
(defined in the \pkg{base} package).
Bioconductor packages can define specific methods for objects
not supported by the default method.
}
\usage{
unsplit(value, f, drop=FALSE)
}
\arguments{
\item{value}{
A list-like object.
}
\item{f}{
A factor or other grouping object that corresponds to the \code{f}
symbol in \code{value <- split(x, f)}.
}
\item{drop}{
See \code{?base::\link[base]{unsplit}} for a description of
this argument.
}
}
\value{
See \code{?base::\link[base]{unsplit}} for the value returned
by the default method.
Specific methods defined in Bioconductor packages should
behave as consistently as possible with the default method.
}
\seealso{
\itemize{
\item \code{base::\link[base]{unsplit}} for the default
\code{unsplit} method.
\item \code{\link[methods]{showMethods}} for displaying a summary of the
methods defined for a given generic function.
\item \code{\link[methods]{selectMethod}} for getting the definition of
a specific method.
\item \link[IRanges]{unsplit,List-method} in the \pkg{IRanges} package
for an example of a specific \code{unsplit} method (defined for
\link[S4Vectors]{List} objects).
\item \link{BiocGenerics} for a summary of all the generics defined
in the \pkg{BiocGenerics} package.
}
}
\examples{
unsplit # note the dispatch on the 'value' and 'f' args only
showMethods("unsplit")
selectMethod("unsplit", "ANY") # the default method
}
\keyword{methods}
|
/man/unsplit.Rd
|
no_license
|
jimhester/BiocGenerics
|
R
| false | false | 2,019 |
rd
|
\name{unsplit}
\alias{unsplit}
\title{Unsplit a list-like object}
\description{
Given a list-like object \code{value} and grouping \code{f},
\code{unsplit} produces a vector-like object \code{x} by conceptually
reversing the split operation \code{value <- split(x, f)}.
NOTE: This man page is for the \code{unsplit}
\emph{S4 generic function} defined in the \pkg{BiocGenerics} package.
See \code{?base::\link[base]{unsplit}} for the default method
(defined in the \pkg{base} package).
Bioconductor packages can define specific methods for objects
not supported by the default method.
}
\usage{
unsplit(value, f, drop=FALSE)
}
\arguments{
\item{value}{
A list-like object.
}
\item{f}{
A factor or other grouping object that corresponds to the \code{f}
symbol in \code{value <- split(x, f)}.
}
\item{drop}{
See \code{?base::\link[base]{unsplit}} for a description of
this argument.
}
}
\value{
See \code{?base::\link[base]{unsplit}} for the value returned
by the default method.
Specific methods defined in Bioconductor packages should
behave as consistently as possible with the default method.
}
\seealso{
\itemize{
\item \code{base::\link[base]{unsplit}} for the default
\code{unsplit} method.
\item \code{\link[methods]{showMethods}} for displaying a summary of the
methods defined for a given generic function.
\item \code{\link[methods]{selectMethod}} for getting the definition of
a specific method.
\item \link[IRanges]{unsplit,List-method} in the \pkg{IRanges} package
for an example of a specific \code{unsplit} method (defined for
\link[S4Vectors]{List} objects).
\item \link{BiocGenerics} for a summary of all the generics defined
in the \pkg{BiocGenerics} package.
}
}
\examples{
unsplit # note the dispatch on the 'value' and 'f' args only
showMethods("unsplit")
selectMethod("unsplit", "ANY") # the default method
}
\keyword{methods}
|
#' ---
#' title: "Check LMM assumption on FPS data from the Fear Generalization Task (FGT) in the SAM study"
#' author: "Milou Sep"
#' date: '`r format(Sys.time(), "%d %B, %Y")`'
#' output:
#' html_document: default
#' ---
#' Load required packages
library(mice); library(lme4); library(influence.ME); library(miceadds)
#' Define required variables
dependent_var='FPS'# Fear Potentiated Startle
n_subjects = 117 # Note this is always the same for all outcome measures (or datasets) in the FGT
influential.case.names<-c()
influentialpoints<-list()
#' Loop assumption checks over all datasets (= list elements) in Data_list
for(i in 1:101){ # Note 101 = 1 dataset with missing values and 100 imputed datasets
# Print in output which dataset is checked --------------------------------
if (i == 1){print(paste(dataset_name, dependent_var, "- LMER assumption check on data with missings"))
}else { print(paste(dataset_name, dependent_var, "- LMER assumption check on imputed dataset", i-1))} # i-1 is included because the data list contains 1 complete case dataset, so (i=2)-1 is imputed dataset 1
# Select Dataset for assumption check -------------------------------------
check.data=Data_list[[i]]
# Log-transform FPS (if needed) ----------------------------------------------
# Note log-transformation needs to be performed prior to the formula call. The unstandardized data contains 0-responses, which will lead to infinite values (that LMER() can not handle).
# The solution is to add 1 to all FPS values BEFORE the log-transformation (log()) (ref https://stackoverflow.com/questions/8415778/lm-na-nan-inf-error).
# Note, an alternative solution, to make 0-responses NA, is not suitable here because 0-responses contain meaningful information for FPS analyses.
# The 0-responses stay in the analyses with +1, as log(1)=0.
if (log_transformation == 'yes'){
check.data$FPS <- check.data$FPS + 1
check.data$FPS <- log(check.data$FPS)
}
# print(str(check.data$FPS)) # To check if transformation works.
# Fit Linear(mixed)model --------------------------------------------------
if (within_factor == 1){
FullModel<-lmer(Model_Formula, data=check.data, REML=F, na.action = na.omit)
} else if (within_factor == 0){
FullModel<-lm(Model_Formula, data=check.data)
}
# Plots for LMER assumption checks ---------------------------------------
## Raw data Histogram
datacolum<-check.data[which(colnames(check.data) == dependent_var)]
hist(data.matrix(datacolum), main=paste(dataset_name, "dataset", i, "- Histogram raw data", dependent_var))
## Linearity
plot(fitted(FullModel),residuals(FullModel), main=paste(dataset_name, "dataset", i, "- Linearity contrast", dependent_var)); abline(0,0,lty=2,lwd=2);
## Normality of residuals
### histogram Residuals
hist(residuals(FullModel), main=paste(dataset_name, "dataset", i, "- Histogram Model Residuals", dependent_var));
## QQ-plots
qqnorm(residuals(FullModel), main=paste(dataset_name, "dataset", i, "- QQ plot", dependent_var)); qqline(residuals(FullModel),col = 2)
## Cook's distance
if (within_factor == 1){ # Display Cook's distance for LMER() model
estex.FullModel <-influence(FullModel,"pp")
cooks_FullModel<-cooks.distance(estex.FullModel)
these_influential_points<-cooks_FullModel[(cooks_FullModel>(4/(n_subjects))),] # Shows measures with cook's distance above cutoff
print(these_influential_points)
# Plot cook's distance
plot(estex.FullModel, which="cook",
cutoff=4/(n_subjects), sort=TRUE,
xlab="Cook's Distance",
ylab="pp",
cex.lab=0.01, # Smaller font size for axis labels.
main=paste(dataset_name, "dataset", i, "Cook's Distance", dependent_var))
# Collect influential cases (as information for analyses)
influentialpoints[[i]] <- these_influential_points # participant name & value of cook's distance
if (i>1){ # collects only names of participants that are influential in imputed datasets(not cooks distance data)
influential.case.names <- c(influential.case.names, names(influentialpoints[[i]]))
}
} else if (within_factor == 0){ # Display Cook's distance for lm() model
plot(FullModel,2) # QQ plot, with case names.
plot(FullModel,4) # Plot for cooks distance for lm() (Note, this shows observations, not participants numbers)
}
}
#' Remove redundant variables from the global environment.
rm(list=c("check.data", "cooks_FullModel", "Data_list", "datacolum", "dataset_name", "dependent_var", "estex.FullModel",
"FullModel", "i", "Model_Formula", "n_subjects", "these_influential_points", "within_factor"), pos=.GlobalEnv)
|
/R/FPS_LMM_Assumptions_source.R
|
permissive
|
mscsep/SAM_FGT
|
R
| false | false | 4,696 |
r
|
#' ---
#' title: "Check LMM assumption on FPS data from the Fear Generalization Task (FGT) in the SAM study"
#' author: "Milou Sep"
#' date: '`r format(Sys.time(), "%d %B, %Y")`'
#' output:
#' html_document: default
#' ---
#' Load required packages
library(mice); library(lme4); library(influence.ME); library(miceadds)
#' Define required variables
dependent_var='FPS'# Fear Potentiated Startle
n_subjects = 117 # Note this is always the same for all outcome measures (or datasets) in the FGT
influential.case.names<-c()
influentialpoints<-list()
#' Loop assumption checks over all datasets (= list elements) in Data_list
for(i in 1:101){ # Note 101 = 1 dataset with missing values and 100 imputed datasets
# Print in output which dataset is checked --------------------------------
if (i == 1){print(paste(dataset_name, dependent_var, "- LMER assumption check on data with missings"))
}else { print(paste(dataset_name, dependent_var, "- LMER assumption check on imputed dataset", i-1))} # i-1 is included because the data list contains 1 complete case dataset, so (i=2)-1 is imputed dataset 1
# Select Dataset for assumption check -------------------------------------
check.data=Data_list[[i]]
# Log-transform FPS (if needed) ----------------------------------------------
# Note log-transformation needs to be performed prior to the formula call. The unstandardized data contains 0-responses, which will lead to infinite values (that LMER() can not handle).
# The solution is to add 1 to all FPS values BEFORE the log-transformation (log()) (ref https://stackoverflow.com/questions/8415778/lm-na-nan-inf-error).
# Note, an alternative solution, to make 0-responses NA, is not suitable here because 0-responses contain meaningful information for FPS analyses.
# The 0-responses stay in the analyses with +1, as log(1)=0.
if (log_transformation == 'yes'){
check.data$FPS <- check.data$FPS + 1
check.data$FPS <- log(check.data$FPS)
}
# print(str(check.data$FPS)) # To check if transformation works.
# Fit Linear(mixed)model --------------------------------------------------
if (within_factor == 1){
FullModel<-lmer(Model_Formula, data=check.data, REML=F, na.action = na.omit)
} else if (within_factor == 0){
FullModel<-lm(Model_Formula, data=check.data)
}
# Plots for LMER assumption checks ---------------------------------------
## Raw data Histogram
datacolum<-check.data[which(colnames(check.data) == dependent_var)]
hist(data.matrix(datacolum), main=paste(dataset_name, "dataset", i, "- Histogram raw data", dependent_var))
## Linearity
plot(fitted(FullModel),residuals(FullModel), main=paste(dataset_name, "dataset", i, "- Linearity contrast", dependent_var)); abline(0,0,lty=2,lwd=2);
## Normality of residuals
### histogram Residuals
hist(residuals(FullModel), main=paste(dataset_name, "dataset", i, "- Histogram Model Residuals", dependent_var));
## QQ-plots
qqnorm(residuals(FullModel), main=paste(dataset_name, "dataset", i, "- QQ plot", dependent_var)); qqline(residuals(FullModel),col = 2)
## Cook's distance
if (within_factor == 1){ # Display Cook's distance for LMER() model
estex.FullModel <-influence(FullModel,"pp")
cooks_FullModel<-cooks.distance(estex.FullModel)
these_influential_points<-cooks_FullModel[(cooks_FullModel>(4/(n_subjects))),] # Shows measures with cook's distance above cutoff
print(these_influential_points)
# Plot cook's distance
plot(estex.FullModel, which="cook",
cutoff=4/(n_subjects), sort=TRUE,
xlab="Cook's Distance",
ylab="pp",
cex.lab=0.01, # Smaller font size for axis labels.
main=paste(dataset_name, "dataset", i, "Cook's Distance", dependent_var))
# Collect influential cases (as information for analyses)
influentialpoints[[i]] <- these_influential_points # participant name & value of cook's distance
if (i>1){ # collects only names of participants that are influential in imputed datasets(not cooks distance data)
influential.case.names <- c(influential.case.names, names(influentialpoints[[i]]))
}
} else if (within_factor == 0){ # Display Cook's distance for lm() model
plot(FullModel,2) # QQ plot, with case names.
plot(FullModel,4) # Plot for cooks distance for lm() (Note, this shows observations, not participants numbers)
}
}
#' Remove redundant variables from the global environment.
rm(list=c("check.data", "cooks_FullModel", "Data_list", "datacolum", "dataset_name", "dependent_var", "estex.FullModel",
"FullModel", "i", "Model_Formula", "n_subjects", "these_influential_points", "within_factor"), pos=.GlobalEnv)
|
library(netCoin)
### Name: timeCoin
### Title: Networked coincidences.
### Aliases: timeCoin
### ** Examples
# Database of 19th century sociologists
data(sociologists)
timeCoin(sociologists,"name","birth","death","birthcountry",
dir = "./timeline", show = FALSE) # See ./timeline/index.html file
|
/data/genthat_extracted_code/netCoin/examples/timeCoin.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 313 |
r
|
library(netCoin)
### Name: timeCoin
### Title: Networked coincidences.
### Aliases: timeCoin
### ** Examples
# Database of 19th century sociologists
data(sociologists)
timeCoin(sociologists,"name","birth","death","birthcountry",
dir = "./timeline", show = FALSE) # See ./timeline/index.html file
|
library(foreign)
library(ggplot2)
library(spdep)
library(maptools)
# Load a shapefile of the Census tracts in Chicago
# The data include tract-level disadvantage characteristics
chicago <- readShapePoly('HomRates_Disadvantage.shp')
summary(chicago)
plot(chicago,border=gray(0.5))
# Define a list of the variable names
vars <- c('PctMinPop','PctPoorHH','MedIndvInc','MedHHInc','PctNoBac','PctNoHSD','PctFHH','PctHisp','PctMNoJob','PctUnemp')
# Pull the variables out into a dataframe
dat <- data.frame(chicago@data[,vars])
summary(dat)
# Center and scale the variables
sdat <- scale(dat)
summary(sdat)
# Create neighbor list from tract map based on first-order Queen contiguity
chicago.nb <- poly2nb(chicago)
# Plot the connectivity diagram
plot(chicago.nb,coordinates(chicago),col='dark turquoise',add=TRUE)
# Calculate the cost of each edge (multi-dimensional distance between the nodes)
lcosts <- nbcosts(chicago.nb,sdat)
# Calculate the neighbor weights based on the costs
chicago.w <- nb2listw(chicago.nb,lcosts,style='B')
# Create the minimum spanning tree based on the neighbor weights
chicago.mst <- mstree(chicago.w)
# Plot the minimum spanning tree
plot(chicago.mst,coordinates(chicago),col='dark turquoise',cex.lab=0.001)
plot(chicago,border='deep pink',add=TRUE)
# Use SKATER to cluster Chicago Census tracts into 12 regions based on social-disadvantage variables
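# Note: the third argument of skater() is the number of cuts, so 11 cuts of the spanning tree give 12 clusters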
clusters12 <- skater(chicago.mst[,1:2],sdat,11)
groups12 <- clusters12$groups
table(groups12)
# Fun with colors! https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/colorPaletteCheatsheet.pdf
plot(chicago,col=c('green yellow','orangered','dark gray','cyan','gold','pink','deep pink','dark cyan','purple 3','spring green 3','royal blue 2','dark orchid 4')[clusters12$groups])
# Create a new dataframe from the groups vector and add the GeoID
SKATER12 <- as.data.frame(cbind(chicago$geoid10, chicago@data[,vars],
chicago$HOMRATE, groups12))
# Write the dataframe to a CSV
write.csv(SKATER12,'SKATER12.csv', row.names = FALSE)
|
/2017-07-18_skater_demo/skater.R
|
no_license
|
rladies-chicago/2017-07-18_kickoff_meeting
|
R
| false | false | 2,070 |
r
|
library(foreign)
library(ggplot2)
library(spdep)
library(maptools)
# Load a shapefile of the Census tracts in Chicago
# The data include tract-level disadvantage characteristics
chicago <- readShapePoly('HomRates_Disadvantage.shp')
summary(chicago)
plot(chicago,border=gray(0.5))
# Define a list of the variable names
vars <- c('PctMinPop','PctPoorHH','MedIndvInc','MedHHInc','PctNoBac','PctNoHSD','PctFHH','PctHisp','PctMNoJob','PctUnemp')
# Pull the variables out into a dataframe
dat <- data.frame(chicago@data[,vars])
summary(dat)
# Center and scale the variables
sdat <- scale(dat)
summary(sdat)
# Create neighbor list from tract map based on first-order Queen contiguity
chicago.nb <- poly2nb(chicago)
# Plot the connectivity diagram
plot(chicago.nb,coordinates(chicago),col='dark turquoise',add=TRUE)
# Calculate the cost of each edge (multi-dimensional distance between the nodes)
lcosts <- nbcosts(chicago.nb,sdat)
# Calculate the neighbor weights based on the costs
chicago.w <- nb2listw(chicago.nb,lcosts,style='B')
# Create the minimum spanning tree based on the neighbor weights
chicago.mst <- mstree(chicago.w)
# Plot the minimum spanning tree
plot(chicago.mst,coordinates(chicago),col='dark turquoise',cex.lab=0.001)
plot(chicago,border='deep pink',add=TRUE)
# Use SKATER to cluster Chicago Census tracts into 12 regions based on social-disadvantage variables
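# Note: the third argument of skater() is the number of cuts, so 11 cuts of the spanning tree give 12 clusters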
clusters12 <- skater(chicago.mst[,1:2],sdat,11)
groups12 <- clusters12$groups
table(groups12)
# Fun with colors! https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/colorPaletteCheatsheet.pdf
plot(chicago,col=c('green yellow','orangered','dark gray','cyan','gold','pink','deep pink','dark cyan','purple 3','spring green 3','royal blue 2','dark orchid 4')[clusters12$groups])
# Create a new dataframe from the groups vector and add the GeoID
SKATER12 <- as.data.frame(cbind(chicago$geoid10, chicago@data[,vars],
chicago$HOMRATE, groups12))
# Write the dataframe to a CSV
write.csv(SKATER12,'SKATER12.csv', row.names = FALSE)
|
\name{Outliers}
\alias{Outliers}
\title{Computes outliers}
\description{Computes outlierness scores and detects outliers.}
\usage{
Outliers(prox, cls=NULL, data=NULL, threshold=10)
}
\arguments{
\item{prox}{a proximity matrix (a square matrix with 1 on the diagonal and values between 0 and 1 in the off-diagonal positions).}
\item{cls}{Factor. The classes the rows in the proximity matrix belong to. If NULL (default), all data are assumed to come from the same class.}
\item{data}{A data frame of variables to describe the outliers (optional).}
\item{threshold}{Numeric. The value of outlierness above which an observation is considered an outlier. Default is 10.}
}
\details{
The outlierness score of a case is computed as n / sum(squared proximity), normalized by
subtracting the median and dividing by the MAD, within each class.
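Equivalently, within each class,
\deqn{out_i = \frac{raw_i - \mathrm{median}(raw)}{\mathrm{MAD}(raw)}, \qquad
raw_i = \frac{n}{\sum_j prox(i,j)^2}}{out_i = (raw_i - median(raw)) / MAD(raw), where raw_i = n / sum_j prox(i,j)^2}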
}
\value{
A list with the following elements :
\item{scores}{numeric vector containing the outlierness scores}
\item{outliers}{numeric vector of indexes of the outliers, or a data frame with the outliers and their characteristics}
}
\note{
The code is adapted from \code{outlier} function in \code{randomForest} package.
}
\examples{
data(iris)
iris2 = iris
iris2$Species = factor(iris$Species == "versicolor")
iris.cf = party::cforest(Species ~ ., data = iris2,
control = party::cforest_unbiased(mtry = 2, ntree = 50))
prox=proximity(iris.cf)
Outliers(prox, iris2$Species, iris2[,1:4])
}
\keyword{classif}
|
/man/Outliers.Rd
|
no_license
|
nicolas-robette/moreparty
|
R
| false | false | 1,489 |
rd
|
\name{Outliers}
\alias{Outliers}
\title{Computes outliers}
\description{Computes outlierness scores and detects outliers.}
\usage{
Outliers(prox, cls=NULL, data=NULL, threshold=10)
}
\arguments{
\item{prox}{a proximity matrix (a square matrix with 1 on the diagonal and values between 0 and 1 in the off-diagonal positions).}
\item{cls}{Factor. The classes the rows in the proximity matrix belong to. If NULL (default), all data are assumed to come from the same class.}
\item{data}{A data frame of variables to describe the outliers (optional).}
\item{threshold}{Numeric. The value of outlierness above which an observation is considered an outlier. Default is 10.}
}
\details{
The outlierness score of a case is computed as n / sum(squared proximity), normalized by
subtracting the median and dividing by the MAD, within each class.
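Equivalently, within each class,
\deqn{out_i = \frac{raw_i - \mathrm{median}(raw)}{\mathrm{MAD}(raw)}, \qquad
raw_i = \frac{n}{\sum_j prox(i,j)^2}}{out_i = (raw_i - median(raw)) / MAD(raw), where raw_i = n / sum_j prox(i,j)^2}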
}
\value{
A list with the following elements :
\item{scores}{numeric vector containing the outlierness scores}
\item{outliers}{numeric vector of indexes of the outliers, or a data frame with the outliers and their characteristics}
}
\note{
The code is adapted from \code{outlier} function in \code{randomForest} package.
}
\examples{
data(iris)
iris2 = iris
iris2$Species = factor(iris$Species == "versicolor")
iris.cf = party::cforest(Species ~ ., data = iris2,
control = party::cforest_unbiased(mtry = 2, ntree = 50))
prox=proximity(iris.cf)
Outliers(prox, iris2$Species, iris2[,1:4])
}
\keyword{classif}
|
#' Summarize the Object of '\code{bma}' Class
#'
#' This function summarizes an object of '\code{bma}' class
#'
#' @param x An object of '\code{bma}' class
#' @param ... Arguments to be passed to methods
#'
#' @return An object of '\code{sum.bma}' class with the elements
#' \item{expected.coefficient}{The expected value of each coefficient}
#' \item{posterior.prob}{the posterior probability that the coefficient is non-zero}
#' @author Myunghoon Kang \email{myunghoon@@wustl.edu}
#' @note This produces an object of a new class '\code{sum.bma}'.
#' @seealso \code{\link{fitBMA}}
#' @seealso \code{\link{plotBMA}}
#' @examples
#'
#' # Create a random 10 by 5 covariate matrix
#' x <- matrix(rnorm(50,0,1),10,5)
#' # Create a vector of the values for the dependent variable
#' y <- 2+1.2*x[,1]+2.2*x[,2]+0.2*x[,3]+3.2*x[,4]+1.8*x[,5]+rnorm(10,0,3)
#' # run fitBMA function
#' a <- fitBMA(x=x,y=y,g=3)
#' summary(a)
#' @rdname summaryBMA
#' @aliases summaryBMA, ANY-method
#' @export
setGeneric(name="summaryBMA",
function(x,...){
standardGeneric("summaryBMA")
}
)
#' @export
setMethod("summaryBMA","bma",
function(x,...){
return(new("sum.bma", expected.coefficient=x@expected.coeff, posterior.prob=x@posterior.prob))
}
)
|
/BmaPack/R/summaryBMA.R
|
no_license
|
mhoonkang/MidtermBMA
|
R
| false | false | 1,296 |
r
|
#' Summarize the Object of '\code{bma}' Class
#'
#' This function summarizes an object of '\code{bma}' class
#'
#' @param x An object of '\code{bma}' class
#' @param ... Arguments to be passed to methods
#'
#' @return An object of '\code{sum.bma}' class with the elements
#' \item{expected.coefficient}{The expected value of each coefficient}
#' \item{posterior.prob}{the posterior probability that the coefficient is non-zero}
#' @author Myunghoon Kang \email{myunghoon@@wustl.edu}
#' @note This produces an object of a new class '\code{sum.bma}'.
#' @seealso \code{\link{fitBMA}}
#' @seealso \code{\link{plotBMA}}
#' @examples
#'
#' # Create a random 10 by 5 covariate matrix
#' x <- matrix(rnorm(50,0,1),10,5)
#' # Create a vector of the values for the dependent variable
#' y <- 2+1.2*x[,1]+2.2*x[,2]+0.2*x[,3]+3.2*x[,4]+1.8*x[,5]+rnorm(10,0,3)
#' # run fitBMA function
#' a <- fitBMA(x=x,y=y,g=3)
#' summary(a)
#' @rdname summaryBMA
#' @aliases summaryBMA, ANY-method
#' @export
setGeneric(name="summaryBMA",
function(x,...){
standardGeneric("summaryBMA")
}
)
#' @export
setMethod("summaryBMA","bma",
function(x,...){
return(new("sum.bma", expected.coefficient=x@expected.coeff, posterior.prob=x@posterior.prob))
}
)
|
readSiteDetail <- function(Directory, filename= "site_detail.txt"){
X <- read.delim(file.path(Directory,filename, fsep = .Platform$file.sep),
comment.char= "%",
quote ="",
stringsAsFactors = FALSE,header = FALSE)
colnames(X)<- c("Id","Name", "Lat","Lon" ,"Altitude" ,"LatUnc","LonUnc","AltUnc",
"Country","State","County","Tzone","WMO","COOP","WBAN","ICOA",
"Relo","SugRelo","Sources","Hash")
X$Lat[X$Lat == -999] <- NA
X$Lon[X$Lon == -999] <- NA
X$Altitude[X$Altitude == -999] <- NA
X$LatUnc[X$LatUnc == -9.99999] <- NA
X$LonUnc[X$LonUnc == -9.99999] <- NA
X$AltUnc[X$AltUnc == -9.99999] <- NA
X$Tzone[X$Tzone == -99] <- NA
X$WMO[X$WMO == -9999] <- NA
X$COOP[X$COOP == -9999] <- NA
X$WBAN[X$WBAN == -9999] <- NA
X$ICOA[X$ICOA == " "] <- NA
X$State[X$State == " "] <-NA
X$Country[X$Country == "[Missing] "] <- NA
X$County[X$County == " "] <- NA
X <- X[,c(1,3,4,2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)]
return(X)
}
|
/R/readSiteDetail.R
|
no_license
|
cran/BerkeleyEarth
|
R
| false | false | 1,259 |
r
|
readSiteDetail <- function(Directory, filename= "site_detail.txt"){
X <- read.delim(file.path(Directory,filename, fsep = .Platform$file.sep),
comment.char= "%",
quote ="",
stringsAsFactors = FALSE,header = FALSE)
colnames(X)<- c("Id","Name", "Lat","Lon" ,"Altitude" ,"LatUnc","LonUnc","AltUnc",
"Country","State","County","Tzone","WMO","COOP","WBAN","ICOA",
"Relo","SugRelo","Sources","Hash")
X$Lat[X$Lat == -999] <- NA
X$Lon[X$Lon == -999] <- NA
X$Altitude[X$Altitude == -999] <- NA
X$LatUnc[X$LatUnc == -9.99999] <- NA
X$LonUnc[X$LonUnc == -9.99999] <- NA
X$AltUnc[X$AltUnc == -9.99999] <- NA
X$Tzone[X$Tzone == -99] <- NA
X$WMO[X$WMO == -9999] <- NA
X$COOP[X$COOP == -9999] <- NA
X$WBAN[X$WBAN == -9999] <- NA
X$ICOA[X$ICOA == " "] <- NA
X$State[X$State == " "] <-NA
X$Country[X$Country == "[Missing] "] <- NA
X$County[X$County == " "] <- NA
X <- X[,c(1,3,4,2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)]
return(X)
}
|
##############################
#Clean Console and Environment
##############################
cat("\014")
rm(list = ls())
##############################
# Read Training Data Set
##############################
WD<-setwd("C:\\Users\\Nikitas Marios\\Desktop\\Data Mining Techniques\\Assignment_2\\Data Mining VU data")
load("rawdata_environment.RData")
##############################
#Keep separately the competitors data
##############################
competitors_data<-rawdata[,28:51]
no_comp_data<-rawdata[,c(1:27,52:54)]
##############################
#Save Competitors and Non Competitors data Separately
##############################
save(competitors_data,file="competitors_data.RData")
save(no_comp_data,file="no_comp_data.RData")
|
/assignment-2/R/LamdaMART R Code/DataCleaning_1.R
|
no_license
|
arashparnia/Data-Mining
|
R
| false | false | 754 |
r
|
##############################
#Clean Console and Environment
##############################
cat("\014")
rm(list = ls())
##############################
# Read Training Data Set
##############################
WD<-setwd("C:\\Users\\Nikitas Marios\\Desktop\\Data Mining Techniques\\Assignment_2\\Data Mining VU data")
load("rawdata_environment.RData")
##############################
#Keep separately the competitors data
##############################
competitors_data<-rawdata[,28:51]
no_comp_data<-rawdata[,c(1:27,52:54)]
##############################
#Save Competitors and Non Competitors data Separately
##############################
save(competitors_data,file="competitors_data.RData")
save(no_comp_data,file="no_comp_data.RData")
|
library(evclust)
### Name: ecm
### Title: Evidential c-means algorithm
### Aliases: ecm
### ** Examples
## Clustering of the Four-class dataset
data(fourclass)
x<-fourclass[,1:2]
y<-fourclass[,3]
clus<-ecm(x,c=4,type='full',alpha=1,beta=2,delta=sqrt(20),epsi=1e-3,disp=TRUE)
plot(clus,X=x,mfrow=c(2,2),ytrue=y,Outliers=TRUE,Approx=2)
|
/data/genthat_extracted_code/evclust/examples/ecm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 341 |
r
|
library(evclust)
### Name: ecm
### Title: Evidential c-means algorithm
### Aliases: ecm
### ** Examples
## Clustering of the Four-class dataset
data(fourclass)
x<-fourclass[,1:2]
y<-fourclass[,3]
clus<-ecm(x,c=4,type='full',alpha=1,beta=2,delta=sqrt(20),epsi=1e-3,disp=TRUE)
plot(clus,X=x,mfrow=c(2,2),ytrue=y,Outliers=TRUE,Approx=2)
|
#' Load and format data for single sample processing
#'
#' @param snp_file A file with extracted snp data, after running 'extract_snp_single.py'
#'
#' @return A data.frame suitable for downstream single sample analysis
#' @importFrom magrittr %>%
#' @export
#'
#'
readSingle <- function(snp_file){
read.table(snp_file,
stringsAsFactors = F,
header = T,
sep="\t") %>%
dplyr::mutate(relation="O",
Origin=NA,
type= ifelse(copynumber<2, "del", "dup")) %>%
tidyr::gather(-Name, -Chr, -Position, -locus, -copynumber, -type, -numsnp, -Origin,
-coordcnv, -sample, -relation, key="parameter", value="value") %>%
dplyr::group_by(Name, Chr, Position, locus, coordcnv, sample, copynumber, numsnp,
relation, Origin, type, parameter) %>%
dplyr::summarise(value=unique(value)) %>%
dplyr::ungroup()
}
|
/R/readSingle.R
|
permissive
|
shulp2211/SeeCiTe
|
R
| false | false | 894 |
r
|
#' Load and format data for single sample processing
#'
#' @param snp_file A file with extracted snp data, after running 'extract_snp_single.py'
#'
#' @return A data.frame suitable for downstream single sample analysis
#' @importFrom magrittr %>%
#' @export
#'
#'
readSingle <- function(snp_file){
read.table(snp_file,
stringsAsFactors = F,
header = T,
sep="\t") %>%
dplyr::mutate(relation="O",
Origin=NA,
type= ifelse(copynumber<2, "del", "dup")) %>%
tidyr::gather(-Name, -Chr, -Position, -locus, -copynumber, -type, -numsnp, -Origin,
-coordcnv, -sample, -relation, key="parameter", value="value") %>%
dplyr::group_by(Name, Chr, Position, locus, coordcnv, sample, copynumber, numsnp,
relation, Origin, type, parameter) %>%
dplyr::summarise(value=unique(value)) %>%
dplyr::ungroup()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_delta_variance.R
\name{calculate_delta_variance}
\alias{calculate_delta_variance}
\title{Calculate delta variance}
\usage{
calculate_delta_variance(
input,
meta = NULL,
replicate_col = "replicate",
cell_type_col = "cell_type",
label_col = "label",
min_cells = 3,
min_reps = 2,
min_features = 0
)
}
\arguments{
\item{input}{a single-cell matrix to be converted, with features (genes) in rows
and cells in columns. Alternatively, a \code{Seurat}, \code{monocle3}, or
\code{SingleCellExperiment} object can be directly input.}
\item{meta}{the accompanying meta data whereby the rownames match the column
names of \code{input}. If a \code{Seurat}, \code{monocle3} or
\code{SingleCellExperiment} object is provided this can be null.}
\item{replicate_col}{the vector in \code{meta} containing the replicate
information. Defaults to \code{replicate}.}
\item{cell_type_col}{the vector in \code{meta} containing the cell type
information. Defaults to \code{cell_type}.}
\item{label_col}{the vector in \code{meta} containing the experimental
label. Defaults to \code{label}.}
\item{min_cells}{the minimum number of cells in a cell type to retain it.
Defaults to \code{3}.}
\item{min_reps}{the minimum number of replicates in a cell type to retain it.
Defaults to \code{2}.}
\item{min_features}{the minimum number of replicates expressing a gene
to retain it. Defaults to \code{0}}
}
\value{
a list of pseudobulk matrices, for each cell type.
}
\description{
Calculate delta variance from a single-cell matrix.
}
|
/man/calculate_delta_variance.Rd
|
permissive
|
phycomlab/Libra
|
R
| false | true | 1,621 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_delta_variance.R
\name{calculate_delta_variance}
\alias{calculate_delta_variance}
\title{Calculate delta variance}
\usage{
calculate_delta_variance(
input,
meta = NULL,
replicate_col = "replicate",
cell_type_col = "cell_type",
label_col = "label",
min_cells = 3,
min_reps = 2,
min_features = 0
)
}
\arguments{
\item{input}{a single-cell matrix to be converted, with features (genes) in rows
and cells in columns. Alternatively, a \code{Seurat}, \code{monocle3}, or
\code{SingleCellExperiment} object can be directly input.}
\item{meta}{the accompanying meta data whereby the rownames match the column
names of \code{input}. If a \code{Seurat}, \code{monocle3} or
\code{SingleCellExperiment} object is provided this can be null.}
\item{replicate_col}{the vector in \code{meta} containing the replicate
information. Defaults to \code{replicate}.}
\item{cell_type_col}{the vector in \code{meta} containing the cell type
information. Defaults to \code{cell_type}.}
\item{label_col}{the vector in \code{meta} containing the experimental
label. Defaults to \code{label}.}
\item{min_cells}{the minimum number of cells in a cell type to retain it.
Defaults to \code{3}.}
\item{min_reps}{the minimum number of replicates in a cell type to retain it.
Defaults to \code{2}.}
\item{min_features}{the minimum number of replicates expressing a gene
to retain it. Defaults to \code{0}}
}
\value{
a list of pseudobulk matrices, for each cell type.
}
\description{
Calculate delta variance from a single-cell matrix.
}
|