| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0-6.46M | large_string, lengths 3-331 | large_string, 2 classes | large_string, lengths 5-125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4-6.46M | large_string, 75 classes | string, lengths 0-6.46M |
setwd("/Users/Shugars/Dropbox/research projects/conversation dynamics/replication materials")
# read in data
targ <- read.table("data/conv_features_final.txt.gzip",header=F,sep=",",stringsAsFactors=F)
# varnames
colnames(targ) <- c('y',
'j',
'threadID',
'tweet_t',
'tweet_tp1',
'i_t',
'i_tp1',
't',
'i_t_verified',
'i_t_followers_count',
'i_t_following_count',
'i_t_statuses_count',
'i_t_favourites_count',
'i_t_comments_count',
'i_tp1_verified',
'i_tp1_followers_count',
'i_tp1_following_count',
'i_tp1_statuses_count',
'i_tp1_favourites_count',
'i_tp1_comments_count',
'i_tp1_response',
'tweet_t_favorite_count',
'tweet_t_retweet_count',
'tweet_t_max_reply_count',
'tweet_t_max_quality',
'tweet_t_source',
'tweet_t_xday',
'tweet_t_yday',
'tweet_t_xhour',
'tweet_t_yhour',
'tweet_t_chars',
'tweet_t_url',
'tweet_t_mentions',
'tweet_t_hashtags',
'tweet_t_sentiment',
'tweet_t_vader_neg',
'tweet_t_vader_neu',
'tweet_t_vader_pos',
'tweet_t_vader_compound',
'tweet_t_valence',
'tweet_t_arousal',
'tweet_t_dominance',
'tweet_t_topic0',
'tweet_t_topic1',
'tweet_t_topic2',
'tweet_t_topic3',
'tweet_t_topic4',
'tweet_t_topic5',
'tweet_t_topic6',
'tweet_t_topic7',
'tweet_t_topic8',
'tweet_t_topic9',
'tweet_t_reply_count',
'tweet_t_quality',
'tweet_tp1_favorite_count',
'tweet_tp1_retweet_count',
'tweet_tp1_reply_count',
'tweet_tp1_quality',
'tweet_tp1_source',
'tweet_tp1_xday',
'tweet_tp1_yday',
'tweet_tp1_xhour',
'tweet_tp1_yhour',
'tweet_tp1_chars',
'tweet_tp1_url',
'tweet_tp1_mentions',
'tweet_tp1_hashtags',
'tweet_tp1_sentiment',
'tweet_tp1_vader_neg',
'tweet_tp1_vader_neu',
'tweet_tp1_vader_pos',
'tweet_tp1_vader_compound',
'tweet_tp1_valence',
'tweet_tp1_arousal',
'tweet_tp1_dominance',
'tweet_tp1_topic0',
'tweet_tp1_topic1',
'tweet_tp1_topic2',
'tweet_tp1_topic3',
'tweet_tp1_topic4',
'tweet_tp1_topic5',
'tweet_tp1_topic6',
'tweet_tp1_topic7',
'tweet_tp1_topic8',
'tweet_tp1_topic9',
'tweet_tm1_favorite_count',
'tweet_tm1_retweet_count',
'tweet_tm1_reply_count',
'tweet_tm1_quality',
'tweet_tm1_source',
'tweet_tm1_xday',
'tweet_tm1_yday',
'tweet_tm1_xhour',
'tweet_tm1_yhour',
'tweet_tm1_chars',
'tweet_tm1_url',
'tweet_tm1_mentions',
'tweet_tm1_hashtags',
'tweet_tm1_sentiment',
'tweet_tm1_vader_neg',
'tweet_tm1_vader_neu',
'tweet_tm1_vader_pos',
'tweet_tm1_vader_compound',
'tweet_tm1_valence',
'tweet_tm1_arousal',
'tweet_tm1_dominance',
'tweet_tm1_topic0',
'tweet_tm1_topic1',
'tweet_tm1_topic2',
'tweet_tm1_topic3',
'tweet_tm1_topic4',
'tweet_tm1_topic5',
'tweet_tm1_topic6',
'tweet_tm1_topic7',
'tweet_tm1_topic8',
'tweet_tm1_topic9',
'tweet_tm1_t',
'tweet_t_tm1_cos',
'tweet_t_tm1_euc',
'j_t_thread_length',
'j_t_participants'
)
head(targ)
dim(targ)
#scale everything but y values
#add quadratic features for euclidean distance and thread length
# ** NB: probably shouldn't scale the topics, and remember to leave one out for the regression**
trimmed <- targ[,c(-(1:8),-(24:25),-(43:52),-(55:85),-(107:116))] # cut out y, indicators, endog vars, t+1 vars, topics
trsc <- scale(trimmed)
targ3 <- cbind('y' = targ$y,
trsc, # scaled vars
targ[,c(44:52,108:116)], # topics
'tweet_t_tm1_euc_quad' = trsc[,58]**2, # quadratic terms
'j_t_length_quad' = trsc[,59]**2)
targ4c <- targ3[,c(1,8:14,36:50,52,54:57,71:79,61,60,81,2:7,15:16,34:35,17:27,29,31:33,62:70,59,80)]
# OLS baseline
lmout <- lm(y~.,data=targ4c)
summary(lmout) # r2 of about 0.26
# OLS out-of-sample - 91.1%
set.seed(1)
train <- sample(1:nrow(targ4c),floor(8*nrow(targ4c)/10),replace=F)
dat_tr <- targ4c[train,]
dat_tst <- targ4c[-train,]
1- mean(dat_tst$y) # baseline for this sample: 90.5%
lmoos <- lm(y~.,data=dat_tr)
lyhat <- predict(lmoos, newdata = dat_tst)
lyhat2 <- ifelse(lyhat > 0.5,1,0)
sum(dat_tst$y == lyhat2) / (sum(dat_tst$y == lyhat2) + sum(dat_tst$y != lyhat2))
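# Equivalent, more direct accuracy calculation (added illustration):
mean(dat_tst$y == lyhat2)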
# Logit full-sample - something is wrong: weirdly unstable if tweet_t_quality is removed, though OOS works OK without it
logitfullout <- glm(y~.,data=targ4c,family="binomial") # no error clustering
logitfullout_cl <- miceadds::glm.cluster(y~.,data=targ4c,family="binomial",cluster=targ3$tweet_t) # error clustering on prev tweet
summary(logitfullout)
# Logit out-of-sample - 94%
logitoos <- glm(y~.,data=dat_tr,family="binomial")
lyhat <- predict(logitoos, newdata = dat_tst, type="response")
lyhat2 <- ifelse(lyhat > 0.5,1,0)
sum(dat_tst$y == lyhat2) / (sum(dat_tst$y == lyhat2) + sum(dat_tst$y != lyhat2))
# SVM tuned out-of-sample - 97% at 1% in-sample; 98.0% on 10%
library(e1071)
costvalues <- 10^seq(-3,2,1)
svdat <- data.frame(x=as.matrix(targ4c[,-1]),y=as.factor(targ4c$y))
set.seed(1)
train <- sample(1:nrow(svdat),floor(nrow(svdat)/100),replace=F) # NB: should be 80%, but can only manage 10%
svdat_tr <- svdat[train,]
svdat_tst <- svdat[-train,]
tuned.svm <- tune(svm,y~., data=svdat_tr, ranges=list(cost=costvalues), kernel="radial") # takes > 24hrs for 10%
save(tuned.svm,file="svmout.RData")
yhat <- predict(tuned.svm$best.model,newdata=svdat_tst)
save(yhat,file="svmout_oss_preds.RData")
sum(yhat==svdat_tst$y)/length(svdat_tst$y)
table(predicted=yhat,truth=svdat_tst$y)/length(svdat_tst$y)
# for 10%:
# truth
# predicted 0 1
# 0 0.898 0.012
# 1 0.008 0.082
# Latex output from logit
library(stargazer)
vlist <- c("verified", "followers count", "following count", "statuses count", "favourites count", "comments count", "prev response", "favorite count", "retweet count", "reply count", "quality", "source", "xday", "yday", "xhour", "yhour", "chars", "has url", "mentions", "hashtags", "sentiment", "vader neg", "vader pos", "valence", "arousal", "dominance", "time since prev", "topic 2", "topic 3", "topic 4", "topic 5", "topic 6", "topic 7", "topic 8", "topic 9", "topic 10", "participants", "thread length", "thread length^2", "verified", "followers count", "following count", "statuses count", "favourites count", "comments count", "favorite count", "retweet count", "reply count", "quality", "source", "xday", "yday", "xhour", "yhour", "chars", "has url", "mentions", "hashtags", "sentiment", "vader neg", "vader pos", "valence", "arousal", "dominance", "topic 2", "topic 3", "topic 4", "topic 5", "topic 6", "topic 7", "topic 8", "topic 9", "topic 10", "difference", "difference^2")
# multiple-testing correction function -- Benjamini-Hochberg (BH / FDR)
bhp <- function(x){
return(p.adjust(x,"BH"))
}
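# Quick illustrative check (not part of the original analysis): BH adjustment
# rescales a vector of p-values while preserving their ranking.
bhp(c(0.001, 0.01, 0.04, 0.2)) # approx. 0.004 0.020 0.053 0.200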
# Plain version
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vcp*")
# Version with multiple-testing-corrected p-values
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vcp*",apply.p=bhp)
# Version with cluster-corrected pvalues
pvcl <- summary(logitfullout_cl)[,4]
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vc*",p=list(pvcl))
# Version with cluster-corrected pvalues plus multiple-testing corrected pvalues
pvcl <- summary(logitfullout_cl)[,4]
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vcp*",p=list(bhp(pvcl)))
# Version with all together, though requires some hand-editing afterwards
rawp <- summary(logitfullout)$coefficients[,4]
pvcl <- summary(logitfullout_cl)[,4]
stargazer(logitfullout,logitfullout,logitfullout,logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="c*",p=list(1,rawp,pvcl,bhp(pvcl)),single.row=TRUE)
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vc")
|
/model/conversation_dynamics_model.R
|
no_license
|
sshugars/conversations
|
R
| false | false | 10,725 |
r
|
setwd("/Users/Shugars/Dropbox/research projects/conversation dynamics/replication materials")
# read in data
targ <- read.table("data/conv_features_final.txt.gzip",header=F,sep=",",stringsAsFactors=F)
# varnames
colnames(targ) <- c('y',
'j',
'threadID',
'tweet_t',
'tweet_tp1',
'i_t',
'i_tp1',
't',
'i_t_verified',
'i_t_followers_count',
'i_t_following_count',
'i_t_statuses_count',
'i_t_favourites_count',
'i_t_comments_count',
'i_tp1_verified',
'i_tp1_followers_count',
'i_tp1_following_count',
'i_tp1_statuses_count',
'i_tp1_favourites_count',
'i_tp1_comments_count',
'i_tp1_response',
'tweet_t_favorite_count',
'tweet_t_retweet_count',
'tweet_t_max_reply_count',
'tweet_t_max_quality',
'tweet_t_source',
'tweet_t_xday',
'tweet_t_yday',
'tweet_t_xhour',
'tweet_t_yhour',
'tweet_t_chars',
'tweet_t_url',
'tweet_t_mentions',
'tweet_t_hashtags',
'tweet_t_sentiment',
'tweet_t_vader_neg',
'tweet_t_vader_neu',
'tweet_t_vader_pos',
'tweet_t_vader_compound',
'tweet_t_valence',
'tweet_t_arousal',
'tweet_t_dominance',
'tweet_t_topic0',
'tweet_t_topic1',
'tweet_t_topic2',
'tweet_t_topic3',
'tweet_t_topic4',
'tweet_t_topic5',
'tweet_t_topic6',
'tweet_t_topic7',
'tweet_t_topic8',
'tweet_t_topic9',
'tweet_t_reply_count',
'tweet_t_quality',
'tweet_tp1_favorite_count',
'tweet_tp1_retweet_count',
'tweet_tp1_reply_count',
'tweet_tp1_quality',
'tweet_tp1_source',
'tweet_tp1_xday',
'tweet_tp1_yday',
'tweet_tp1_xhour',
'tweet_tp1_yhour',
'tweet_tp1_chars',
'tweet_tp1_url',
'tweet_tp1_mentions',
'tweet_tp1_hashtags',
'tweet_tp1_sentiment',
'tweet_tp1_vader_neg',
'tweet_tp1_vader_neu',
'tweet_tp1_vader_pos',
'tweet_tp1_vader_compound',
'tweet_tp1_valence',
'tweet_tp1_arousal',
'tweet_tp1_dominance',
'tweet_tp1_topic0',
'tweet_tp1_topic1',
'tweet_tp1_topic2',
'tweet_tp1_topic3',
'tweet_tp1_topic4',
'tweet_tp1_topic5',
'tweet_tp1_topic6',
'tweet_tp1_topic7',
'tweet_tp1_topic8',
'tweet_tp1_topic9',
'tweet_tm1_favorite_count',
'tweet_tm1_retweet_count',
'tweet_tm1_reply_count',
'tweet_tm1_quality',
'tweet_tm1_source',
'tweet_tm1_xday',
'tweet_tm1_yday',
'tweet_tm1_xhour',
'tweet_tm1_yhour',
'tweet_tm1_chars',
'tweet_tm1_url',
'tweet_tm1_mentions',
'tweet_tm1_hashtags',
'tweet_tm1_sentiment',
'tweet_tm1_vader_neg',
'tweet_tm1_vader_neu',
'tweet_tm1_vader_pos',
'tweet_tm1_vader_compound',
'tweet_tm1_valence',
'tweet_tm1_arousal',
'tweet_tm1_dominance',
'tweet_tm1_topic0',
'tweet_tm1_topic1',
'tweet_tm1_topic2',
'tweet_tm1_topic3',
'tweet_tm1_topic4',
'tweet_tm1_topic5',
'tweet_tm1_topic6',
'tweet_tm1_topic7',
'tweet_tm1_topic8',
'tweet_tm1_topic9',
'tweet_tm1_t',
'tweet_t_tm1_cos',
'tweet_t_tm1_euc',
'j_t_thread_length',
'j_t_participants'
)
head(targ)
dim(targ)
#scale everything but y values
#add quadratic features for euclean distance and thread length
# ** NB: probably shouldn't scale the topics, and remember to leave one out for the regression**
trimmed <- targ[,c(-(1:8),-(24:25),-(43:52),-(55:85),-(107:116))] # cut out y, indicators, endog vars, t+1 vars, topics
trsc <- scale(trimmed)
targ3 <- cbind('y' = targ$y,
trsc, # scaled vars
targ[,c(44:52,108:116)], # topics
'tweet_t_tm1_euc_quad' = trsc[,58]**2, # quadratic terms
'j_t_length_quad' = trsc[,59]**2)
targ4c <- targ3[,c(1,8:14,36:50,52,54:57,71:79,61,60,81,2:7,15:16,34:35,17:27,29,31:33,62:70,59,80)]
# OLS baseline
lmout <- lm(y~.,data=targ4c)
summary(lmout) # r2 of about 0.26
# OLS out-of-sample - 91.1%
set.seed(1)
train <- sample(1:nrow(targ4c),floor(8*nrow(targ4c)/10),replace=F)
dat_tr <- targ4c[train,]
dat_tst <- targ4c[-train,]
1- mean(dat_tst$y) # baseline for this sample: 90.5%
lmoos <- lm(y~.,data=dat_tr)
lyhat <- predict(lmoos, newdata = dat_tst)
lyhat2 <- ifelse(lyhat > 0.5,1,0)
sum(dat_tst$y == lyhat2) / (sum(dat_tst$y == lyhat2) + sum(dat_tst$y != lyhat2))
# Logit full-sample - something wrong, weirdly unstable if remove tweet_t_quality, though OOS works ok without it
logitfullout <- glm(y~.,data=targ4c,family="binomial") # no error clustering
logitfullout_cl <- miceadds::glm.cluster(y~.,data=targ4c,family="binomial",cluster=targ3$tweet_t) # error clustering on prev tweet
summary(logitfullout)
# Logit out-of-sample - 94%
logitoos <- glm(y~.,data=dat_tr,family="binomial")
lyhat <- predict(logitoos, newdata = dat_tst, type="response")
lyhat2 <- ifelse(lyhat > 0.5,1,0)
sum(dat_tst$y == lyhat2) / (sum(dat_tst$y == lyhat2) + sum(dat_tst$y != lyhat2))
# SVM tuned out-of-sample - 97% at 1% in-sample; 98.0% on 10%
library(e1071)
costvalues <- 10^seq(-3,2,1)
svdat <- data.frame(x=as.matrix(targ4c[,-1]),y=as.factor(targ4c$y))
set.seed(1)
train <- sample(1:nrow(svdat),floor(nrow(svdat)/100),replace=F) # NB: should be 80%, but can only manage 10%
svdat_tr <- svdat[train,]
svdat_tst <- svdat[-train,]
tuned.svm <- tune(svm,y~., data=svdat_tr, ranges=list(cost=costvalues), kernel="radial") # takes > 24hrs for 10%
save(tuned.svm,file="svmout.RData")
yhat <- predict(tuned.svm$best.model,newdata=svdat_tst)
save(yhat,file="svmout_oss_preds.RData")
sum(yhat==svdat_tst$y)/length(svdat_tst$y)
table(predicted=yhat,truth=svdat_tst$y)/length(svdat_tst$y)
# for 10%:
# truth
# predicted 0 1
# 0 0.898 0.012
# 1 0.008 0.082
# Latex output from logit
library(stargazer)
vlist <- c("verified", "followers count", "following count", "statuses count", "favourites count", "comments count", "prev response", "favorite count", "retweet count", "reply count", "quality", "source", "xday", "yday", "xhour", "yhour", "chars", "has url", "mentions", "hashtags", "sentiment", "vader neg", "vader pos", "valence", "arousal", "dominance", "time since prev", "topic 2", "topic 3", "topic 4", "topic 5", "topic 6", "topic 7", "topic 8", "topic 9", "topic 10", "participants", "thread length", "thread length^2", "verified", "followers count", "following count", "statuses count", "favourites count", "comments count", "favorite count", "retweet count", "reply count", "quality", "source", "xday", "yday", "xhour", "yhour", "chars", "has url", "mentions", "hashtags", "sentiment", "vader neg", "vader pos", "valence", "arousal", "dominance", "topic 2", "topic 3", "topic 4", "topic 5", "topic 6", "topic 7", "topic 8", "topic 9", "topic 10", "difference", "difference^2")
# multiple testing correction fuction -- BH (FDR) function
bhp <- function(x){
return(p.adjust(x,"BH"))
}
# Plain version
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vcp*")
# Verssion with multiple-testing-corrected pvalues
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vcp*",apply.p=bhp)
# Version with cluster-corrected pvalues
pvcl <- summary(logitfullout_cl)[,4]
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vc*",p=list(pvcl))
# Version with cluster-corrected pvalues plus multiple-testing corrected pvalues
pvcl <- summary(logitfullout_cl)[,4]
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vcp*",p=list(bhp(pvcl)))
# Version with all together, though requires some hand-editing afterwards
rawp <- summary(logitfullout)$coefficients[,4]
pvcl <- summary(logitfullout_cl)[,4]
stargazer(logitfullout,logitfullout,logitfullout,logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="c*",p=list(1,rawp,pvcl,bhp(pvcl)),single.row=TRUE)
stargazer(logitfullout, title="title", covariate.labels=vlist,
no.space=TRUE, align=TRUE, dep.var.labels.include=F,dep.var.caption = "",
omit.stat=c("all"),header=FALSE,digits=3,report="vc")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/friendships.followers.R
\name{friendships.followers}
\alias{friendships.followers}
\title{Return the followers list of a user.}
\usage{
friendships.followers(roauth, uid, screen_name, count = 20, cursor = 0, ...)
}
\arguments{
\item{roauth}{An OAuth object created by \code{\link{createOAuth}}.}
\item{uid}{User ID to be queried.}
\item{screen_name}{User nickname to be queried.}
\item{count}{The number of records returned per page. The default value is 20; the maximum is 200.}
\item{cursor}{The cursor of the returned data. The returned next_cursor points to the next page and the returned previous_cursor to the previous page. The default is 0.}
\item{...}{Other request parameters for this API.}
}
\value{
A list of users; each user contains:
\item{id}{User ID}
\item{screen_name}{User nickname}
\item{name}{Friendly displayed name}
\item{province}{Province ID where the user is located}
\item{city}{City ID where the user is located}
\item{location}{Address where the user is located}
\item{description}{User description}
\item{url}{URL of the user's blog}
\item{profile_image_url}{Profile image}
\item{domain}{The user's personalized weibo url}
\item{gender}{Gender, m : male; f : female; n : unknown}
\item{followers_count}{Followers count}
\item{friends_count}{Friends count}
\item{statuses_count}{Weibo count}
\item{favourites_count}{Favorites count}
\item{created_at}{Created time}
\item{following}{Whether the current user is following the user}
\item{allow_all_act_msg}{Whether anyone can send a direct message to the user}
\item{geo_enabled}{Whether geographic information is enabled}
\item{verified}{Whether the user is verified by real identity, marked with "V"}
\item{allow_all_comment}{Whether anyone can comment on the user's weibo}
\item{avatar_large}{Profile large image}
\item{verified_reason}{Verification reason}
\item{follow_me}{Whether the user is following the current user}
\item{online_status}{Whether the user is online, 0 : offline, 1 : online}
\item{bi_followers_count}{The number of users who both follow and are followed by this user}
\item{status}{The latest weibo of the user}
}
\description{
Return the followers list of a user.
}
\note{
Either uid or screen_name must be provided. At most 5000 followers can be returned.
}
\examples{
\dontrun{
friendships.followers(roauth, uid = "1318558807")
}
}
\author{
Jian Li <\email{rweibo@sina.com}>
}
\references{
\url{http://open.weibo.com/wiki/2/friendships/followers}
}
\keyword{Friendships}
|
/man/friendships.followers.Rd
|
no_license
|
sjhfx/Rweibo
|
R
| false | true | 2,578 |
rd
|
|
#### Reset a model object ####
#' @include model.CAB.R
NULL
#' Do this later
#'
#' Do this later
#'
#' @rdname set_param
#'
#' @exportMethod set_param
setGeneric( "set_param", function( model, organism_params ) standardGeneric( "set_param" ) )
setMethod( "set_param", signature( model = "CAB.model", organism_params = "list" ),
function( model, organism_params ){
reset_model_helper( model, organism_params )
}
)
reset_model_helper = function( model, organism_params ){
list2env( x = organism_params, envir = model@organism )
eval( model@derived_params, model@organism )
}
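# Illustrative, self-contained sketch (not part of the CAB package) of the mechanism
# reset_model_helper() relies on: parameters are written into an environment with
# list2env(), and a stored expression is re-evaluated there to refresh derived values.
# `alpha`, `beta`, and `rate` are hypothetical parameter names.
org <- new.env()
derived <- quote(rate <- alpha * beta)              # stands in for model@derived_params
list2env(list(alpha = 0.1, beta = 2), envir = org)  # stands in for organism_params
eval(derived, org)                                  # recompute derived quantities
org$rate                                            # 0.2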
|
/R/reset_model.R
|
no_license
|
Don-Li/CAB
|
R
| false | false | 604 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc.R
\name{makeDocs}
\alias{makeDocs}
\title{Make documentation web site.}
\usage{
makeDocs(doc.dir)
}
\arguments{
\item{doc.dir}{A directory containing example subdirectories.}
}
\value{
nothing.
}
\description{
Make documentation web site.
}
\author{
Toby Dylan Hocking
}
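% Illustrative call (not from the original file; the path below is a placeholder):
\examples{
\dontrun{
makeDocs("path/to/doc.dir")
}
}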
|
/man/makeDocs.Rd
|
no_license
|
viswaraavi/animint
|
R
| false | true | 342 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/http.r
\name{ec2HTTP}
\alias{ec2HTTP}
\title{EC2 API Requests}
\usage{
ec2HTTP(query = list(), dryrun, region = Sys.getenv("AWS_DEFAULT_REGION",
"us-east-1"), key = Sys.getenv("AWS_ACCESS_KEY_ID"),
secret = Sys.getenv("AWS_SECRET_ACCESS_KEY"), version = "2015-10-01", ...)
}
\arguments{
\item{query}{A named list of query string parameters.}
\item{dryrun}{An optional logical specifying whether to execute a consequence-free \dQuote{dry run} of the request.}
\item{region}{A character string containing the AWS region. If missing, defaults to \dQuote{us-east-1}.}
\item{key}{A character string containing an AWS Access Key ID. If missing, defaults to value stored in environment variable \dQuote{AWS_ACCESS_KEY_ID}.}
\item{secret}{A character string containing an AWS Secret Access Key. If missing, defaults to value stored in environment variable \dQuote{AWS_SECRET_ACCESS_KEY}.}
\item{version}{A character string specifying an API version. Default is \dQuote{2015-10-01}.}
\item{...}{Additional arguments passed to \code{\link[httr]{GET}}.}
}
\value{
A list
}
\description{
Execute an EC2 API Request
}
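% Illustrative request (not from the original file; DescribeRegions is just one example
% EC2 Query API action, and credentials are read from the environment variables above):
\examples{
\dontrun{
ec2HTTP(query = list(Action = "DescribeRegions"))
}
}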
|
/man/ec2HTTP.Rd
|
no_license
|
Sandy4321/aws.ec2
|
R
| false | true | 1,195 |
rd
|
|
library(RColorBrewer)
library(SNPRelate)
library(gdsfmt)
library(scales)
library(ggplot2)
library(hierfstat)
setwd("~/Documents/Ch2-NAm-RAD/analysis")
### TEST FOR MISSING DATA ###
# convert SNPs to binary format (gdsfmt or Genomic Data Structure data files) - accelerates computation
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/Ch2-NAm-RAD/analysis/EUSTallSNPr8HWE.recode.vcf", out.fn="eustrad.hwe.gds",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eustrad.hwe.gds")
genofile <- snpgdsOpen("eustrad.hwe.gds")
# 14134 SNPs
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/starlingRAD/analysis/EUSTallSNPr8.nomiss.vcf.recode.vcf", out.fn="eustrad.nomiss.gds",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eustrad.nomiss.gds")
genofile <- snpgdsOpen("eustrad.nomiss.gds")
# 6287 SNPs
# get number of loci for one SNP per locus
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/starlingRAD/analysis/EUSToneSNPr8maf01p.vcf", out.fn="eust.gds.one",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eust.gds.one")
# 3568 SNPs
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/starlingRAD/analysis/EUSTallSNPr8_rarer.vcf", out.fn="eustrad.rarer.gds",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eustrad.rarer.gds")
genofile.rare <- snpgdsOpen("eustrad.rarer.gds")
# how much data is missing? (fraction for each sample) - median 32%, range 18-56%
miss <- snpgdsSampMissRate(genofile, sample.id=NULL, snp.id=NULL, with.id=TRUE)
miss
summary(miss)
#remove EUST_TX0202 since >50%
###################################### PCAs ####################################
# PCA with SNPRelate
pca <- snpgdsPCA(gdsobj = genofile,autosome.only=FALSE)
pc.percent <- pca$varprop*100
head(round(pc.percent, 2))
pcatab <- data.frame(sample.id = pca$sample.id,
EV1 = pca$eigenvect[,1], # the first eigenvector
EV2 = pca$eigenvect[,2], # the second eigenvector
stringsAsFactors = FALSE)
head(pcatab)
pcatab
plot(pcatab$EV2, pcatab$EV1, xlab="eigenvector 2", ylab="eigenvector 1")
# add labels by population
# order in vcf (check cohort file)
# label particular populations
samplingsite <- c("NM","NM","NM","NM","NM","NM","NM","KS","KS","KS","KS","KS","KS","KS","KS","KS","MO","MO","MO","MO","MO","MO","MO","MO","MO","IL","IL","IL","IL","IL","IL","IL","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","IA","IA","IA","IA","IA","IA","IA","IA","IA","ID","ID","ID","ID","ID","ID","ID","ID","NV","NV","NV","NV","NV","NV","NV","NV","NV","NV","WA","WA","WA","WA","WA","WA","WA","WA","WA","WA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CO","CO","CO","CO","CO","CO","CO","CO","TX","TX","TX","TX","TX","TX","TX","TX","TX","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NY","NY","NY","NY","NY","NY","NY","NY","NY","NY","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","WI","WI","WI","WI","WI","WI","WI","WI")
# alphabetical # population <- c("AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CO","CO","CO","CO","CO","CO","CO","CO","IA","IA","IA","IA","IA","IA","IA","IA","IA","ID","ID","ID","ID","ID","ID","ID","ID","IL","IL","IL","IL","IL","IL","IL","KS","KS","KS","KS","KS","KS","KS","KS","KS","MO","MO","MO","MO","MO","MO","MO","MO","MO","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NM","NM","NM","NM","NM","NM","NM","NV","NV","NV","NV","NV","NV","NV","NV","NV","NV","NY","NY","NY","NY","NY","NY","NY","NY","NY","NY","TX","TX","TX","TX","TX","TX","TX","TX","TX","WA","WA","WA","WA","WA","WA","WA","WA","WA","WA","WI","WI","WI","WI","WI","WI","WI","WI")
pcatab2 <- cbind(pcatab,samplingsite)
pcatab2
write.csv(pcatab2)
# then copy .csv to analysis folder
### PCA plot with all populations
library(viridis)
# colorlist<-colorRampPalette(colors) # unused; overwritten by the viridis palette below
colorlist<-viridis(17) # one color per sampling site
quartz()
pdf("PCAallpop.pdf")
plot(pcatab2$EV1, pcatab2$EV2, xlab="PC1 1.07%", ylab="PC2 1.03%",col="black",bg=colorlist[factor(pcatab2$samplingsite)],pch=21,cex=0.8)
legend("topright",legend=levels(factor(pcatab2$samplingsite)),pch=21,col=c("black"),pt.bg=colorlist,cex=0.8)
dev.off()
### PCA plot with regions
population<-c("west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east")
region <- c("Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","West","West","West","West","West","West","West","West","West","West","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","West","West","West","West","West","West","West","West","West","West","West","Mountain","Mountain","Mountain","Mountain","Mountain","Mountain","Mountain","Mountain","South","South","South","South","South","South","South","South","South","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest")
pcatab3 <- cbind(pcatab,population,region)
pcatab3
# just east and west divisions
colors<-c("#4393c3","#f7f7f7")
quartz()
pdf("PCArealpop.pdf")
plot(pcatab3$EV1, pcatab3$EV2, xlab="PC1 1.07%", ylab="PC2 1.03%",col="black",bg=colors[factor(pcatab3$population)],pch=21,cex=2)
legend("topright",legend=levels(factor(pcatab3$population)),pch=21,col=c("black"),pt.bg=colors,cex=1)
dev.off()
###################################### ADEGENET ####################################
### includes dudi.pca, AMOVA, Mantel
library(adegenet)
library(ade4)
library(poppr)
# make genind object
genind<-read.structure("EUSToneSNPr8maf01p.str",n.ind=158,n.loc=3570,onerowperind=FALSE,col.lab=1,col.pop=2,row.marknames=0,NA.char="-9",ask=FALSE,quiet=FALSE)
genind
# check number of loci if not reading properly
sum(!sapply(read.table("EUSToneSNPr8maf01p.str", sep = "\t"), is.logical))
# 3570
genind<-read.structure("EUSTallSNPr8.stru",n.ind=158,n.loc=15040,onerowperind=FALSE,col.lab=1,col.pop=2,row.marknames=0,NA.char="-9",ask=FALSE,quiet=FALSE)
genind
sum(!sapply(read.table("EUSTallSNPr8.stru", sep = "\t"), is.logical))
# 15040
# modify genind object to update strata for AMOVA
pop(genind) # population info stored in this accessor
# create df to bind to genind with hierarchical levels needed
# vectors already created for PCAs above
individual<-c(as.factor(seq(1,158,1)))
individual
population<-c("west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east")
strata_df<-as.data.frame(cbind(individual,samplingsite,population,region))
strata_df
head(strata_df)
strata(genind)<-strata_df
genind
#genclone<-as.genclone(genind)
#genclone
# add lat-long coordinates
coords <- read.csv("latlongxy.csv")
head(coords)
genind@other$xy <- coords
genind@other$xy
# check strata
genind
table(strata(genind,~population))
poppr(genind)
### Ready for analysis!
### plotting missingness
# out of 42791 alleles, how many are missing (-9 in .stru format)?
genind.df<-read.csv("EUSTallSNPr8.stru", sep="\t")
head(genind.df)
colSums(genind.df == -9)
################################## fst by pop #############################
library(stargazer)
hf.input <- genind2hierfstat(genind)
# table of fst by population
fst.table <- genet.dist(hf.input, method = "WC84")
stargazer(fst.table[1:16,2:17], summary=FALSE, rownames=FALSE)
write.csv(fst.table,"FST_EUSTallSNPr8.csv")
loci <- hf.input[, -1] # Remove the population column
varcomp.glob(levels = data.frame(samplingsite), loci, diploid = TRUE)
test.g(loci, level = population)
test.between(loci, test.lev = population, rand.unit = samplingsite, nperm = 100)
################################## more pop gen #############################
### manhattan plot
library(qqman)
manhattan <- read.table("manhattan.csv", header=TRUE, sep=",")
manhattansubset <- manhattan[complete.cases(manhattan),]
manhattan <- data.frame(manhattansubset)
head(manhattan)
snpsOfInterest <- c(1895,7208,10808,11103)
#for LFMM
snpsOfInterest <- c(1247,1246,14704,12657,10040,5382,10039,3104,5385,3105,4566,4567,4564,11768,4563,11771,11770,3603,14573,14574,3834,4565,3601,3835,7841,14575,1362,11769,10179,1361,3955,7211,11802,11798,11804,3953)
# Bayescan
manhattan(manhattan,chr="CHROM", bp="POS", snp="SNP", p="log10.PO.", logp=TRUE, ylim=c(0,2),ylab="log10p",xlab="Scaffold")
pdf("Manhattan_FST.pdf",height=3)
manhattan(manhattan,chr="CHROM", bp="POS", snp="SNP", p="fst", logp=FALSE, ylab="Weir and Cockerham Fst",xlab="Scaffold",ylim=c(0,0.04),cex=0.5,genomewideline=0.0079,suggestiveline=0.0126,highlight=snpsOfInterest)
dev.off()
#LFMM
library(scales)
quartz()
pdf("LFMM_LocalAdaptation_withlegend.pdf",width=12,height=3)
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.K3.PC2, pch=16, axes=F, col=alpha("#4393c3", 0.8), cex=0.5,
ylab="log10p",xlab="SNP", ylim=c(0,50), xlim=c(0,16000)))
axis(side=1,at=c(0,2000,4000,6000,8000,10000,12000,14000,16000))
axis(side=2)
par(new = T)
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.K3.PC3, pch=16, axes=F, xlab=NA, ylab=NA, col=alpha("gray70",0.8), cex=0.5, ylim=c(0,50)))
abline(h=18, col="black")
par(new = T)
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.PC1.K3, pch=16, ylab=NA, xlab=NA, axes=F, cex=0.5, col="black"))
legend("top",legend=c("PC1","PC2","PC3"),pch=16,col=c("black",alpha("#4393c3",0.8),alpha("gray70",0.8)),cex=1,bty="n",horiz=TRUE)
dev.off()
# plot LFMM and allele frequency
quartz()
pdf("SelectionSignif_v_AlleleFreq.pdf",height=4)
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$SNP, manhattan$minorAF, type="l",lwd=0.2, axes=F, xlab=NA, ylab=NA, col="#92c5de", ylim=c(0,1), xlim=c(0,16000)))
par(new=T)
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.K3.PC3, pch=16, cex=0.5, col="black",
ylab="log10p",xlab="ScNP",
ylim=c(0,50), xlim=c(0,16000)))
abline(h=20, col="black")
par(new = T)
axis(side = 4)
mtext(side = 4, line = 3, 'Allele frequency')
#legend("topright",
#legend=c(expression(-log[10](italic(p))), "FST"),
#lty=c(1,0), pch=c(NA, 16), col=c("gray", "black"))
dev.off()
# plot bayescenv results
quartz()
pdf("Manhattan_Bayescenv.pdf",height=4)
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$CHROM, manhattan$PEP_diff_g_alpha, pch=16, cex=0.5, col="black",
ylab="Difference in PEP",xlab="Scaffold",
ylim=c(0,1), xlim=c(0,400)))
abline(h=0.9, col="black")
par(new = T)
with(manhattan, plot(manhattan$CHROM, manhattan$fst_bayescenv, pch=16, axes=F, xlab=NA, ylab=NA, col="#92c5de", cex=0.5, ylim=c(0,1), xlim=c(0,400)))
par(new = T)
with(manhattan, plot(manhattan$CHROM, manhattan$fst, type="l",lwd=2, axes=F, xlab=NA, ylab=NA, col="#2166ac", ylim=c(0,1), xlim=c(0,400)))
axis(side = 4)
mtext(side = 4, line = 3, 'FST')
#legend("topright",
#legend=c(expression(-log[10](italic(p))), "FST"),
#lty=c(1,0), pch=c(NA, 16), col=c("gray", "black"))
dev.off()
# compare LFMM and Bayescenv
quartz()
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$CHROM, manhattan$LFMM.K3, pch=16, cex=1, col="gray",
ylab=expression(-log[10](italic(p))),xlab="Scaffold",
ylim=c(0,25), xlim=c(0,400)))
par(new = T)
with(manhattan, plot(manhattan$CHROM, manhattan$PEP_diff_g_alpha, pch=16, axes=F, xlab=NA, ylab=NA, col="black", cex=0.3, ylim=c(0,1), xlim=c(0,400)))
axis(side = 4)
mtext(side = 4, line = 3, 'Difference in PEP')
######## summary stats beyond FST
library(mmod)
library(reshape2)
setPop(genind) <- ~population # use ~population to analyze by pop
diff_genind.all <- diff_stats(genind)
diff_genind.all
per.locus <- melt(diff_genind.all$per.locus, varnames = c("Locus", "Statistic"))
stats <- c("Hs", "Ht", "Gst", "Gprime_st", "D", "D")
glob <- data.frame(Statistic = stats, value = diff_genind.all$global)
head(per.locus)
ggplot(per.locus, aes(x = Statistic, y = value)) +
geom_boxplot() +
geom_point() +
geom_point(size = rel(3), color = "red", data = glob) +
theme_bw() +
ggtitle("Estimates of population differentiation")
# observed vs expected heterozygosity
genind.smry <- summary(genind)
genind.smry
pdf("Heterozygosity.pdf")
plot(genind.smry$Hexp, genind.smry$Hobs, xlab="Expected heterozygosity",ylab="Observed heterozygosity",pch=16,cex=0.5)
abline(0, 1, col = "gray")
dev.off()
t.test(genind.smry$Hexp, genind.smry$Hobs, paired = TRUE, var.equal = TRUE)
# another PCA
genind.pca1 <- dudi.pca(genind, cent = FALSE, scale = FALSE, scannf = FALSE, nf=2)
barplot(genind.pca1$eig)
genind.pca1
s.label(genind.pca1$li)
s.kde2d(genind.pca1$li, add.p = TRUE, cpoint = 0)
add.scatter.eig(genind.pca1$eig, 2, 1, 2)
# plotting for figure
library(viridis)
library(factoextra)
dudi.pca.plot <- s.class(genind.pca1$li, fac=genind@strata$population, xax = 1, yax = 2,
grid=FALSE, axesell=FALSE, col=viridis(17),clabel=1,addaxes=1)
scree.plot <- screeplot(genind.pca1,main="",npcs=25)
### Mantel test for IBD
library(vegan)
dist.geo <- dist(genind$other$xy)
dist.genet <- dist(genind)
mtest <- mantel(dist.genet, dist.geo,method="spearman")
mtest
pdf("GenetvGeoDistance.pdf",h=5,w=5)
plot(dist.geo,dist.genet,xlab="Geographic distance",ylab="Genetic distance", cex=0.8)
dev.off()
mantel.correlog <- mantel.correlog(dist(genind),D.geo=dist.geo,r.type="spearman", nperm=1000)
mantel.correlog
plot(mantel.correlog)
mantel.correlog.more <- mantel.correlog(dist(genind),D.geo=dist.geo,r.type="spearman", nperm=99999)
mantel.correlog.more
plot(mantel.correlog.more)
#### plotting distribution of data
hist(dist.geo)
##### Partial Mantel for IBE vs IBD #####
env <- read.csv("EUSTrad.env.csv")
env.df <- as.data.frame(env)
str(env.df)
head(env.df)
library(vegan)
BIO1dist<-vegdist(env$BIO1, method="euclidean")
BIO1dist
BIO12dist<-vegdist(env$BIO12, method="euclidean")
BIO12dist
BIO16dist<-vegdist(env$BIO16, method="euclidean")
BIO16dist
BIO4dist<-vegdist(env$BIO4, method="euclidean")
BIO4dist
BIO7dist<-vegdist(env$BIO7, method="euclidean")
BIO7dist
elevdist<-vegdist(env$elevation, method="euclidean")
elevdist
#Mantel test for bioclim
mantel.partial(dist.genet, BIO1dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO12dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO16dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO4dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO7dist, dist.geo, method="spearman", permutations = 999)
#Mantel test for elevation
mantel.partial(dist.genet, elevdist, dist.geo, method="spearman", permutations=999)
### spatial autocorrelation
library(geoR)
### AMOVA
# run AMOVA on genind (genclone is haploid...)
# check that "total" is 2N-1 where N is number of individuals
# real pop is just east-west based on EEMS break
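# Illustrative check (added): with 158 diploid individuals the "Total" df should be 2N - 1.
2 * nInd(genind) - 1 # 315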
amova.pop<-poppr.amova(genind,hier=~population,method="ade",nperm=1000)
amova.pop
amova.pop.pegas<-poppr.amova(genind,hier=~population,method="pegas",nperm=1000)
amova.pop.pegas
amova.samples<-poppr.amova(genind,hier=~population,within=FALSE,method="ade",nperm=1000)
amova.samples
# test for significance
set.seed(1230)
amova.pop.signif<-randtest(amova.pop,nrepet=999)
amova.pop.signif
pdf("AMOVA.pop.signif.pdf")
plot(amova.pop.signif)
dev.off()
# rand test for variance within individuals
as.randtest(sim=rnorm(1000),obs=amova.pop.signif$obs[1],alter=c("less"))
# rand test for variance among individuals within population
as.randtest(sim=rnorm(1000),obs=amova.pop.signif$obs[2],alter=c("greater"))
# rand test for variance among populations
as.randtest(sim=rnorm(1000),obs=amova.pop.signif$obs[3],alter=c("less"))
# AMOVAs without within-individual variance
set.seed(20151219)
pegas_amova <- pegas::amova(dist.genet ~ population/individual, data = strata_df, nperm = 1000)
pegas_amova
adonis_amova <- adonis(dist.genet ~ population, data = strata_df, permutations = 1000)
adonis_amova
# shuffle population assignments to check if results are sensitive
genind.shuffle <- genind
genind.shuffle2 <- genind
genind.shuffle3 <- genind
set.seed(9001)
head(strata(genind)[sample(nInd(genind)),-1])
strata(genind.shuffle) <- strata(genind.shuffle)[sample(nInd(genind)), -1]
strata(genind.shuffle2) <- strata(genind.shuffle2)[sample(nInd(genind)), -1]
strata(genind.shuffle3) <- strata(genind.shuffle3)[sample(nInd(genind)), -1]
amova.pop.shuffle<-poppr.amova(genind.shuffle,hier=~population,method="ade")
amova.pop.shuffle
amova.pop.shuffle2<-poppr.amova(genind.shuffle2,hier=~population,method="ade")
amova.pop.shuffle2
amova.pop.shuffle3<-poppr.amova(genind.shuffle3,hier=~population,method="ade")
amova.pop.shuffle3
set.seed(1734)
amova.pop.shuffle.signif<-randtest(amova.pop.shuffle,nrepet=999)
plot(amova.pop.shuffle.signif)
amova.pop.shuffle.signif
set.seed(8932)
amova.pop.shuffle2.signif<-randtest(amova.pop.shuffle2,nrepet=999)
plot(amova.pop.shuffle2.signif)
amova.pop.shuffle2.signif
set.seed(3721)
amova.pop.shuffle3.signif<-randtest(amova.pop.shuffle3,nrepet=999)
plot(amova.pop.shuffle3.signif)
amova.pop.shuffle3.signif
# arbitrary regions: Midwest, Southeast, etc.
amova.region<-poppr.amova(genind,hier=~region,method="ade")
amova.region
# with region and population
amova.all<-poppr.amova(genind,hier=~region/population,method="ade")
amova.all
# test for significance
set.seed(1328)
amova.all.signif<-randtest(amova.all,nrepet=999)
plot(amova.all.signif)
amova.all.signif
################# GPhocs
library(HDInterval)
eustrad.mcmc <- read.csv("EUSTallSNPr95.mcmc.log",sep="\t")
head(eustrad.mcmc)
hdi(eustrad.mcmc$tau_root, credMass = 0.95)
hdi(eustrad.mcmc$theta_east, credMass = 0.95)
hdi(eustrad.mcmc$theta_west, credMass = 0.95)
hdi(eustrad.mcmc$theta_root, credMass = 0.95)
hdi(eustrad.mcmc$m_west..east, credMass = 0.95)
hdi(eustrad.mcmc$m_east..west, credMass = 0.95)
############################## conStruct ##############################
library(conStruct)
data(conStruct.data)
# preparing input files
# allele frequency matrix
# no locus labels
conStruct.freqs <- structure2conStruct(infile = "EUSTallSNPr8maf01p.conStruct.str",
start.loci = 3,
onerowperind = TRUE,
missing.datum = -9,
outfile = "conStruct.freqs")
# looked at matrix to check that it worked
# geographic distance matrix
library(fields)
conStruct.coord <- read.csv("latlong.conStruct.csv",stringsAsFactors=FALSE)
conStruct.coord <- as.matrix(conStruct.coord)
conStruct.geodist <- rdist.earth(conStruct.coord,miles=FALSE)
hist(conStruct.geodist)
conStruct.geodist[1:20,1:20]
# because kept several individuals in each pop, cells identical within blocks
# chose to run this way bc diff genetic distances even though same geographic
# make sure geodist is symmetric matrix (diagonal of 0s)
pmean <- function(x,y) (x+y)/2
conStruct.geodist[] <- pmean(conStruct.geodist, matrix(conStruct.geodist, nrow(conStruct.geodist), byrow=TRUE))
conStruct.geodist
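# Quick sanity checks (added): the distance matrix passed to conStruct should be
# symmetric with a (near-)zero diagonal.
isSymmetric(conStruct.geodist)
range(diag(conStruct.geodist)) # should be essentially zero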
library("rstan")
conStruct(spatial = TRUE,
          K = 3,
          freqs = conStruct.freqs,
          geoDist = conStruct.geodist,
          coords = conStruct.coord,
          prefix = "spK3",
          n.chains = 1,
          n.iter = 3000,
          control = list(adapt_delta = 0.99)) # passed through to rstan::sampling
conStruct(spatial = TRUE,
K = 2,
freqs = conStruct.freqs,
geoDist = conStruct.geodist,
coords = conStruct.coord,
prefix = "spK2-2000",
n.chains = 3,
n.iter = 2000)
conStruct(spatial = TRUE,
K = 3,
freqs = conStruct.freqs,
geoDist = conStruct.geodist,
coords = conStruct.coord,
prefix = "spK3-5000",
n.chains = 3,
n.iter = 5000)
#match.layers.x.runs(admix.mat1 = ,admix.mat2 = ,admix.mat1.order = NULL)
load("spK3.5000.conStruct.results.Robj") # loads object `conStruct.results`
load("spK3.5000.data.block.Robj",verbose=TRUE) # loads object `data.block`
str(data.block)
library(maps)
quartz()
pdf("ConStruct_AdmixtureMap.pdf")
#maps::map(xlim = range(conStruct.coord[,1])+c(-4,4), ylim = range(conStruct.coord[,2])+c(-4,4), col="gray")
maps::map(xlim = c(-130,-60), ylim = c(25,50), col="gray")
make.admix.pie.plot(admix.proportions=conStruct.results$chain_3$MAP$admix.proportions, coords=conStruct.coord, add=TRUE)
dev.off()
library(devtools)
source("http://bioconductor.org/biocLite.R")
biocLite("qvalue", suppressUpdates=T)
biocLite("SNPRelate", suppressUpdates=T)
install_github("green-striped-gecko/dartR")
library(dartR)
browseVignettes("dartR")
library(adegenet)
genlight <- read.PLINK()
|
/PopulationStructure.R
|
no_license
|
nathofme/radseq-NAm
|
R
| false | false | 23,668 |
r
|
library(RColorBrewer)
library(SNPRelate)
library(gdsfmt)
library(scales)
library(ggplot2)
library(hierfstat)
setwd("~/Documents/Ch2-NAm-RAD/analysis")
### TEST FOR MISSING DATA ###
# convert SNPs to binary format (gdsfmt or Genomic Data Structure data files) - accelerates computation
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/Ch2-NAm-RAD/analysis/EUSTallSNPr8HWE.recode.vcf", out.fn="eustrad.hwe.gds",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eustrad.hwe.gds")
genofile <- snpgdsOpen("eustrad.hwe.gds")
# 14134 SNPs
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/starlingRAD/analysis/EUSTallSNPr8.nomiss.vcf.recode.vcf", out.fn="eustrad.nomiss.gds",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eustrad.nomiss.gds")
genofile <- snpgdsOpen("eustrad.nomiss.gds")
# 6287 SNPs
# get number of loci for one SNP per locus
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/starlingRAD/analysis/EUSToneSNPr8maf01p.vcf", out.fn="eust.gds.one",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eust.gds.one")
# 3568 SNPs
snpgdsVCF2GDS(vcf.fn="/Users/nataliehofmeister/Documents/starlingRAD/analysis/EUSTallSNPr8_rarer.vcf", out.fn="eustrad.rarer.gds",method = c("biallelic.only"),compress.annotation="ZIP.max", snpfirstdim=FALSE, verbose=TRUE)
snpgdsSummary("eustrad.rarer.gds")
genofile.rare <- snpgdsOpen("eustrad.rarer.gds")
# how much data is missing? (fraction for each sample) - median 32%, range 18-56%
miss <- snpgdsSampMissRate(genofile, sample.id=NULL, snp.id=NULL, with.id=TRUE)
miss
summary(miss)
#remove EUST_TX0202 since >50%
###################################### PCAs ####################################
# PCA with SNPRelate
pca <- snpgdsPCA(gdsobj = genofile,autosome.only=FALSE)
pc.percent <- pca$varprop*100
head(round(pc.percent, 2))
pcatab <- data.frame(sample.id = pca$sample.id,
EV1 = pca$eigenvect[,1], # the first eigenvector
EV2 = pca$eigenvect[,2], # the second eigenvector
stringsAsFactors = FALSE)
head(pcatab)
pcatab
plot(pcatab$EV2, pcatab$EV1, xlab="eigenvector 2", ylab="eigenvector 1")
# add labels by population
# order in vcf (check cohort file)
# label particular populations
samplingsite <- c("NM","NM","NM","NM","NM","NM","NM","KS","KS","KS","KS","KS","KS","KS","KS","KS","MO","MO","MO","MO","MO","MO","MO","MO","MO","IL","IL","IL","IL","IL","IL","IL","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","IA","IA","IA","IA","IA","IA","IA","IA","IA","ID","ID","ID","ID","ID","ID","ID","ID","NV","NV","NV","NV","NV","NV","NV","NV","NV","NV","WA","WA","WA","WA","WA","WA","WA","WA","WA","WA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CO","CO","CO","CO","CO","CO","CO","CO","TX","TX","TX","TX","TX","TX","TX","TX","TX","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NY","NY","NY","NY","NY","NY","NY","NY","NY","NY","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","WI","WI","WI","WI","WI","WI","WI","WI")
# alphabetical # population <- c("AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","AZ","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CA","CO","CO","CO","CO","CO","CO","CO","CO","IA","IA","IA","IA","IA","IA","IA","IA","IA","ID","ID","ID","ID","ID","ID","ID","ID","IL","IL","IL","IL","IL","IL","IL","KS","KS","KS","KS","KS","KS","KS","KS","KS","MO","MO","MO","MO","MO","MO","MO","MO","MO","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","NC","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","NE","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NH","NM","NM","NM","NM","NM","NM","NM","NV","NV","NV","NV","NV","NV","NV","NV","NV","NV","NY","NY","NY","NY","NY","NY","NY","NY","NY","NY","TX","TX","TX","TX","TX","TX","TX","TX","TX","WA","WA","WA","WA","WA","WA","WA","WA","WA","WA","WI","WI","WI","WI","WI","WI","WI","WI")
pcatab2 <- cbind(pcatab,samplingsite)
pcatab2
write.csv(pcatab2)
# then copy .csv to analysis folder
### PCA plot with all populations
colorlist<-colorRampPalette(colors)
colorlist<-viridis(17)
quartz()
pdf("PCAallpop.pdf")
plot(pcatab2$EV1, pcatab2$EV2, xlab="PC1 1.07%", ylab="PC2 1.03%",col="black",bg=colorlist,pch=21,cex=0.8)
legend("topright",legend=levels(pcatab2$samplingsite),pch=21,col=c("black"),pt.bg=colorlist,cex=0.8)
dev.off()
### PCA plot with regions
population<-c("west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east")
region <- c("Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southeast","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Southwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","West","West","West","West","West","West","West","West","West","West","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","Northwest","West","West","West","West","West","West","West","West","West","West","West","Mountain","Mountain","Mountain","Mountain","Mountain","Mountain","Mountain","Mountain","South","South","South","South","South","South","South","South","South","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Northeast","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest","Midwest")
pcatab3 <- cbind(pcatab,region)
pcatab3
# just east and west divisions
colors<-c("#4393c3","#f7f7f7")
quartz()
pdf("PCArealpop.pdf")
plot(pcatab3$EV1, pcatab3$EV2, xlab="PC1 1.07%", ylab="PC2 1.03%",col="black",bg=colors,pch=21,cex=2)
legend("topright",legend=levels(pcatab3$realpop),pch=21,col=c("black"),pt.bg=colors,cex=1)
dev.off()
###################################### ADEGENET ####################################
### includes dudi.pca, AMOVA, Mantel
library(adegenet)
library(ade4)
library(poppr)
# make genind object
genind<-read.structure("EUSToneSNPr8maf01p.str",n.ind=158,n.loc=3570,onerowperind=FALSE,col.lab=1,col.pop=2,row.marknames=0,NA.char="-9",ask=FALSE,quiet=FALSE)
genind
# check number of loci if not reading properly
sum(!sapply(read.table("EUSToneSNPr8maf01p.str", sep = "\t"), is.logical))
# 3570
genind<-read.structure("EUSTallSNPr8.stru",n.ind=158,n.loc=15040,onerowperind=FALSE,col.lab=1,col.pop=2,row.marknames=0,NA.char="-9",ask=FALSE,quiet=FALSE)
genind
sum(!sapply(read.table("EUSTallSNPr8.stru", sep = "\t"), is.logical))
# 15040
# modify genind object to update strata for AMOVA
pop(genind) # population info stored in this accessor
# create df to bind to genind with hierarchical levels needed
# vectors already created for PCAs above
individual<-c(as.factor(seq(1,158,1)))
individual
population<-c("west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","west","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east","east")
strata_df<-as.data.frame(cbind(individual,samplingsite,population,region))
strata_df
head(strata_df)
strata(genind)<-strata_df
genind
#genclone<-as.genclone(genind)
#genclone
# add lat-long coordinates
coords <- read.csv("latlongxy.csv")
head(coords)
genind@other$xy <- coords
genind@other$xy
# check strata
genind
table(strata(genind,~population))
poppr(genind)
### Ready for analysis!
### plotting missingness
# out of 42791 alleles, how many are missing (-9 in .stru format)?
genind.df<-read.csv("EUSTallSNPr8.stru", sep="\t")
head(genind.df)
colSums(genind.df == -9)
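## a minimal missingness sketch, assuming -9 marks missing genotypes; only numeric columns of the
## .stru table are scanned (a numeric pop column would be scanned too, but never equals -9)
geno.cols <- sapply(genind.df, is.numeric)
miss.total <- sum(genind.df[, geno.cols] == -9, na.rm = TRUE)
miss.prop <- miss.total / (sum(geno.cols) * nrow(genind.df))
miss.total
miss.prop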
################################## fst by pop #############################
library(stargazer)
hf.input <- genind2hierfstat(genind)
# table of fst by population
fst.table <- genet.dist(hf.input, method = "WC84")
fst.mat <- as.matrix(fst.table)  # genet.dist returns a dist object; convert for indexing and export
stargazer(fst.mat[1:16, 2:17], summary=FALSE, rownames=FALSE)
write.csv(fst.mat, "FST_EUSTallSNPr8.csv")
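## quick visual check of the pairwise FST matrix as a heatmap (base graphics only;
## the output file name is arbitrary)
pdf("FST_heatmap.pdf")
image(seq_len(nrow(fst.mat)), seq_len(ncol(fst.mat)), fst.mat,
      xlab = "Population", ylab = "Population", main = "Pairwise WC84 FST")
dev.off()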
loci <- hf.input[, -1] # Remove the population column
varcomp.glob(levels = data.frame(samplingsite), loci, diploid = TRUE)
test.g(loci, level = population)
test.between(loci, test.lev = population, rand.unit = county, nperm = 100)
################################## more pop gen #############################
### manhattan plot
library(qqman)
manhattan <- read.table("manhattan.csv", header=TRUE, sep=",")
manhattansubset <- manhattan[complete.cases(manhattan),]
manhattan <- data.frame(manhattansubset)
head(manhattan)
snpsOfInterest <- c(1895,7208,10808,11103)
#for LFMM
snpsOfInterest <- c(1247,1246,14704,12657,10040,5382,10039,3104,5385,3105,4566,4567,4564,11768,4563,11771,11770,3603,14573,14574,3834,4565,3601,3835,7841,14575,1362,11769,10179,1361,3955,7211,11802,11798,11804,3953)
# Bayescan
manhattan(manhattan,chr="CHROM", bp="POS", snp="SNP", p="log10.PO.", logp=TRUE, ylim=c(0,2),ylab="log10p",xlab="Scaffold")
pdf("Manhattan_FST.pdf",height=3)
manhattan(manhattan,chr="CHROM", bp="POS", snp="SNP", p="fst", logp=FALSE, ylab="Weir and Cockerham Fst",xlab="Scaffold",ylim=c(0,0.04),cex=0.5,genomewideline=0.0079,suggestiveline=0.0126,highlight=snpsOfInterest)
dev.off()
#LFMM
library(scales)
quartz()
pdf("LFMM_LocalAdaptation_withlegend.pdf",width=12,height=3)
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.K3.PC2, pch=16, axes=F, col=alpha("#4393c3", 0.8), cex=0.5,
                     ylab="log10p", xlab="SNP", ylim=c(0,50), xlim=c(0,16000)))
axis(side=1,at=c(0,2000,4000,6000,8000,10000,12000,14000,16000))
axis(side=2)
par(new = T)
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.K3.PC3, pch=16, axes=F, xlab=NA, ylab=NA, col=alpha("gray70",0.8), cex=0.5, ylim=c(0,50)))
abline(h=18, col="black")
par(new = T)
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.PC1.K3, pch=16, ylab=NA, xlab=NA, axes=F, cex=0.5, col="black"))
legend("top",legend=c("PC1","PC2","PC3"),pch=16,col=c("black",alpha("#4393c3",0.8),alpha("gray70",0.8)),cex=1,bty="n",horiz=TRUE)
dev.off()
# plot LFMM and allele frequency
quartz()
pdf("SelectionSignif_v_AlleleFreq.pdf",height=4)
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$SNP, manhattan$minorAF, type="l",lwd=0.2, axes=F, xlab=NA, ylab=NA, col="#92c5de", ylim=c(0,1), xlim=c(0,16000)))
par(new=T)
with(manhattan, plot(manhattan$SNP, manhattan$LFMM.K3.PC3, pch=16, cex=0.5, col="black",
ylab="log10p",xlab="ScNP",
ylim=c(0,50), xlim=c(0,16000)))
abline(h=20, col="black")
par(new = T)
axis(side = 4)
mtext(side = 4, line = 3, 'Allele frequency')
#legend("topright",
#legend=c(expression(-log[10](italic(p))), "FST"),
#lty=c(1,0), pch=c(NA, 16), col=c("gray", "black"))
dev.off()
# plot bayescenv results
quartz()
pdf("Manhattan_Bayescenv.pdf",height=4)
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$CHROM, manhattan$PEP_diff_g_alpha, pch=16, cex=0.5, col="black",
ylab="Difference in PEP",xlab="Scaffold",
ylim=c(0,1), xlim=c(0,400)))
abline(h=0.9, col="black")
par(new = T)
with(manhattan, plot(manhattan$CHROM, manhattan$fst_bayescenv, pch=16, axes=F, xlab=NA, ylab=NA, col="#92c5de", cex=0.5, ylim=c(0,1), xlim=c(0,400)))
par(new = T)
with(manhattan, plot(manhattan$CHROM, manhattan$fst, type="l",lwd=2, axes=F, xlab=NA, ylab=NA, col="#2166ac", ylim=c(0,1), xlim=c(0,400)))
axis(side = 4)
mtext(side = 4, line = 3, 'FST')
#legend("topright",
#legend=c(expression(-log[10](italic(p))), "FST"),
#lty=c(1,0), pch=c(NA, 16), col=c("gray", "black"))
dev.off()
# compare LFMM and Bayescenv
quartz()
par(mar = c(5,5,2,5))
with(manhattan, plot(manhattan$CHROM, manhattan$LFMM.K3, pch=16, cex=1, col="gray",
ylab=expression(-log[10](italic(p))),xlab="Scaffold",
ylim=c(0,25), xlim=c(0,400)))
par(new = T)
with(manhattan, plot(manhattan$CHROM, manhattan$PEP_diff_g_alpha, pch=16, axes=F, xlab=NA, ylab=NA, col="black", cex=0.3, ylim=c(0,1), xlim=c(0,400)))
axis(side = 4)
mtext(side = 4, line = 3, 'Difference in PEP')
######## summary stats beyond FST
library(mmod)
library(reshape2)
setPop(genind.all) <- ~population # use ~population to analyse by population
diff_genind.all <- diff_stats(genind.all)
diff_genind.all
per.locus <- melt(diff_genind.all$per.locus, varnames = c("Locus", "Statistic"))
stats <- c("Hs", "Ht", "Gst", "Gprime_st", "D", "D")
glob <- data.frame(Statistic = stats, value = diff_genind.all$global)
head(per.locus)
ggplot(per.locus, aes(x = Statistic, y = value)) +
geom_boxplot() +
geom_point() +
geom_point(size = rel(3), color = "red", data = glob) +
theme_bw() +
ggtitle("Estimates of population differentiation")
# observed vs expected heterozygosity
genind.smry <- summary(genind)
genind.smry
pdf("Heterozygosity.pdf")
plot(genind.smry$Hexp, genind.smry$Hobs, xlab="Expected heterozygosity",ylab="Observed heterozygosity",pch=16,cex=0.5)
abline(0, 1, col = "gray")
dev.off()
t.test(genind.smry$Hexp, genind.smry$Hobs, paired = TRUE, var.equal = TRUE)
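## a minimal sketch of mean observed vs expected heterozygosity per population, splitting on the
## `population` strata set above (summary() is run on each genind subset; the pop slot is untouched)
het.by.pop <- sapply(split(seq_len(nInd(genind)), strata(genind)$population), function(idx) {
  s <- summary(genind[idx, ])
  c(Hobs = mean(s$Hobs, na.rm = TRUE), Hexp = mean(s$Hexp, na.rm = TRUE))
})
t(het.by.pop)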
# another PCA (dudi.pca needs a numeric table, so build the allele tab first; NAs replaced by the mean)
genind.X <- tab(genind, freq = TRUE, NA.method = "mean")
genind.pca1 <- dudi.pca(genind.X, cent = FALSE, scale = FALSE, scannf = FALSE, nf = 2)
barplot(genind.pca1$eig)
genind.pca1
s.label(genind.pca1$li)
s.kde2d(genind.pca1$li, add.p = TRUE, cpoint = 0)
add.scatter.eig(genind.pca1$eig, 2, 1, 2)
# plotting for figure
library(viridis)
library(factoextra)
dudi.pca.plot <- s.class(genind.pca1$li, fac=genind@strata$population, xax = 1, yax = 2,
grid=FALSE, axesell=FALSE, col=viridis(17),clabel=1,addaxes=1)
scree.plot <- screeplot(genind.pca1,main="",npcs=25)
### Mantel test for IBD
library(vegan)
dist.geo <- dist(genind$other$xy)
dist.genet <- dist(genind.X)  # genetic distance from the allele tab built above (dist() needs a numeric matrix)
mtest <- mantel(dist.genet, dist.geo,method="spearman")
mtest
pdf("GenetvGeoDistance.pdf",h=5,w=5)
plot(dist.geo,dist.genet,xlab="Geographic distance",ylab="Genetic distance", cex=0.8)
dev.off()
mantel.correlog <- mantel.correlog(dist.genet, D.geo=dist.geo, r.type="spearman", nperm=1000)
mantel.correlog
plot(mantel.correlog)
mantel.correlog.more <- mantel.correlog(dist.genet, D.geo=dist.geo, r.type="spearman", nperm=99999)
mantel.correlog.more
plot(mantel.correlog.more)
#### plotting distribution of data
hist(dist.geo)
##### Partial Mantel for IBE vs IBD #####
env <- read.csv("EUSTrad.env.csv")
env.df <- as.data.frame(env)
str(env.df)
head(env.df)
library(vegan)
BIO1dist<-vegdist(env$BIO1, method="euclidean")
BIO1dist
BIO12dist<-vegdist(env$BIO12, method="euclidean")
BIO12dist
BIO16dist<-vegdist(env$BIO16, method="euclidean")
BIO16dist
BIO4dist<-vegdist(env$BIO4, method="euclidean")
BIO4dist
BIO7dist<-vegdist(env$BIO7, method="euclidean")
BIO7dist
elevdist<-vegdist(env$elevation, method="euclidean")
elevdist
#Mantel test for bioclim
mantel.partial(dist.genet, BIO1dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO12dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO16dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO4dist, dist.geo, method="spearman", permutations = 999)
mantel.partial(dist.genet, BIO7dist, dist.geo, method="spearman", permutations = 999)
#Mantel test for elevation
mantel.partial(dist.genet, elevdist, dist.geo, method="spearman", permutations=999)
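## collect the partial Mantel results above into one table and apply a Holm correction across the
## six environmental variables (a sketch; vegan's mantel.partial stores the statistic in $statistic
## and the permutation p-value in $signif)
env.dists <- list(BIO1 = BIO1dist, BIO12 = BIO12dist, BIO16 = BIO16dist,
                  BIO4 = BIO4dist, BIO7 = BIO7dist, elevation = elevdist)
pm.tab <- t(sapply(env.dists, function(d) {
  m <- mantel.partial(dist.genet, d, dist.geo, method = "spearman", permutations = 999)
  c(r = m$statistic, p = m$signif)
}))
pm.tab <- cbind(pm.tab, p.holm = p.adjust(pm.tab[, "p"], method = "holm"))
pm.tab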
### spatial autocorrelation
library(geoR)
### AMOVA
# run AMOVA on genind (genclone is haploid...)
# check that "total" is 2N-1 where N is number of individuals
# real pop is just east-west based on EEMS break
amova.pop<-poppr.amova(genind,hier=~population,method="ade",nperm=1000)
amova.pop
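## quick check of the note above: total degrees of freedom should equal 2N - 1
## (assumes the ade4-style result table carries a "Total" row with a Df column)
amova.pop$results["Total", "Df"] == 2 * nInd(genind) - 1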
amova.pop.pegas<-poppr.amova(genind,hier=~population,method="pegas",nperm=1000)
amova.pop.pegas
amova.samples<-poppr.amova(genind,hier=~population,within=FALSE,method="ade",nperm=1000)
amova.samples
# test for significance
set.seed(1230)
amova.pop.signif<-randtest(amova.pop,nrepet=999)
amova.pop.signif
pdf("AMOVA.pop.signif.pdf")
plot(amova.pop.signif)
dev.off()
# rand test for variance within individuals
as.randtest(sim=rnorm(1000),obs=amova.pop.signif$obs[1],alter=c("less"))
# rand test for variance among individuals within population
as.randtest(sim=rnorm(1000),obs=amova.pop.signif$obs[2],alter=c("greater"))
# rand test for variance among populations
as.randtest(sim=rnorm(1000),obs=amova.pop.signif$obs[3],alter=c("less"))
# AMOVAs without within-individual variance
set.seed(20151219)
pegas_amova <- pegas::amova(dist.genet ~ population/individual, data = strata_df, nperm = 1000)
pegas_amova
adonis_amova <- adonis(dist.genet ~ population, data = strata_df, permutations = 1000)
adonis_amova
# shuffle population assignments to check if results sensitive
genind.shuffle <- genind
genind.shuffle2 <- genind
genind.shuffle3 <- genind
set.seed(9001)
head(strata(genind)[sample(nInd(genind)),-1])
strata(genind.shuffle) <- strata(genind.shuffle)[sample(nInd(genind)), -1]
strata(genind.shuffle2) <- strata(genind.shuffle2)[sample(nInd(genind)), -1]
strata(genind.shuffle3) <- strata(genind.shuffle3)[sample(nInd(genind)), -1]
amova.pop.shuffle<-poppr.amova(genind.shuffle,hier=~population,method="ade")
amova.pop.shuffle
amova.pop.shuffle2<-poppr.amova(genind.shuffle2,hier=~population,method="ade")
amova.pop.shuffle2
amova.pop.shuffle3<-poppr.amova(genind.shuffle3,hier=~population,method="ade")
amova.pop.shuffle3
set.seed(1734)
amova.pop.shuffle.signif<-randtest(amova.pop.shuffle,nrepet=999)
plot(amova.pop.shuffle.signif)
amova.pop.shuffle.signif
set.seed(8932)
amova.pop.shuffle2.signif<-randtest(amova.pop.shuffle2,nrepet=999)
plot(amova.pop.shuffle2.signif)
amova.pop.shuffle2.signif
set.seed(3721)
amova.pop.shuffle3.signif<-randtest(amova.pop.shuffle3,nrepet=999)
plot(amova.pop.shuffle3.signif)
amova.pop.shuffle3.signif
# arbitrary regions: Midwest, Southeast, etc.
amova.region<-poppr.amova(genind,hier=~region,method="ade")
amova.region
# with region and population
amova.all<-poppr.amova(genind,hier=~region/population,method="ade")
amova.all
# test for significance
set.seed(1328)
amova.all.signif<-randtest(amova.all,nrepet=999)
plot(amova.all.signif)
amova.all.signif
################# GPhocs
library(HDInterval)
eustrad.mcmc <- read.csv("EUSTallSNPr95.mcmc.log",sep="\t")
head(eustrad.mcmc)
hdi(eustrad.mcmc$tau_root, credMass = 0.95)
hdi(eustrad.mcmc$theta_east, credMass = 0.95)
hdi(eustrad.mcmc$theta_west, credMass = 0.95)
hdi(eustrad.mcmc$theta_root, credMass = 0.95)
hdi(eustrad.mcmc$m_west..east, credMass = 0.95)
hdi(eustrad.mcmc$m_east..west, credMass = 0.95)
############################## conStruct ##############################
library(conStruct)
data(conStruct.data)
# preparing input files
# allele frequency matrix
# no locus labels
conStruct.freqs <- structure2conStruct(infile = "EUSTallSNPr8maf01p.conStruct.str",
start.loci = 3,
onerowperind = TRUE,
missing.datum = -9,
outfile = "conStruct.freqs")
# looked at matrix to check that it worked
# geographic distance matrix
library(fields)
conStruct.coord <- read.csv("latlong.conStruct.csv",stringsAsFactors=FALSE)
conStruct.coord <- as.matrix(conStruct.coord)
conStruct.geodist <- rdist.earth(conStruct.coord,miles=FALSE)
hist(conStruct.geodist)
conStruct.geodist[1:20,1:20]
# because kept several individuals in each pop, cells identical within blocks
# chose to run this way bc diff genetic distances even though same geographic
# make sure geodist is symmetric matrix (diagonal of 0s)
pmean <- function(x,y) (x+y)/2
conStruct.geodist[] <- pmean(conStruct.geodist, matrix(conStruct.geodist, nrow(conStruct.geodist), byrow=TRUE))
conStruct.geodist
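## sanity checks on the averaged geographic distance matrix: symmetric, zero diagonal, sensible range
isSymmetric(unname(conStruct.geodist))
all(diag(conStruct.geodist) == 0)
range(conStruct.geodist)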
library("rstan")
# run conStruct() directly: it runs its own Stan sampler internally, so wrapping the call in
# rstan::stan() (as in the original) is not valid. The adapt_delta control from the original call
# is dropped here, since passing sampler control options through conStruct() is not assumed to be supported.
conStruct(spatial = TRUE,
          K = 3,
          freqs = conStruct.freqs,
          geoDist = conStruct.geodist,
          coords = conStruct.coord,
          prefix = "spK3",
          n.chains = 1,
          n.iter = 3000)
conStruct(spatial = TRUE,
K = 2,
freqs = conStruct.freqs,
geoDist = conStruct.geodist,
coords = conStruct.coord,
prefix = "spK2-2000",
n.chains = 3,
n.iter = 2000)
conStruct(spatial = TRUE,
K = 3,
freqs = conStruct.freqs,
geoDist = conStruct.geodist,
coords = conStruct.coord,
prefix = "spK3-5000",
n.chains = 3,
n.iter = 5000)
#match.layers.x.runs(admix.mat1 = ,admix.mat2 = ,admix.mat1.order = NULL)
# load() returns only the names of the restored objects; the objects themselves
# (conStruct.results, data.block) are restored directly into the workspace
load("spK3.5000.conStruct.results.Robj", verbose = TRUE)
load("spK3.5000.data.block.Robj", verbose = TRUE)
str(data.block)
library(maps)
quartz()
pdf("ConStruct_AdmixtureMap.pdf")
#maps::map(xlim = range(conStruct.coord[,1])+c(-4,4), ylim = range(conStruct.coord[,2])+c(-4,4), col="gray")
maps::map(xlim = c(-130,-60), ylim = c(25,50), col="gray")
make.admix.pie.plot(admix.proportions=conStruct.results$chain_3$MAP$admix.proportions, coords=conStruct.coord, add=TRUE)
dev.off()
library(devtools)
source("http://bioconductor.org/biocLite.R")
biocLite("qvalue", suppressUpdates=T)
biocLite("SNPRelate", suppressUpdates=T)
install_github("green-striped-gecko/dartR")
library(dartR)
browseVignettes("dartR")
library(adegenet)
genlight <- read.PLINK()  # NB: the PLINK .raw input file path still needs to be supplied here
# Plot Fig S9 showing correlation between bacterial population structure and patient transfer or distance between nursing facilities.
library(entropy)
source("spearman_correlation.R")
# As patient-level hospital transfer information is linked to sensitive clinical data, here we provide pre-processed 1) patient transfer data (see Fig2.R for details) and 2) distance between nursing facilities for downstream analysis.
# Patient transfer KL distance
pt_transfer_dist = readRDS("patient_transfer_KL_distance.RDS")
# Nursing facility distance data
path_fac_dist = readRDS("NF_geocode.RDS")
# Create a matrix containing facility pair names (useful for selecting which pairs to include for analysis later)
geo_mat_names = outer(rownames(path_fac_dist), colnames(path_fac_dist),
Vectorize(FUN = function(x, y){paste0(x, '_', y)}))
geo_df = data.frame('geodist' = path_fac_dist[upper.tri(path_fac_dist)],
'pt_transfer' = pt_transfer_dist[upper.tri(pt_transfer_dist)],
'pair' = geo_mat_names[upper.tri(geo_mat_names)])
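# sanity check: the upper.tri extraction above assumes both matrices are indexed by the
# same facilities in the same order
stopifnot(identical(rownames(path_fac_dist), rownames(pt_transfer_dist)),
          identical(colnames(path_fac_dist), colnames(pt_transfer_dist)))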
# Make a correlation plot showing the correlation between geographical distance and patient transfer between nursing facilities
model = lm(geo_df$pt_transfer ~ geo_df$geodist)
plot(geo_df$geodist, geo_df$pt_transfer, col="lightblue",
pch = 19,
xlab = "Geographical distance between NFs (kilometers)",
ylab = "Difference in patient transfer\npattern between NF pairs",
panel.first = abline(model, col = "pink", lwd = 3))
text(pt_transfer ~ geodist, labels=pair,data=geo_df, cex = 0.75, font=2)
cor_dat = round(spearman_rho_ci(geo_df$pt_transfer, geo_df$geodist, df = geo_df), 2)
p_val = ifelse(cor_dat[2] == 0, "< 0.001", paste0("= ", formatC(cor_dat[2], digits = 2, format = "e")))
rho_ci = paste0(cor_dat[1])
title(main = paste0("Spearman rho = ", rho_ci, "\n",
"p ", p_val), line = 0.5, cex.main=1)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_selection.R
\name{get_selection}
\alias{get_selection}
\title{Get the current selection available in
a graph object}
\usage{
get_selection(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a vector with the current
selection of nodes or edges.
}
\description{
Get the current
selection of node IDs or edge IDs
from a graph object of class
\code{dgr_graph}.
}
\examples{
# Create a simple graph
graph <-
create_graph() \%>\%
add_path(n = 6)
# Select node `4`, then select
# all nodes a distance of 1 away
# from node `4`, and finally
# return the selection of nodes as
# a vector object
graph \%>\%
select_nodes(nodes = 4) \%>\%
select_nodes_in_neighborhood(
node = 4,
distance = 1) \%>\%
get_selection()
# Select edges associated with
# node `4` and return the
# selection of edges
graph \%>\%
select_edges_by_node_id(
nodes = 4) \%>\%
get_selection()
}
## Clear the R environment
rm(list=ls())
## Load packages
library(foreign)
library(ggplot2)
library(scales)
library(dplyr)
library(plyr)
library(car)
library(stargazer)
library(lmtest)
library(sandwich)
## Cluster SE function
cl <- function(dat,fm, cluster){
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
M <- length(unique(cluster))
N <- length(cluster)
K <- fm$rank
dfc <- (M/(M-1))*((N-1)/(N-K))
uj <- apply(estfun(fm),2, function(x) tapply(x, cluster, sum));
vcovCL <- dfc*sandwich(fm, meat=crossprod(uj)/N)
coeftest(fm, vcovCL) }
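## Example usage (hypothetical objects): cl(mydata, myglm, mydata$state) returns a coeftest()-style
## coefficient table with small-sample-corrected, cluster-robust standard errors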
## Set working directory
setwd("/Users/shu8/Dropbox/ConHu Indian Police/Data/Imputation/New Code")
## Read csv data
police.data.save <- read.csv("HuCon_ISQ_Data.csv")
police.data <- read.csv("HuCon_ISQ_Data.csv")
## Delete DAMAN & DIU 2001
police.data <- police.data[-which(is.na(police.data$death_not_remanded)), ]
##############
###Table A1###
##############
stargazer(police.data, median = T)
###############
###Figure A1###
###############
## min max for death
min(police.data$death_remanded, na.rm = T)
max(police.data$death_remanded, na.rm = T)
min(police.data$death_not_remanded, na.rm = T)
max(police.data$death_not_remanded, na.rm = T)
sum(police.data$death_remanded == 0, na.rm = T)
sum(police.data$death_not_remanded == 0, na.rm = T)
library(plyr)
death.state <- ddply(police.data, .(state_ut), summarise, sum = sum(death_remanded))
death.state$not_remanded <- ddply(police.data, .(state_ut), summarise, sum = sum(death_not_remanded))$sum
death.year <- ddply(police.data, .(year), summarise, sum = sum(death_remanded, na.rm = T))
death.year.not <- ddply(police.data, .(year), summarise, sum.not = sum(death_not_remanded, na.rm = T))
merge <- merge(death.year, death.year.not, by = "year")
library(reshape)
merge.long <- melt(merge, id = "year")
names(merge.long)[2]<-"Variable"
f.a1 <- ggplot(merge.long, aes(year, value, colour = Variable)) + geom_line() + scale_x_continuous(breaks = c(2002, 2004, 2006, 2008,2010,2012,2014,2016)) + scale_color_manual(labels = c("Death remanded", "Death not remanded"), values = c("#F8766D", "#00BFC4")) + ylab("Count") + xlab("Year")
f.a1
ggsave("death_time.pdf", f.a1, width = 6, height = 4)
##############
###Table A3###
##############
police.data.2006.all <- subset(police.data, year <= 2006)
death.state.2006.all <- ddply(police.data.2006.all, .(state_ut), summarise, remanded.2006.all = sum(death_remanded, na.rm = T))
death.state.not.2006.all <- ddply(police.data.2006.all, .(state_ut), summarise, notremanded.2006.all = sum(death_not_remanded, na.rm = T))
death.state.2006.all$notremanded.2006.all <- death.state.not.2006.all$notremanded.2006.all
death.state.2006.all
##############
###Table A4###
##############
police.data.2006 <- subset(police.data, year == 2006)
death.state.2006 <- ddply(police.data.2006, .(state_ut), summarise, remanded.2006 = sum(death_remanded, na.rm = T))
death.state.not.2006 <- ddply(police.data.2006, .(state_ut), summarise, notremanded.2006 = sum(death_not_remanded, na.rm = T))
death.state.2006$notremanded.2006 <- death.state.not.2006$notremanded.2006
death.state.2006
################
## Imputation ##
################
## Because Multiple Imputation is a random process, results are slightly different every time
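## (optional) fix the RNG seed before calling amelia() below if exactly reproducible imputations
## are needed, e.g. set.seed(20190101); the seed value itself is arbitrary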
## Load data
police.imp <- police.data.save[, c("state_ut", "year", "death_remanded", "death_not_remanded", "state_pca", "district_pca", "type", "sc_order1", "committee1",
"gdp", "religion2", "head_trans")]
## Load Amelia and Zelig
library("Amelia")
library("Zelig")
## AmeliaView()
## Multiple imputation with settings below
bds.3 <- c(3, 0, 100)
bds.4 <- c(4, 0, 100)
bds.12 <- c(12, 0, 50)
bds <- rbind(bds.3, bds.4, bds.12)
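## Amelia's `bounds` argument is a 3-column matrix: (column index in the data, lower, upper).
## Here columns 3 and 4 are the death counts and column 12 is head_trans (see police.imp above).
colnames(bds) <- c("column", "lower", "upper")
bds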
a.out <- amelia(police.imp, m = 5, idvars = "type",
ts = "year", cs = "state_ut", priors = NULL, lags = "gdp",
empri = 0, intercs = TRUE, leads = "gdp", splinetime = 0,
logs = c("gdp", "head_trans"), sqrts = NULL,
lgstc = NULL, ords = NULL, noms = c("state_pca", "district_pca",
"sc_order1", "committee1", "religion2"), bounds = bds, max.resample = 1000,
tolerance = 1e-04)
## Rewrite outdata to replace the original outdata1.csv - outdata5.csv
## Model results with multiple imputed variables will be slightly different
write.amelia(obj = a.out, file.stem = "outdata")
#############
###Table 1###
#############
police.data.t1 <- police.data[ ,c("death_not_remanded", "death_remanded", "state_ut", "year", "state_pca", "t")]
police.data.t1 <- na.omit(police.data.t1)
## Lagged state_pca
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.data.t1$l.state_pca <- ifelse(is.na(police.data.t1$l.state_pca), 0, police.data.t1$l.state_pca)
## Table 1 model
model.poisson.t1 <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.t1.cl <- cl(police.data.t1, model.poisson.t1 , police.data.t1$state_ut)
stargazer(model.poisson.t1.cl)
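## read the PCA coefficient as an incidence-rate ratio with a 95% CI, using the cluster-robust
## estimate and SE from the coeftest table above (a small convenience sketch)
irr.pca <- exp(model.poisson.t1.cl["l.state_pca", "Estimate"])
irr.pca.ci <- exp(model.poisson.t1.cl["l.state_pca", "Estimate"] +
                    c(-1, 1) * qnorm(0.975) * model.poisson.t1.cl["l.state_pca", "Std. Error"])
irr.pca
irr.pca.ci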
## predict death count if all PCAs are implemented on time
police.imp.p <- police.data.t1
police.imp.p$l.state_pca <- ifelse(police.imp.p$year >= 2008, 1, 0)
Y <- predict(model.poisson.t1, police.imp.p, type="response")
sum(Y)
sum(police.data.t1$death_not_remanded)-sum(Y)
## predict death count if no PCA is implemented.
police.imp.p <- police.data.t1
police.imp.p$l.state_pca <- 0
Y.2 <- predict(model.poisson.t1, police.imp.p, type="response")
sum(Y.2)
sum(Y.2)-sum(police.data.t1$death_not_remanded)
##############
###Figure 1###
##############
## Leads and lags of PCA timing (t) for the placebo test
police.data.f1 <- ddply(police.data.t1, .(state_ut), transform, tm1 = lead(t))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tm2 = lead(tm1))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tm3 = lead(tm2))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tm4 = lead(tm3))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp1 = lag(t))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp2 = lag(tp1))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp3 = lag(tp2))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp4 = lag(tp3))
police.data.f1[is.na(police.data.f1)] <- 0
## Poisson Placebo Test
model.poisson.plb <- glm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + state_ut + as.factor(year), data = police.data.f1, family="poisson")
model.poisson.plb.cl <- cl(police.data.f1, model.poisson.plb, police.data.f1$state_ut)
stargazer(model.poisson.plb.cl)
## Overdispersion test
library(AER)
dispersiontest(model.poisson.plb,trafo=1)
## Graph Placebo Test Figure 3
## Save Ts Poisson result
graph.f1 <- as.data.frame(model.poisson.plb.cl[2:8, ])
graph.f1$time <- c(-3,-2,-1,0,1,2,3)
## Calculate CIs
graph.f1$ci.l <- graph.f1[, 1] - qnorm(0.975)*graph.f1[, 2]
graph.f1$ci.u <- graph.f1[, 1] + qnorm(0.975)*graph.f1[, 2]
graph.f1$ci.l.90 <- graph.f1[, 1] - qnorm(0.95)*graph.f1[, 2]
graph.f1$ci.u.90 <- graph.f1[, 1] + qnorm(0.95)*graph.f1[, 2]
## Plot
p.placebo <- ggplot(graph.f1, aes(time, Estimate))+
#geom_ribbon(aes(ymin=ci.l,ymax=ci.u),alpha=0.3)+
geom_errorbar(aes(ymin=ci.l,ymax=ci.u),width=0.3, color = "#999999")+
#geom_errorbar(aes(ymin=ci.l.90,ymax=ci.u.90),width=0.1, color = "#999999")+
geom_pointrange(aes(ymin=ci.l.90,ymax=ci.u.90),size=1.5, shape = 46, color = "#999999")+
geom_point(size = 2)+
geom_line()+
ylim(-1.1, 1.1)+
xlab("Years from PCA Creation")+
ylab("Coefficient of PCA Creation")+
#geom_line(aes(y=ci.l))+
#geom_line(aes(y=ci.u))+
#geom_line(aes(y=ci.l.90), linetype = "dashed")+
# geom_line(aes(y=ci.u.90), linetype = "dashed")+
geom_hline(yintercept = 0, linetype = "dotted")+
scale_x_continuous(breaks = c(-3, -2, -1, 0, 1, 2, 3))
p.placebo
ggsave("p_placebo_good_2016.pdf", plot = p.placebo, height = 4.5, width = 4.5)
#############
###Table 2###
#############
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
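## tiny worked example of the pooling rule (Rubin 1987): five imputations with similar estimates
## and identical SEs should pool to roughly the same estimate with a slightly inflated SE
se_calc(q = c(0.50, 0.52, 0.48, 0.51, 0.49), se = rep(0.10, 5))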
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## T2 results
## Row: State PCA, State Capacity, and State Desire
## Column: Effect, SE, P value
result.t2
#############
###Table 3###
#############
## Add SHRC to police data
police.data.t1$SHRC <- police.data$SHRC
## Lagged SHRC
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.SHRC = c(NA, SHRC[-length(SHRC)]))
## Fill NA with 0
police.data.t1$l.SHRC <- ifelse(is.na(police.data.t1$l.SHRC), 0, police.data.t1$l.SHRC)
## Correlation check
cor.test(police.data.t1$state_pca, police.data.t1$SHRC)
## Model with SHRC
model.poisson.SHRC <- glm(death_not_remanded ~ 1 + l.SHRC + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.SHRC.cl <- cl(police.data.t1, model.poisson.SHRC, police.data.t1$state_ut)
stargazer(model.poisson.SHRC.cl)
## Model SHRC with controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Add SHRC
police.imp.1.l$l.SHRC <- police.data.t1$l.SHRC
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.SHRC + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## T3 (2) results
## Row: SHRC, State Capacity, and State Desire
## Column: Effect, SE, P value
result.t3
##############
###Table A5###
##############
police.imp.d <- police.data.save[, c("state_ut", "year", "death_remanded", "death_not_remanded", "state_pca", "district_pca", "type", "sc_order1", "committee1",
"gdp", "religion2", "head_trans")]
stargazer(police.imp.d, median = T)
##############
###Table A6###
##############
## OLS Placebo Test
police.data.f3 <- police.data.f1
model.ols.plb <- lm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + state_ut + as.factor(year), data = police.data.f3)
model.ols.plb.cl <- cl(police.data.f3, model.ols.plb, police.data.f3$state_ut)
## OLS Placebo Test with controls
## Loop models for 5 imputation datasets
i <- 1
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add t to outdata
police.imp.1.l$t <- police.data.t1$t
## lags and leads
police.data.f3 <- ddply(police.imp.1.l, .(state_ut), transform, tm1 = lead(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm2 = lead(tm1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm3 = lead(tm2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm4 = lead(tm3))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp1 = lag(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp2 = lag(tp1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp3 = lag(tp2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp4 = lag(tp3))
police.data.f3[is.na(police.data.f3)] <- 0
## OLS Placebo Test with imputed controls
imp.1.p <- lm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + gdp +
head_trans+ state_ut + as.factor(year), data = police.data.f3)
result.p.1 <- cl(police.data.f3, imp.1.p, police.data.f3$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## Replace results to model result
model.ols.plb.cl.c <- result.p.1
model.ols.plb.cl.c[2:10, 1] <- result.t2[, 1]
model.ols.plb.cl.c[2:10, 2] <- result.t2[, 2]
model.ols.plb.cl.c[2:10, 4] <- result.t2[, 3]
model.ols.plb.cl.c
## Poisson Placebo Test with controls
## Loop models for 5 imputation datasets
i <- 1
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add t to outdata
police.imp.1.l$t <- police.data.t1$t
## lags and leads
police.data.f3 <- ddply(police.imp.1.l, .(state_ut), transform, tm1 = lead(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm2 = lead(tm1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm3 = lead(tm2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm4 = lead(tm3))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp1 = lag(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp2 = lag(tp1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp3 = lag(tp2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp4 = lag(tp3))
police.data.f3[is.na(police.data.f3)] <- 0
## Poisson Placebo Test
imp.1.p <- glm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + gdp +
head_trans+ state_ut + as.factor(year), data = police.data.f3, family="poisson")
result.p.1 <- cl(police.data.f3, imp.1.p, police.data.f3$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## Replace results to model result
model.poisson.plb.cl.c <- result.p.1
model.poisson.plb.cl.c[2:10, 1] <- result.t2[, 1]
model.poisson.plb.cl.c[2:10, 2] <- result.t2[, 2]
model.poisson.plb.cl.c[2:10, 4] <- result.t2[, 3]
model.poisson.plb.cl.c
## Make table
stargazer(model.ols.plb.cl, model.ols.plb.cl.c, model.poisson.plb.cl, model.poisson.plb.cl.c)
###############
###Figure A3###
###############
## Save Ts Poisson result
graph.a1 <- as.data.frame(model.poisson.plb.cl.c[2:8, ])
graph.a1$time <- c(-3,-2,-1,0,1,2,3)
## Calculate CIs
graph.a1$ci.l <- graph.a1[, 1] - qnorm(0.975)*graph.a1[, 2]
graph.a1$ci.u <- graph.a1[, 1] + qnorm(0.975)*graph.a1[, 2]
graph.a1$ci.l.90 <- graph.a1[, 1] - qnorm(0.95)*graph.a1[, 2]
graph.a1$ci.u.90 <- graph.a1[, 1] + qnorm(0.95)*graph.a1[, 2]
## Plot
p.placebo.a3 <- ggplot(graph.a1, aes(time, Estimate))+
#geom_ribbon(aes(ymin=ci.l,ymax=ci.u),alpha=0.3)+
geom_errorbar(aes(ymin=ci.l,ymax=ci.u),width=0.3, color = "#999999")+
#geom_errorbar(aes(ymin=ci.l.90,ymax=ci.u.90),width=0.1, color = "#999999")+
geom_pointrange(aes(ymin=ci.l.90,ymax=ci.u.90),size=1.5, shape = 46, color = "#999999")+
geom_point(size = 2)+
geom_line()+
ylim(-1.2, 1.2)+
xlab("Years from PCA Creation")+
ylab("Coefficient of PCA Creation")+
#geom_line(aes(y=ci.l))+
#geom_line(aes(y=ci.u))+
#geom_line(aes(y=ci.l.90), linetype = "dashed")+
# geom_line(aes(y=ci.u.90), linetype = "dashed")+
geom_hline(yintercept = 0, linetype = "dotted")+
scale_x_continuous(breaks = c(-3, -2, -1, 0, 1, 2, 3))
p.placebo.a3
ggsave("p_placebo_controls_2016.pdf", plot = p.placebo.a3, height = 4.8, width = 4.5)
##############
###Table A7###
##############
## Load GTD data
police.data.ta5 <- police.data.t1
police.data.ta5$l.event <- police.data$l.event
#police.data.save <- merge(police.data, gtd.sum.l, by = c("state_ut", "year"), all.x = T)
#police.data.save <- subset(police.data.save, select=-c(iyear, provstate))
#write.csv(police.data.save, "final1.csv")
## fill NA with 0
#police.data.save$l.event <- ifelse(is.na(police.data.save$l.event), 0, police.data.save$l.event)
##Correlation check
cor.test(police.data.ta5$l.event, police.data.ta5$l.state_pca)
## OLS with GTD
model.ols.GTD <- lm(death_not_remanded ~ 1 + l.state_pca + l.event + state_ut + as.factor(year), data = police.data.ta5)
model.ols.GTD.cl <- cl(police.data.ta5, model.ols.GTD, police.data.ta5$state_ut)
## Poisson with GTD
model.poisson.GTD <- glm(death_not_remanded ~ 1 + l.state_pca + l.event + state_ut + as.factor(year), data = police.data.ta5, family="poisson")
model.p.GTD.cl <- cl(police.data.ta5, model.poisson.GTD, police.data.ta5$state_ut)
## Poisson with GTD and Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add GTD l.event to outdata
police.imp.1.l$l.event <- police.data.ta5$l.event
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.event + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace results to model result
model.ols.GTD.cl.c <- result.p.1
model.ols.GTD.cl.c[2:5, 1] <- result.t2[, 1]
model.ols.GTD.cl.c[2:5, 2] <- result.t2[, 2]
model.ols.GTD.cl.c[2:5, 4] <- result.t2[, 3]
model.ols.GTD.cl.c
## Poisson with GTD and Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add GTD l.event to outdata
police.imp.1.l$l.event <- police.data.ta5$l.event
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.event + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace results to model result
model.p.GTD.cl.c <- result.p.1
model.p.GTD.cl.c[2:5, 1] <- result.t2[, 1]
model.p.GTD.cl.c[2:5, 2] <- result.t2[, 2]
model.p.GTD.cl.c[2:5, 4] <- result.t2[, 3]
stargazer(model.ols.GTD.cl, model.ols.GTD.cl.c, model.p.GTD.cl, model.p.GTD.cl.c)
##############
###Table A8###
##############
## Add religion to police data
police.data.t1$religion2 <- police.data$religion2
## OLS Model with religion
model.ols.religion <- lm(death_not_remanded ~ 1 + l.state_pca + religion2 + state_ut + as.factor(year), data = police.data.t1)
model.ols.religion.cl <- cl(police.data.t1, model.ols.religion, police.data.t1$state_ut)
## Poisson Model with religion
model.poisson.religion <- glm(death_not_remanded ~ 1 + l.state_pca + religion2 + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.religion.cl <- cl(police.data.t1, model.poisson.religion, police.data.t1$state_ut)
## OLS Model with religion
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add religion
police.imp.1.l$religion2 <- police.data.t1$religion2
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + religion2 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.religion.cl.c <- result.p.1
model.ols.religion.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.religion.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.religion.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with religion and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add religion
police.imp.1.l$religion2 <- police.data.t1$religion2
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + religion2 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.poisson.religion.cl.c <- result.p.1
model.poisson.religion.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.religion.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.religion.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.religion.cl, model.ols.religion.cl.c, model.poisson.religion.cl, model.poisson.religion.cl.c)
##############
###Table A9###
##############
## OLS
model.ols <- lm(death_not_remanded ~ 1 + l.state_pca + state_ut +
as.factor(year), data = police.data.t1)
model.ols.cl <- cl(police.data.t1, model.ols, police.data.t1$state_ut)
## OLS with logged DV
police.data.t1$death_not_remanded_ln <- log(police.data.t1$death_not_remanded+1)
model.ols.log <- lm(death_not_remanded_ln ~ 1 + l.state_pca + state_ut +
as.factor(year), data = police.data.t1)
model.ols.log.cl <- cl(police.data.t1, model.ols.log, police.data.t1$state_ut)
## OLS with Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace results to model result
model.ols.cl.c <- result.p.1
model.ols.cl.c[2:4, 1] <- result.t2[, 1]
model.ols.cl.c[2:4, 2] <- result.t2[, 2]
model.ols.cl.c[2:4, 4] <- result.t2[, 3]
## OLS with logged DV and Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Log DV
police.imp.1$death_not_remanded_ln <- log(police.imp.1$death_not_remanded+1)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS (logged DV) with imputed dataset i
imp.1.p <- lm(death_not_remanded_ln ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace results to model result
model.ols.log.cl.c <- result.p.1
model.ols.log.cl.c[2:4, 1] <- result.t2[, 1]
model.ols.log.cl.c[2:4, 2] <- result.t2[, 2]
model.ols.log.cl.c[2:4, 4] <- result.t2[, 3]
stargazer(model.ols.cl, model.ols.cl.c,model.ols.log.cl, model.ols.log.cl.c)
###############
###Table A10###
###############
## OLS no lag
model.ols.nl <- lm(death_not_remanded ~ 1 + state_pca + state_ut +
as.factor(year), data = police.data.t1)
model.ols.nl.cl <- cl(police.data.t1, model.ols.nl, police.data.t1$state_ut)
## Poisson no lag
model.p.nl <- glm(death_not_remanded ~ 1 + state_pca + state_ut +
as.factor(year), data = police.data.t1, family="poisson")
model.p.nl.cl <- cl(police.data.t1, model.p.nl, police.data.t1$state_ut)
## OLS no lag with Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS (no lag) with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace results to model result
model.ols.nl.cl.c <- result.p.1
model.ols.nl.cl.c[2:4, 1] <- result.t2[, 1]
model.ols.nl.cl.c[2:4, 2] <- result.t2[, 2]
model.ols.nl.cl.c[2:4, 4] <- result.t2[, 3]
## Poisson no lag with Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace results to model result
model.p.nl.cl.c <- result.p.1
model.p.nl.cl.c[2:4, 1] <- result.t2[, 1]
model.p.nl.cl.c[2:4, 2] <- result.t2[, 2]
model.p.nl.cl.c[2:4, 4] <- result.t2[, 3]
stargazer(model.ols.nl.cl, model.ols.nl.cl.c, model.p.nl.cl, model.p.nl.cl.c)
###############
###Table A11###
###############
## Balanced Panel
police.data.b <- subset(police.data.t1, police.data.t1$state_ut != "TELANGANA")
police.data.b <- subset(police.data.b, police.data.b$state_ut != "Z DAMAN & DIU")
police.data.b$state_ut <- as.factor(as.character(police.data.b$state_ut))
levels(police.data.b$state_ut)
length(police.data.b$death_not_remanded)
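## (Side note, illustrative only: the as.factor(as.character(...)) line above
## re-creates the factor so the dropped states do not linger as empty levels;
## droplevels() is the idiomatic base-R equivalent and gives the same result.)
stopifnot(identical(levels(police.data.b$state_ut),
                    levels(droplevels(police.data.b$state_ut))))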
model.poisson.b <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.b, family="poisson")
model.poisson.b.cl <- cl(police.data.b, model.poisson.b, police.data.b$state_ut)
##Quasi-poisson
model.qp <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family = "quasipoisson")
model.qp.cl <- cl(police.data.t1, model.qp, police.data.t1$state_ut)
##Negative binomial
library(MASS)
model.nb <- glm.nb(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.nb.cl <- cl(police.data.t1, model.nb, police.data.t1$state_ut)
## Delete new states
police.data.nn <- subset(police.data.t1, police.data.t1$state_ut != "TELANGANA")
police.data.nn <- police.data.nn[!police.data.nn$state_ut == "ANDHRA PRADESH", ]
police.data.nn$state_ut <- as.factor(as.character(police.data.nn$state_ut))
levels(police.data.nn$state_ut)
length(police.data.nn$death_not_remanded)
model.poisson.nn <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.nn, family="poisson")
model.poisson.nn.cl <- cl(police.data.nn, model.poisson.nn, police.data.nn$state_ut)
## Delete MAHARASHTRA
police.data.nm <- subset(police.data.t1, police.data.t1$state_ut != "MAHARASHTRA")
police.data.nm$state_ut <- as.factor(as.character(police.data.nm$state_ut))
levels(police.data.nm$state_ut)
length(police.data.nm$death_not_remanded)
model.poisson.nm <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.nm, family="poisson")
model.poisson.nm.cl <- cl(police.data.nm, model.poisson.nm, police.data.nm$state_ut)
## Delete MAHARASHTRA and ANDHRA PRADESH
police.data.nma <- subset(police.data.t1, police.data.t1$state_ut != "MAHARASHTRA")
police.data.nma <- police.data.nma[!police.data.nma$state_ut == "ANDHRA PRADESH", ]
police.data.nma$state_ut <- as.factor(as.character(police.data.nma$state_ut))
levels(police.data.nma$state_ut)
length(police.data.nma$death_not_remanded)
model.poisson.nma <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.nma, family="poisson")
model.poisson.nma.cl <- cl(police.data.nma, model.poisson.nma, police.data.nma$state_ut)
##Print
stargazer(model.poisson.b.cl, model.qp.cl, model.nb.cl, model.poisson.nn.cl , model.poisson.nm.cl, model.poisson.nma.cl)
###############
###Table A12###
###############
## OLS Model with SHRC
model.ols.SHRCc <- lm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + state_ut + as.factor(year), data = police.data.t1)
model.ols.SHRCc.cl <- cl(police.data.t1, model.ols.SHRCc, police.data.t1$state_ut)
## Poisson Model with SHRC
model.poisson.SHRCc <- glm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.SHRCc.cl <- cl(police.data.t1, model.poisson.SHRCc, police.data.t1$state_ut)
## OLS Model with SHRC and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add SHRC
police.imp.1.l$l.SHRC <- police.data.t1$l.SHRC
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.SHRCc.cl.c <- result.p.1
model.ols.SHRCc.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.SHRCc.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.SHRCc.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with SHRC and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add SHRC
police.imp.1.l$l.SHRC <- police.data.t1$l.SHRC
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.poisson.SHRCc.cl.c <- result.p.1
model.poisson.SHRCc.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.SHRCc.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.SHRCc.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.SHRCc.cl, model.ols.SHRCc.cl.c, model.poisson.SHRCc.cl, model.poisson.SHRCc.cl.c)
###############
###Table A13###
###############
## Add party_match to data
police.data.t1$party_match <- police.data$party_match
## OLS Model with party
model.ols.party <- lm(death_not_remanded ~ 1 + l.state_pca + party_match + state_ut + as.factor(year), data = police.data.t1)
model.ols.party.cl <- cl(police.data.t1, model.ols.party, police.data.t1$state_ut)
## Poisson Model with party
model.poisson.party <- glm(death_not_remanded ~ 1 + l.state_pca + party_match + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.party.cl <- cl(police.data.t1, model.poisson.party, police.data.t1$state_ut)
## OLS Model with party and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party
police.imp.1.l$party_match <- police.data.t1$party_match
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + party_match + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.party.cl.c <- result.p.1
model.ols.party.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.party.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.party.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with party and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party
police.imp.1.l$party_match <- police.data.t1$party_match
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + party_match + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.poisson.party.cl.c <- result.p.1
model.poisson.party.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.party.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.party.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.party.cl, model.ols.party.cl.c, model.poisson.party.cl, model.poisson.party.cl.c)
###############
###Table A14###
###############
## Add party_match_2006 to data
police.data.t1$party_match_2006 <- police.data$party_match_2006
## OLS Model with party 2006
model.ols.party06 <- lm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + state_ut + as.factor(year), data = police.data.t1)
model.ols.party06.cl <- cl(police.data.t1, model.ols.party06, police.data.t1$state_ut)
## Poisson Model with party 2006
model.poisson.party06 <- glm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.party06.cl <- cl(police.data.t1, model.poisson.party06, police.data.t1$state_ut)
## OLS Model with party 2006 and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party 2006
police.imp.1.l$party_match_2006 <- police.data.t1$party_match_2006
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.party06.cl.c <- result.p.1
model.ols.party06.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.party06.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.party06.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with party 2006 and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party 2006
police.imp.1.l$party_match_2006 <- police.data.t1$party_match_2006
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.poisson.party06.cl.c <- result.p.1
model.poisson.party06.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.party06.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.party06.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.party06.cl, model.ols.party06.cl.c, model.poisson.party06.cl, model.poisson.party06.cl.c)
###############
###Table A15###
###############
## Add directives to police data
police.data.t1$ssc <- police.data$ssc
police.data.t1$dgp_tenure <- police.data$dgp_tenure
police.data.t1$o_tenure <- police.data$o_tenure
police.data.t1$invest_law <- police.data$invest_law
police.data.t1$peb <- police.data$peb
police.data.t1$district_pca <- police.data$district_pca
## Lagged ssc
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.ssc = c(NA, ssc[-length(ssc)]))
## fill NA with 0
police.data.t1$l.ssc <- ifelse(is.na(police.data.t1$l.ssc), 0, police.data.t1$l.ssc)
## Lagged dgp_tenure
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.dgp_tenure = c(NA, dgp_tenure[-length(dgp_tenure)]))
## fill NA with 0
police.data.t1$l.dgp_tenure <- ifelse(is.na(police.data.t1$l.dgp_tenure), 0, police.data.t1$l.dgp_tenure)
## Lagged o_tenure
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.o_tenure = c(NA, o_tenure[-length(o_tenure)]))
## fill NA with 0
police.data.t1$l.o_tenure <- ifelse(is.na(police.data.t1$l.o_tenure), 0, police.data.t1$l.o_tenure)
## Lagged invest_law
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.invest_law = c(NA, invest_law[-length(invest_law)]))
## fill NA with 0
police.data.t1$l.invest_law <- ifelse(is.na(police.data.t1$l.invest_law), 0, police.data.t1$l.invest_law)
## Lagged peb
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.peb = c(NA, peb[-length(peb)]))
## fill NA with 0
police.data.t1$l.peb <- ifelse(is.na(police.data.t1$l.peb), 0, police.data.t1$l.peb)
## Lagged district_pca
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.district_pca = c(NA, district_pca[-length(district_pca)]))
## fill NA with 0
police.data.t1$l.district_pca <- ifelse(is.na(police.data.t1$l.district_pca), 0, police.data.t1$l.district_pca)
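## Sketch (toy data, illustrative only): the c(NA, x[-length(x)]) construction
## used above shifts each series forward by one year within state_ut, so year t is
## paired with the directive value from year t-1; the first year gets NA and is
## then recoded to 0.
toy <- data.frame(state_ut = rep(c("A", "B"), each = 3),
                  year = rep(2001:2003, 2),
                  ssc = c(0, 1, 1, 1, 0, 1))
ddply(toy, .(state_ut), transform, l.ssc = c(NA, ssc[-length(ssc)]))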
## Directives correlations
## directives data
directives <- police.data.t1[, c("l.ssc", "l.dgp_tenure","l.o_tenure", "l.invest_law", "l.peb", "l.state_pca", "l.district_pca")]
stargazer(cor(directives))
###############
###Table A16###
###############
## OLS Models
model.ols.dis <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.dis.cl <- cl(police.data.t1, model.ols.dis, police.data.t1$state_ut)
model.ols.dir <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + state_ut + as.factor(year), data = police.data.t1)
model.ols.dir.cl <- cl(police.data.t1, model.ols.dir, police.data.t1$state_ut)
model.ols.dir <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.invest_law + state_ut + as.factor(year), data = police.data.t1)
model.ols.dir.i1.cl <- cl(police.data.t1, model.ols.dir, police.data.t1$state_ut)
model.ols.dir <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.o_tenure + state_ut + as.factor(year), data = police.data.t1)
model.ols.dir.i2.cl <- cl(police.data.t1, model.ols.dir, police.data.t1$state_ut)
## Poisson Models
model.p.dis <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dis.cl <- cl(police.data.t1, model.p.dis, police.data.t1$state_ut)
model.p.dir <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dir.cl <- cl(police.data.t1, model.p.dir, police.data.t1$state_ut)
model.p.dir <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.invest_law + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dir.i1.cl <- cl(police.data.t1, model.p.dir, police.data.t1$state_ut)
model.p.dir <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.o_tenure + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dir.i2.cl <- cl(police.data.t1, model.p.dir, police.data.t1$state_ut)
stargazer(model.ols.dis.cl, model.ols.dir.cl, model.ols.dir.i1.cl, model.ols.dir.i2.cl,
model.p.dis.cl, model.p.dir.cl, model.p.dir.i1.cl, model.p.dir.i2.cl)
###############
###Table A17###
###############
## OLS Model with controls
## Dis
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dis.cl.c <- result.p.1
model.ols.dis.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.dis.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.dis.cl.c[2:5, 4] <- result.t3[, 3]
## Dir
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dir.cl.c <- result.p.1
model.ols.dir.cl.c[2:10, 1] <- result.t3[, 1]
model.ols.dir.cl.c[2:10, 2] <- result.t3[, 2]
model.ols.dir.cl.c[2:10, 4] <- result.t3[, 3]
## Dir.i1
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.invest_law +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dir.i1.cl.c <- result.p.1
model.ols.dir.i1.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.ols.dir.i1.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.ols.dir.i1.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
## Dir.i2
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.o_tenure +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dir.i2.cl.c <- result.p.1
model.ols.dir.i2.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.ols.dir.i2.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.ols.dir.i2.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
## Poisson Model with controls
## Dis
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dis.cl.c <- result.p.1
model.p.dis.cl.c[2:5, 1] <- result.t3[, 1]
model.p.dis.cl.c[2:5, 2] <- result.t3[, 2]
model.p.dis.cl.c[2:5, 4] <- result.t3[, 3]
## Dir
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dir.cl.c <- result.p.1
model.p.dir.cl.c[2:10, 1] <- result.t3[, 1]
model.p.dir.cl.c[2:10, 2] <- result.t3[, 2]
model.p.dir.cl.c[2:10, 4] <- result.t3[, 3]
## Dir.i1
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.invest_law +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dir.i1.cl.c <- result.p.1
model.p.dir.i1.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.p.dir.i1.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.p.dir.i1.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
## Dir.i2
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.o_tenure +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family = "poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dir.i2.cl.c <- result.p.1
model.p.dir.i2.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.p.dir.i2.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.p.dir.i2.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
stargazer(model.ols.dis.cl.c, model.ols.dir.cl.c, model.ols.dir.i1.cl.c, model.ols.dir.i2.cl.c,
model.p.dis.cl.c, model.p.dir.cl.c, model.p.dir.i1.cl.c, model.p.dir.i2.cl.c)
###############
###Table A18###
###############
## Add pca_bind
police.data.t1$pca_bind <- police.data$pca_bind
## Lag pca_bind
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.pca_bind = c(NA, pca_bind[-length(pca_bind)]))
## fill NA with 0
police.data.t1$l.pca_bind <- ifelse(is.na(police.data.t1$l.pca_bind), 0, police.data.t1$l.pca_bind)
## Poisson no control binding
model.poisson.bind <- glm(death_not_remanded ~ 1 + l.state_pca + l.pca_bind + as.factor(state_ut) + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.bind.cl <- cl(police.data.t1, model.poisson.bind, police.data.t1$state_ut)
stargazer(model.poisson.bind.cl)
## categorical variable
police.data.t1$bindinglvl <- ifelse(police.data.t1$l.pca_bind == 1 & police.data.t1$l.state_pca == 1, "Binding",
ifelse(police.data.t1$l.pca_bind == 0 & police.data.t1$l.state_pca == 1, "Regular", "No PCA"))
police.data.t1$bindinglvl <- as.factor(police.data.t1$bindinglvl)
police.data.t1$bindinglvl <- relevel(police.data.t1$bindinglvl, ref = "Regular")
levels(police.data.t1$bindinglvl)
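## Quick sanity check (not part of the table): the recode above should map
## (l.state_pca, l.pca_bind) onto the three categories as intended --
## (1,1) -> "Binding", (1,0) -> "Regular", (0,*) -> "No PCA".
with(police.data.t1, table(bindinglvl, l.state_pca, l.pca_bind))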
## Poisson no control binding categorical
model.poisson.bind.ca <- glm(death_not_remanded ~ 1 + bindinglvl + as.factor(state_ut) + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.bind.ca.cl <- cl(police.data.t1, model.poisson.bind.ca, police.data.t1$state_ut)
stargazer(model.poisson.bind.ca.cl)
##############
###Table A19##
##############
## Add media_women
police.data.t1$media_women <- police.data$media_women_0
police.data.media <- police.data.t1[-which(is.na(police.data.t1$media_women)), ]
police.data.media$state_ut <- as.factor(as.character(police.data.media$state_ut))
levels(police.data.media$state_ut)
## OLS Model with media_women
model.ols.media <- lm(death_not_remanded ~ 1 + l.state_pca + media_women + state_ut + as.factor(year), data = police.data.media)
model.ols.media.cl <- cl(police.data.media, model.ols.media, police.data.media$state_ut)
## Poisson Model with media_women
model.p.media <- glm(death_not_remanded ~ 1 + l.state_pca + media_women + state_ut + as.factor(year), data = police.data.media, family="poisson")
model.p.media.cl <- cl(police.data.media, model.p.media, police.data.media$state_ut)
## OLS Model with media_women and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add media women
police.imp.1.l$media_women <- police.data.t1$media_women
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$media_women)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
levels(police.imp.1.l$state_ut)
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + media_women + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.media.cl.c <- result.p.1
model.ols.media.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.media.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.media.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with media_women and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add media women
police.imp.1.l$media_women <- police.data.t1$media_women
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$media_women)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + media_women + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.media.cl.c <- result.p.1
model.p.media.cl.c[2:5, 1] <- result.t3[, 1]
model.p.media.cl.c[2:5, 2] <- result.t3[, 2]
model.p.media.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.media.cl, model.ols.media.cl.c, model.p.media.cl, model.p.media.cl.c)
##############
###Table A20##
##############
## Add literacy
police.data.t1$literacy <- police.data$literacy
police.data.liter <- police.data.t1[-which(is.na(police.data.t1$literacy)), ]
police.data.liter$state_ut <- as.factor(as.character(police.data.liter$state_ut))
levels(police.data.liter$state_ut)
## OLS Model with literacy
model.ols.liter <- lm(death_not_remanded ~ 1 + l.state_pca + literacy + state_ut + as.factor(year), data = police.data.liter)
model.ols.liter.cl <- cl(police.data.liter, model.ols.liter, police.data.liter$state_ut)
## Poisson Model with literacy
model.p.liter <- glm(death_not_remanded ~ 1 + l.state_pca + literacy + state_ut + as.factor(year), data = police.data.liter, family="poisson")
model.p.liter.cl <- cl(police.data.liter, model.p.liter, police.data.liter$state_ut)
## OLS Model with literacy and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add literacy
police.imp.1.l$literacy <- police.data.t1$literacy
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$literacy)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
levels(police.imp.1.l$state_ut)
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + literacy + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.liter.cl.c <- result.p.1
model.ols.liter.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.liter.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.liter.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with literacy and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add literacy
police.imp.1.l$literacy <- police.data.t1$literacy
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$literacy)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + literacy + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.liter.cl.c <- result.p.1
model.p.liter.cl.c[2:5, 1] <- result.t3[, 1]
model.p.liter.cl.c[2:5, 2] <- result.t3[, 2]
model.p.liter.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.liter.cl, model.ols.liter.cl.c, model.p.liter.cl, model.p.liter.cl.c)
##############
###Table A21##
##############
## Total Death
police.data.t1$total_death <- police.data.t1$death_not_remanded + police.data.t1$death_remanded
## OLS Model with logged total deaths
police.data.t1$total_death_ln <- log(police.data.t1$total_death + 1)
model.ols.total.l <- lm(total_death_ln ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.total.l.cl <- cl(police.data.t1, model.ols.total.l, police.data.t1$state_ut)
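## Sanity check (illustrative only): log(total_death + 1) above is the same
## quantity as log1p(total_death); in this log-linear specification exp(beta) - 1
## is roughly the proportional change in deaths for a one-unit change in the
## regressor, e.g. exp(coef(model.ols.total.l)["l.state_pca"]) - 1.
stopifnot(all.equal(police.data.t1$total_death_ln, log1p(police.data.t1$total_death)))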
## OLS Model with total deaths
model.ols.total <- lm(total_death ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.total.cl <- cl(police.data.t1, model.ols.total, police.data.t1$state_ut)
## Poisson Model with total deaths
model.p.total <- glm(total_death ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.total.cl <- cl(police.data.t1, model.p.total, police.data.t1$state_ut)
## OLS Model with total deaths and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Total Death
police.imp.1.l$total_death <- police.imp.1.l$death_not_remanded + police.imp.1.l$death_remanded
## OLS with imputed dataset i
imp.1.p <- lm(total_death ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.total.cl.c <- result.p.1
model.ols.total.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.total.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.total.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with total deaths and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Total Death
police.imp.1.l$total_death <- police.imp.1.l$death_not_remanded + police.imp.1.l$death_remanded
## Poisson with outdata1.csv
imp.1.p <- glm(total_death ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.total.cl.c <- result.p.1
model.p.total.cl.c[2:5, 1] <- result.t3[, 1]
model.p.total.cl.c[2:5, 2] <- result.t3[, 2]
model.p.total.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.total.cl, model.ols.total.cl.c, model.p.total.cl, model.p.total.cl.c)
##############
###Table A22##
##############
## OLS Model
model.ols.remanded <- lm(death_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.remanded.cl <- cl(police.data.t1, model.ols.remanded, police.data.t1$state_ut)
## Poisson Model
model.p.remanded <- glm(death_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.remanded.cl <- cl(police.data.t1, model.p.remanded, police.data.t1$state_ut)
## OLS Model with controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS with imputed dataset i
imp.1.p <- lm(death_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.remanded.cl.c <- result.p.1
model.ols.remanded.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.remanded.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.remanded.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson with outdata1.csv
imp.1.p <- glm(death_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.p.remanded.cl.c <- result.p.1
model.p.remanded.cl.c[2:5, 1] <- result.t3[, 1]
model.p.remanded.cl.c[2:5, 2] <- result.t3[, 2]
model.p.remanded.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.remanded.cl, model.ols.remanded.cl.c, model.p.remanded.cl, model.p.remanded.cl.c)
###############
###Table A23###
###############
## Add pca_ind
police.data.t1$pca_ind <- police.data$pca_ind
## Lag pca_ind
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.pca_ind = c(NA, pca_ind[-length(pca_ind)]))
## fill NA with 0
police.data.t1$l.pca_ind <- ifelse(is.na(police.data.t1$l.pca_ind), 0, police.data.t1$l.pca_ind)
## categorical variable
police.data.t1$indlvl <- ifelse(police.data.t1$l.pca_ind == 1 & police.data.t1$l.state_pca == 1, "Ind",
ifelse(police.data.t1$l.pca_ind == 0 & police.data.t1$l.state_pca == 1, "Regular", "No PCA"))
police.data.t1$indlvl <- as.factor(police.data.t1$indlvl)
police.data.t1$indlvl <- relevel(police.data.t1$indlvl, ref = "Regular")
levels(police.data.t1$indlvl)
## Poisson no control binding categorical
model.poisson.ind.ca <- glm(death_not_remanded ~ 1 + indlvl + as.factor(state_ut) + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.ind.ca.cl <- cl(police.data.t1, model.poisson.ind.ca, police.data.t1$state_ut)
stargazer(model.poisson.ind.ca.cl)
##############
###Table A24##
##############
## Add ngo
police.data.t1$ngo <- police.data$ngo*100
police.data.ngo <- police.data.t1[-which(is.na(police.data.t1$ngo)), ]
police.data.ngo$state_ut <- as.factor(as.character(police.data.ngo$state_ut))
levels(police.data.ngo$state_ut)
## OLS Model with ngo
model.ols.ngo <- lm(death_not_remanded ~ 1 + l.state_pca + ngo + state_ut + as.factor(year), data = police.data.ngo)
model.ols.ngo.cl <- cl(police.data.ngo, model.ols.ngo, police.data.ngo$state_ut)
## Poisson Model with ngo
model.p.ngo <- glm(death_not_remanded ~ 1 + l.state_pca + ngo + state_ut + as.factor(year), data = police.data.ngo, family="poisson")
model.p.ngo.cl <- cl(police.data.ngo, model.p.ngo, police.data.ngo$state_ut)
## OLS Model with ngo
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add ngo
police.imp.1.l$ngo <- police.data$ngo*100
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$ngo)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
levels(police.imp.1.l$state_ut)
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + ngo + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.ols.ngo.cl.c <- result.p.1
model.ols.ngo.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.ngo.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.ngo.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with ngo and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add ngo
police.imp.1.l$ngo <- police.data$ngo*100
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$ngo)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + ngo + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.p.ngo.cl.c <- result.p.1
model.p.ngo.cl.c[2:5, 1] <- result.t3[, 1]
model.p.ngo.cl.c[2:5, 2] <- result.t3[, 2]
model.p.ngo.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.ngo.cl, model.ols.ngo.cl.c, model.p.ngo.cl, model.p.ngo.cl.c)
##################################################
## HuCon_ISQ_R_Code_Rewrite_Output.R (shengkuohu/diff_in_diff_isq)
##################################################
## Clear the R environment
rm(list=ls())
## Load packages
library(foreign)
library(ggplot2)
library(scales)
library(dplyr)
library(plyr)
library(car)
library(stargazer)
library(lmtest)
library(sandwich)
library(stargazer)
## Cluster SE function
cl <- function(dat,fm, cluster){
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
M <- length(unique(cluster))
N <- length(cluster)
K <- fm$rank
dfc <- (M/(M-1))*((N-1)/(N-K))
uj <- apply(estfun(fm),2, function(x) tapply(x, cluster, sum));
vcovCL <- dfc*sandwich(fm, meat=crossprod(uj)/N)
coeftest(fm, vcovCL) }
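## cl() implements the standard CR1 cluster-robust variance estimator: dfc is the
## small-sample degrees-of-freedom correction, uj sums the estimating equations
## (scores) within each cluster, and coeftest() returns the adjusted coefficient table.
## Usage sketch with hypothetical names: cl(dat, fitted_model, dat$cluster_id).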
## Set working directory
setwd("/Users/shu8/Dropbox/ConHu Indian Police/Data/Imputation/New Code")
## Read csv data
police.data.save <- read.csv("HuCon_ISQ_Data.csv")
police.data <- read.csv("HuCon_ISQ_Data.csv")
## Delete DAMAN & DIU 2001
police.data <- police.data[-which(is.na(police.data$death_not_remanded)), ]
##############
###Table A1###
##############
stargazer(police.data, median = T)
###############
###Figure A1###
###############
## min max for death
min(police.data$death_remanded, na.rm = T)
max(police.data$death_remanded, na.rm = T)
min(police.data$death_not_remanded, na.rm = T)
max(police.data$death_not_remanded, na.rm = T)
sum(police.data$death_remanded == 0, na.rm = T)
sum(police.data$death_not_remanded == 0, na.rm = T)
library(plyr)
death.state <- ddply(police.data, .(state_ut), summarise, sum = sum(death_remanded))
death.state$not_remanded <- ddply(police.data, .(state_ut), summarise, sum = sum(death_not_remanded))
death.year <- ddply(police.data, .(year), summarise, sum = sum(death_remanded, na.rm = T))
death.year.not <- ddply(police.data, .(year), summarise, sum.not = sum(death_not_remanded, na.rm = T))
merge <- merge(death.year, death.year.not, by = "year")
library(reshape)
merge.long <- melt(merge, id = "year")
names(merge.long)[2]<-"Variable"
f.a1 <- ggplot(merge.long, aes(year, value, colour = Variable)) + geom_line() + scale_x_continuous(breaks = c(2002, 2004, 2006, 2008,2010,2012,2014,2016)) + scale_color_manual(labels = c("Death remanded", "Death not remanded"), values = c("#F8766D", "#00BFC4")) + ylab("Count") + xlab("Year")
f.a1
ggsave("death_time.pdf", f.a1, width = 6, height = 4)
##############
###Table A3###
##############
police.data.2006.all <- subset(police.data, year <= 2006)
death.state.2006.all <- ddply(police.data.2006.all, .(state_ut), summarise, remanded.2006.all = sum(death_remanded, na.rm = T))
death.state.not.2006.all <- ddply(police.data.2006.all, .(state_ut), summarise, notremanded.2006.all = sum(death_not_remanded, na.rm = T))
death.state.2006.all$notremanded.2006.all <- death.state.not.2006.all$notremanded.2006.all
death.state.2006.all
##############
###Table A4###
##############
police.data.2006 <- subset(police.data, year == 2006)
death.state.2006 <- ddply(police.data.2006, .(state_ut), summarise, remanded.2006 = sum(death_remanded, na.rm = T))
death.state.not.2006 <- ddply(police.data.2006, .(state_ut), summarise, notremanded.2006 = sum(death_not_remanded, na.rm = T))
death.state.2006$notremanded.2006 <- death.state.not.2006$notremanded.2006
death.state.2006
################
## Imputation ##
################
## Because Multiple Imputation is a random process, results are slightly different every time
## Load data
police.imp <- police.data.save[, c("state_ut", "year", "death_remanded", "death_not_remanded", "state_pca", "district_pca", "type", "sc_order1", "committee1",
"gdp", "religion2", "head_trans")]
## Load Amelia and Zelig
library("Amelia")
library("Zelig")
## AmeliaView()
## Multiple imputation with settings below
bds.3 <- c(3, 0, 100)
bds.4 <- c(4, 0, 100)
bds.12 <- c(12, 0, 50)
bds <- rbind(bds.3, bds.4, bds.12)
a.out <- amelia(police.imp, m = 5, idvars = "type",
ts = "year", cs = "state_ut", priors = NULL, lags = "gdp",
empri = 0, intercs = TRUE, leads = "gdp", splinetime = 0,
logs = c("gdp", "head_trans"), sqrts = NULL,
lgstc = NULL, ords = NULL, noms = c("state_pca", "district_pca",
"sc_order1", "committee1", "religion2"), bounds = bds, max.resample = 1000,
tolerance = 1e-04)
## Rewrite outdata to replace the original outdata1.csv - outdata5.csv
## Model results with multiple imputed variables will be slightly different
write.amelia(obj = a.out, file.stem = "outdata")
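## Optional imputation diagnostics (a sketch; not needed for the tables below):
## summary(a.out) reports convergence and missingness information, and plot(a.out)
## overlays observed and imputed densities for the imputed variables.
if (FALSE) {
  summary(a.out)
  plot(a.out)
}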
#############
###Table 1###
#############
police.data.t1 <- police.data[ ,c("death_not_remanded", "death_remanded", "state_ut", "year", "state_pca", "t")]
police.data.t1 <- na.omit(police.data.t1)
## Lagged state_pca
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.data.t1$l.state_pca <- ifelse(is.na(police.data.t1$l.state_pca), 0, police.data.t1$l.state_pca)
## Table 1 model
model.poisson.t1 <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.t1.cl <- cl(police.data.t1, model.poisson.t1 , police.data.t1$state_ut)
stargazer(model.poisson.t1.cl)
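## The Poisson coefficient on l.state_pca can be read as an incidence-rate ratio
## after exponentiation (a sketch using the fitted model above, not a reported quantity):
if (FALSE) {
  exp(coef(model.poisson.t1)["l.state_pca"])
}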
## predict death count if all PCAs are implemented on time
police.imp.p <- police.data.t1
police.imp.p$l.state_pca <- ifelse(police.imp.p$year >= 2008, 1, 0)
Y <- predict(model.poisson.t1, police.imp.p, type="response")
sum(Y)
sum(police.data.t1$death_not_remanded)-sum(Y)
## predict death count if no PCA is implemented.
police.imp.p <- police.data.t1
police.imp.p$l.state_pca <- 0
Y.2 <- predict(model.poisson.t1, police.imp.p, type="response")
sum(Y.2)
sum(Y.2)-sum(police.data.t1$death_not_remanded)
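## Read together, the two counterfactuals above give: (i) the predicted reduction in
## deaths had every state created a PCA by 2008, and (ii) the predicted increase in
## deaths had no state created one.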
##############
###Figure 1###
##############
## Lagged state_pca
police.data.f1 <- ddply(police.data.t1, .(state_ut), transform, tm1 = lead(t))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tm2 = lead(tm1))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tm3 = lead(tm2))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tm4 = lead(tm3))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp1 = lag(t))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp2 = lag(tp1))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp3 = lag(tp2))
police.data.f1 <- ddply(police.data.f1, .(state_ut), transform, tp4 = lag(tp3))
police.data.f1[is.na(police.data.f1)] <- 0
## Poisson Placebo Test
model.poisson.plb <- glm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + state_ut + as.factor(year), data = police.data.f1, family="poisson")
model.poisson.plb.cl <- cl(police.data.f1, model.poisson.plb, police.data.f1$state_ut)
stargazer(model.poisson.plb.cl)
## Overdispersion test
library(AER)
dispersiontest(model.poisson.plb,trafo=1)
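## If the test indicates overdispersion, the quasi-Poisson and negative binomial
## specifications reported in Table A11 below serve as robustness checks.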
## Graph Placebo Test Figure 3
## Save Ts Poisson result
graph.f1 <- as.data.frame(model.poisson.plb.cl[2:8, ])
graph.f1$time <- c(-3,-2,-1,0,1,2,3)
## Calculate CIs
graph.f1$ci.l <- graph.f1[, 1] - qnorm(0.975)*graph.f1[, 2]
graph.f1$ci.u <- graph.f1[, 1] + qnorm(0.975)*graph.f1[, 2]
graph.f1$ci.l.90 <- graph.f1[, 1] - qnorm(0.95)*graph.f1[, 2]
graph.f1$ci.u.90 <- graph.f1[, 1] + qnorm(0.95)*graph.f1[, 2]
## Plot
p.placebo <- ggplot(graph.f1, aes(time, Estimate))+
#geom_ribbon(aes(ymin=ci.l,ymax=ci.u),alpha=0.3)+
geom_errorbar(aes(ymin=ci.l,ymax=ci.u),width=0.3, color = "#999999")+
#geom_errorbar(aes(ymin=ci.l.90,ymax=ci.u.90),width=0.1, color = "#999999")+
geom_pointrange(aes(ymin=ci.l.90,ymax=ci.u.90),size=1.5, shape = 46, color = "#999999")+
geom_point(size = 2)+
geom_line()+
ylim(-1.1, 1.1)+
xlab("Years from PCA Creation")+
ylab("Coefficient of PCA Creation")+
#geom_line(aes(y=ci.l))+
#geom_line(aes(y=ci.u))+
#geom_line(aes(y=ci.l.90), linetype = "dashed")+
# geom_line(aes(y=ci.u.90), linetype = "dashed")+
geom_hline(yintercept = 0, linetype = "dotted")+
scale_x_continuous(breaks = c(-3, -2, -1, 0, 1, 2, 3))
p.placebo
ggsave("p_placebo_good_2016.pdf", plot = p.placebo, height = 4.5, width = 4.5)
#############
###Table 2###
#############
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## T2 results
## Row: State PCA, State Capacity, and State Desire
## Column: Effect, SE, P value
result.t2
#############
###Table 3###
#############
## Add SHRC to police data
police.data.t1$SHRC <- police.data$SHRC
## Lagged SHRC
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.SHRC = c(NA, SHRC[-length(SHRC)]))
## Fill NA with 0
police.data.t1$l.SHRC <- ifelse(is.na(police.data.t1$l.SHRC), 0, police.data.t1$l.SHRC)
## Correlation check
cor.test(police.data.t1$state_pca, police.data.t1$SHRC)
## Model with SHRC
model.poisson.SHRC <- glm(death_not_remanded ~ 1 + l.SHRC + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.SHRC.cl <- cl(police.data.t1, model.poisson.SHRC, police.data.t1$state_ut)
stargazer(model.poisson.SHRC.cl)
## Model SHRC with controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Add SHRC
police.imp.1.l$l.SHRC <- police.data.t1$l.SHRC
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.SHRC + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## T3 (2) results
## Row: State PCA, State Capacity, and State Desire
## Column: Effect, SE, P value
result.t3
##############
###Table A5###
##############
police.imp.d <- police.data.save[, c("state_ut", "year", "death_remanded", "death_not_remanded", "state_pca", "district_pca", "type", "sc_order1", "committee1",
"gdp", "religion2", "head_trans")]
stargazer(police.imp.d, median = T)
##############
###Table A6###
##############
## OLS Placebo Test
police.data.f3 <- police.data.f1
model.ols.plb <- lm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + state_ut + as.factor(year), data = police.data.f3)
model.ols.plb.cl <- cl(police.data.f3, model.ols.plb, police.data.f3$state_ut)
## OLS Placebo Test with controls
## Loop models for 5 imputation datasets
i <- 1
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add t to outdata
police.imp.1.l$t <- police.data.t1$t
## lags and leads
police.data.f3 <- ddply(police.imp.1.l, .(state_ut), transform, tm1 = lead(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm2 = lead(tm1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm3 = lead(tm2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm4 = lead(tm3))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp1 = lag(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp2 = lag(tp1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp3 = lag(tp2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp4 = lag(tp3))
police.data.f3[is.na(police.data.f3)] <- 0
## OLS Placebo Test on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + gdp +
head_trans+ state_ut + as.factor(year), data = police.data.f3)
result.p.1 <- cl(police.data.f3, imp.1.p, police.data.f3$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t2 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## Replace model results with the pooled imputation estimates
model.ols.plb.cl.c <- result.p.1
model.ols.plb.cl.c[2:10, 1] <- result.t2[, 1]
model.ols.plb.cl.c[2:10, 2] <- result.t2[, 2]
model.ols.plb.cl.c[2:10, 4] <- result.t2[, 3]
model.ols.plb.cl.c
## Poisson Placebo Test with controls
## Loop models for 5 imputation datasets
i <- 1
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add t to outdata
police.imp.1.l$t <- police.data.t1$t
## lags and leads
police.data.f3 <- ddply(police.imp.1.l, .(state_ut), transform, tm1 = lead(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm2 = lead(tm1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm3 = lead(tm2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tm4 = lead(tm3))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp1 = lag(t))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp2 = lag(tp1))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp3 = lag(tp2))
police.data.f3 <- ddply(police.data.f3, .(state_ut), transform, tp4 = lag(tp3))
police.data.f3[is.na(police.data.f3)] <- 0
## Poisson Placebo Test
imp.1.p <- glm(death_not_remanded ~ 1 + tm3 + tm2 + tm1 + t + tp1 + tp2 + tp3 + gdp +
head_trans+ state_ut + as.factor(year), data = police.data.f3, family="poisson")
result.p.1 <- cl(police.data.f3, imp.1.p, police.data.f3$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
## Replace model results with the pooled imputation estimates
model.poisson.plb.cl.c <- result.p.1
model.poisson.plb.cl.c[2:10, 1] <- result.t2[, 1]
model.poisson.plb.cl.c[2:10, 2] <- result.t2[, 2]
model.poisson.plb.cl.c[2:10, 4] <- result.t2[, 3]
model.poisson.plb.cl.c
## Make table
stargazer(model.ols.plb.cl, model.ols.plb.cl.c, model.poisson.plb.cl, model.poisson.plb.cl.c)
###############
###Figure A3###
###############
## Save Ts Poisson result
graph.a1 <- as.data.frame(model.poisson.plb.cl.c[2:8, ])
graph.a1$time <- c(-3,-2,-1,0,1,2,3)
## Calculate CIs
graph.a1$ci.l <- graph.a1[, 1] - qnorm(0.975)*graph.a1[, 2]
graph.a1$ci.u <- graph.a1[, 1] + qnorm(0.975)*graph.a1[, 2]
graph.a1$ci.l.90 <- graph.a1[, 1] - qnorm(0.95)*graph.a1[, 2]
graph.a1$ci.u.90 <- graph.a1[, 1] + qnorm(0.95)*graph.a1[, 2]
## Plot
p.placebo.a3 <- ggplot(graph.a1, aes(time, Estimate))+
#geom_ribbon(aes(ymin=ci.l,ymax=ci.u),alpha=0.3)+
geom_errorbar(aes(ymin=ci.l,ymax=ci.u),width=0.3, color = "#999999")+
#geom_errorbar(aes(ymin=ci.l.90,ymax=ci.u.90),width=0.1, color = "#999999")+
geom_pointrange(aes(ymin=ci.l.90,ymax=ci.u.90),size=1.5, shape = 46, color = "#999999")+
geom_point(size = 2)+
geom_line()+
ylim(-1.2, 1.2)+
xlab("Years from PCA Creation")+
ylab("Coefficient of PCA Creation")+
#geom_line(aes(y=ci.l))+
#geom_line(aes(y=ci.u))+
#geom_line(aes(y=ci.l.90), linetype = "dashed")+
# geom_line(aes(y=ci.u.90), linetype = "dashed")+
geom_hline(yintercept = 0, linetype = "dotted")+
scale_x_continuous(breaks = c(-3, -2, -1, 0, 1, 2, 3))
p.placebo.a3
ggsave("p_placebo_controls_2016.pdf", plot = p.placebo.a3, height = 4.8, width = 4.5)
##############
###Table A7###
##############
## Load GTD data
police.data.ta5 <- police.data.t1
police.data.ta5$l.event <- police.data$l.event
#police.data.save <- merge(police.data, gtd.sum.l, by = c("state_ut", "year"), all.x = T)
#police.data.save <- subset(police.data.save, select=-c(iyear, provstate))
#write.csv(police.data.save, "final1.csv")
## fill NA with 0
#police.data.save$l.event <- ifelse(is.na(police.data.save$l.event), 0, police.data.save$l.event)
##Correlation check
cor.test(police.data.ta5$l.event, police.data.ta5$l.state_pca)
## OLS with GTD
model.ols.GTD <- lm(death_not_remanded ~ 1 + l.state_pca + l.event + state_ut + as.factor(year), data = police.data.ta5)
model.ols.GTD.cl <- cl(police.data.ta5, model.ols.GTD, police.data.ta5$state_ut)
## Poisson with GTD
model.poisson.GTD <- glm(death_not_remanded ~ 1 + l.state_pca + l.event + state_ut + as.factor(year), data = police.data.ta5, family="poisson")
model.p.GTD.cl <- cl(police.data.ta5, model.poisson.GTD, police.data.ta5$state_ut)
## OLS with GTD and Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add GTD l.event to outdata
police.imp.1.l$l.event <- police.data.ta5$l.event
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.event + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t2 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace model results with the pooled imputation estimates
model.ols.GTD.cl.c <- result.p.1
model.ols.GTD.cl.c[2:5, 1] <- result.t2[, 1]
model.ols.GTD.cl.c[2:5, 2] <- result.t2[, 2]
model.ols.GTD.cl.c[2:5, 4] <- result.t2[, 3]
model.ols.GTD.cl.c
## Poisson with GTD and Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add GTD l.event to outdata
police.imp.1.l$l.event <- police.data.ta5$l.event
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.event + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace model results with the pooled imputation estimates
model.p.GTD.cl.c <- result.p.1
model.p.GTD.cl.c[2:5, 1] <- result.t2[, 1]
model.p.GTD.cl.c[2:5, 2] <- result.t2[, 2]
model.p.GTD.cl.c[2:5, 4] <- result.t2[, 3]
stargazer(model.ols.GTD.cl, model.ols.GTD.cl.c, model.p.GTD.cl, model.p.GTD.cl.c)
##############
###Table A8###
##############
## Add religion to police data
police.data.t1$religion2 <- police.data$religion2
## OLS Model with religion
model.ols.religion <- lm(death_not_remanded ~ 1 + l.state_pca + religion2 + state_ut + as.factor(year), data = police.data.t1)
model.ols.religion.cl <- cl(police.data.t1, model.ols.religion, police.data.t1$state_ut)
## Poisson Model with religion
model.poisson.religion <- glm(death_not_remanded ~ 1 + l.state_pca + religion2 + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.religion.cl <- cl(police.data.t1, model.poisson.religion, police.data.t1$state_ut)
## OLS Model with religion
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add religion
police.imp.1.l$religion2 <- police.data.t1$religion2
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + religion2 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.ols.religion.cl.c <- result.p.1
model.ols.religion.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.religion.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.religion.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with religion and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add religion
police.imp.1.l$religion2 <- police.data.t1$religion2
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + religion2 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.poisson.religion.cl.c <- result.p.1
model.poisson.religion.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.religion.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.religion.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.religion.cl, model.ols.religion.cl.c, model.poisson.religion.cl, model.poisson.religion.cl.c)
##############
###Table A9###
##############
## OLS
model.ols <- lm(death_not_remanded ~ 1 + l.state_pca + state_ut +
as.factor(year), data = police.data.t1)
model.ols.cl <- cl(police.data.t1, model.ols, police.data.t1$state_ut)
## OLS with logged DV
police.data.t1$death_not_remanded_ln <- log(police.data.t1$death_not_remanded+1)
model.ols.log <- lm(death_not_remanded_ln ~ 1 + l.state_pca + state_ut +
as.factor(year), data = police.data.t1)
model.ols.log.cl <- cl(police.data.t1, model.ols.log, police.data.t1$state_ut)
## OLS with Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace model results with the pooled imputation estimates
model.ols.cl.c <- result.p.1
model.ols.cl.c[2:4, 1] <- result.t2[, 1]
model.ols.cl.c[2:4, 2] <- result.t2[, 2]
model.ols.cl.c[2:4, 4] <- result.t2[, 3]
## OLS with logged DV and Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Log DV
police.imp.1$death_not_remanded_ln <- log(police.imp.1$death_not_remanded+1)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS with logged DV on imputed dataset i
imp.1.p <- lm(death_not_remanded_ln ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace model results with the pooled imputation estimates
model.ols.log.cl.c <- result.p.1
model.ols.log.cl.c[2:4, 1] <- result.t2[, 1]
model.ols.log.cl.c[2:4, 2] <- result.t2[, 2]
model.ols.log.cl.c[2:4, 4] <- result.t2[, 3]
stargazer(model.ols.cl, model.ols.cl.c,model.ols.log.cl, model.ols.log.cl.c)
###############
###Table A10###
###############
## OLS no lag
model.ols.nl <- lm(death_not_remanded ~ 1 + state_pca + state_ut +
as.factor(year), data = police.data.t1)
model.ols.nl.cl <- cl(police.data.t1, model.ols.nl, police.data.t1$state_ut)
## Poisson no lag
model.p.nl <- glm(death_not_remanded ~ 1 + state_pca + state_ut +
as.factor(year), data = police.data.t1, family="poisson")
model.p.nl.cl <- cl(police.data.t1, model.p.nl, police.data.t1$state_ut)
## OLS no lag with Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace model results with the pooled imputation estimates
model.ols.nl.cl.c <- result.p.1
model.ols.nl.cl.c[2:4, 1] <- result.t2[, 1]
model.ols.nl.cl.c[2:4, 2] <- result.t2[, 2]
model.ols.nl.cl.c[2:4, 4] <- result.t2[, 3]
## Poisson no lag with Controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:4, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:4, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t2 <- matrix(NA, nrow = 3, ncol = 3)
for (i in 1:3){
result.t2[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t2
## Replace model results with the pooled imputation estimates
model.p.nl.cl.c <- result.p.1
model.p.nl.cl.c[2:4, 1] <- result.t2[, 1]
model.p.nl.cl.c[2:4, 2] <- result.t2[, 2]
model.p.nl.cl.c[2:4, 4] <- result.t2[, 3]
stargazer(model.ols.nl.cl, model.ols.nl.cl.c, model.p.nl.cl, model.p.nl.cl.c)
###############
###Table A11###
###############
## Balanced Panel
police.data.b <- subset(police.data.t1, police.data.t1$state_ut != "TELANGANA")
police.data.b <- subset(police.data.b, police.data.b$state_ut != "Z DAMAN & DIU")
police.data.b$state_ut <- as.factor(as.character(police.data.b$state_ut))
levels(police.data.b$state_ut)
length(police.data.b$death_not_remanded)
model.poisson.b <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.b, family="poisson")
model.poisson.b.cl <- cl(police.data.b, model.poisson.b, police.data.b$state_ut)
##Quasi-poisson
model.qp <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family = "quasipoisson")
model.qp.cl <- cl(police.data.t1, model.qp, police.data.t1$state_ut)
## Negative binomial
library(MASS)
model.nb <- glm.nb(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.nb.cl <- cl(police.data.t1, model.nb, police.data.t1$state_ut)
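## The estimated dispersion parameter of the negative binomial model can be
## inspected via model.nb$theta and model.nb$SE.theta (a sketch); smaller theta
## implies greater overdispersion relative to the Poisson.
if (FALSE) {
  model.nb$theta
  model.nb$SE.theta
}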
## Delete new states
police.data.nn <- subset(police.data.t1, police.data.t1$state_ut != "TELANGANA")
police.data.nn <- police.data.nn[!police.data.nn$state_ut == "ANDHRA PRADESH", ]
police.data.nn$state_ut <- as.factor(as.character(police.data.nn$state_ut))
levels(police.data.nn$state_ut)
length(police.data.nn$death_not_remanded)
model.poisson.nn <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.nn, family="poisson")
model.poisson.nn.cl <- cl(police.data.nn, model.poisson.nn, police.data.nn$state_ut)
## Delete MAHARASHTRA
police.data.nm <- subset(police.data.t1, police.data.t1$state_ut != "MAHARASHTRA")
police.data.nm$state_ut <- as.factor(as.character(police.data.nm$state_ut))
levels(police.data.nm$state_ut)
length(police.data.nm$death_not_remanded)
model.poisson.nm <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.nm, family="poisson")
model.poisson.nm.cl <- cl(police.data.nm, model.poisson.nm, police.data.nm$state_ut)
## Delete MAHARASHTRA and ANDHRA PRADESH
police.data.nma <- subset(police.data.t1, police.data.t1$state_ut != "MAHARASHTRA")
police.data.nma <- police.data.nma[!police.data.nma$state_ut == "ANDHRA PRADESH", ]
police.data.nma$state_ut <- as.factor(as.character(police.data.nma$state_ut))
levels(police.data.nma$state_ut)
length(police.data.nma$death_not_remanded)
model.poisson.nma <- glm(death_not_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.nma, family="poisson")
model.poisson.nma.cl <- cl(police.data.nma, model.poisson.nma, police.data.nma$state_ut)
## Print
stargazer(model.poisson.b.cl, model.qp.cl, model.nb.cl, model.poisson.nn.cl, model.poisson.nm.cl, model.poisson.nma.cl)
###############
###Table A12###
###############
## OLS Model with SHRC
model.ols.SHRCc <- lm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + state_ut + as.factor(year), data = police.data.t1)
model.ols.SHRCc.cl <- cl(police.data.t1, model.ols.SHRCc, police.data.t1$state_ut)
## Poisson Model with SHRC
model.poisson.SHRCc <- glm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.SHRCc.cl <- cl(police.data.t1, model.poisson.SHRCc, police.data.t1$state_ut)
## OLS Model with SHRC
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add SHRC
police.imp.1.l$l.SHRC <- police.data.t1$l.SHRC
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.ols.SHRCc.cl.c <- result.p.1
model.ols.SHRCc.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.SHRCc.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.SHRCc.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with SHRC and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add SHRC
police.imp.1.l$l.SHRC <- police.data.t1$l.SHRC
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.SHRC + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.poisson.SHRCc.cl.c <- result.p.1
model.poisson.SHRCc.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.SHRCc.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.SHRCc.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.SHRCc.cl, model.ols.SHRCc.cl.c, model.poisson.SHRCc.cl, model.poisson.SHRCc.cl.c)
###############
###Table A13###
###############
## Add party_match to data
police.data.t1$party_match <- police.data$party_match
## OLS Model with party
model.ols.party <- lm(death_not_remanded ~ 1 + l.state_pca + party_match + state_ut + as.factor(year), data = police.data.t1)
model.ols.party.cl <- cl(police.data.t1, model.ols.party, police.data.t1$state_ut)
## Poisson Model with party
model.poisson.party <- glm(death_not_remanded ~ 1 + l.state_pca + party_match + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.party.cl <- cl(police.data.t1, model.poisson.party, police.data.t1$state_ut)
## OLS Model with party
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party
police.imp.1.l$party_match <- police.data.t1$party_match
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + party_match + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.ols.party.cl.c <- result.p.1
model.ols.party.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.party.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.party.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with party and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party
police.imp.1.l$party_match <- police.data.t1$party_match
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + party_match + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.poisson.party.cl.c <- result.p.1
model.poisson.party.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.party.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.party.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.party.cl, model.ols.party.cl.c, model.poisson.party.cl, model.poisson.party.cl.c)
###############
###Table A14###
###############
## Add party_match_2006 to data
police.data.t1$party_match_2006 <- police.data$party_match_2006
## OLS Model with party 2006
model.ols.party06 <- lm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + state_ut + as.factor(year), data = police.data.t1)
model.ols.party06.cl <- cl(police.data.t1, model.ols.party06, police.data.t1$state_ut)
## Poisson Model with party 2006
model.poisson.party06 <- glm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.party06.cl <- cl(police.data.t1, model.poisson.party06, police.data.t1$state_ut)
## OLS Model with party 2006
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party 2006
police.imp.1.l$party_match_2006 <- police.data.t1$party_match_2006
## OLS on imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print pooled OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace model results with the pooled imputation estimates
model.ols.party06.cl.c <- result.p.1
model.ols.party06.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.party06.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.party06.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with party 2006 and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add party 2006
police.imp.1.l$party_match_2006 <- police.data.t1$party_match_2006
## Poisson on imputed dataset i
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + party_match_2006 + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.poisson.party06.cl.c <- result.p.1
model.poisson.party06.cl.c[2:5, 1] <- result.t3[, 1]
model.poisson.party06.cl.c[2:5, 2] <- result.t3[, 2]
model.poisson.party06.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.party06.cl, model.ols.party06.cl.c, model.poisson.party06.cl, model.poisson.party06.cl.c)
###############
###Table A15###
###############
## Add directives to police data
police.data.t1$ssc <- police.data$ssc
police.data.t1$dgp_tenure <- police.data$dgp_tenure
police.data.t1$o_tenure <- police.data$o_tenure
police.data.t1$invest_law <- police.data$invest_law
police.data.t1$peb <- police.data$peb
police.data.t1$district_pca <- police.data$district_pca
## Lagged ssc
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.ssc = c(NA, ssc[-length(ssc)]))
## fill NA with 0
police.data.t1$l.ssc <- ifelse(is.na(police.data.t1$l.ssc), 0, police.data.t1$l.ssc)
## Lagged dgp_tenure
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.dgp_tenure = c(NA, dgp_tenure[-length(dgp_tenure)]))
## fill NA with 0
police.data.t1$l.dgp_tenure <- ifelse(is.na(police.data.t1$l.dgp_tenure), 0, police.data.t1$l.dgp_tenure)
## Lagged o_tenure
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.o_tenure = c(NA, o_tenure[-length(o_tenure)]))
## fill NA with 0
police.data.t1$l.o_tenure <- ifelse(is.na(police.data.t1$l.o_tenure), 0, police.data.t1$l.o_tenure)
## Lagged invest_law
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.invest_law = c(NA, invest_law[-length(invest_law)]))
## fill NA with 0
police.data.t1$l.invest_law <- ifelse(is.na(police.data.t1$l.invest_law), 0, police.data.t1$l.invest_law)
## Lagged peb
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.peb = c(NA, peb[-length(peb)]))
## fill NA with 0
police.data.t1$l.peb <- ifelse(is.na(police.data.t1$l.peb), 0, police.data.t1$l.peb)
## Lagged district_pca
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.district_pca = c(NA, district_pca[-length(district_pca)]))
## fill NA with 0
police.data.t1$l.district_pca <- ifelse(is.na(police.data.t1$l.district_pca), 0, police.data.t1$l.district_pca)
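## The ddply lag-then-zero-fill pattern above is repeated once per directive. As an
## optional refactor (not part of the original replication code; lag_fill is a
## hypothetical helper name, and it assumes plyr is loaded as elsewhere in this script),
## the same steps could be wrapped in one function:
lag_fill <- function(df, var) {
  lag_name <- paste0("l.", var)
  out <- ddply(df, .(state_ut), function(d) {
    d[[lag_name]] <- c(NA, d[[var]][-length(d[[var]])]) # within-state lag
    d
  })
  out[[lag_name]][is.na(out[[lag_name]])] <- 0 # fill the first year of each state with 0
  out
}
## Example (equivalent to the ssc block above):
## police.data.t1 <- lag_fill(police.data.t1, "ssc")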
## Directives correlations
## directives data
directives <- police.data.t1[, c("l.ssc", "l.dgp_tenure","l.o_tenure", "l.invest_law", "l.peb", "l.state_pca", "l.district_pca")]
stargazer(cor(directives))
###############
###Table A16###
###############
## OLS Models
model.ols.dis <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.dis.cl <- cl(police.data.t1, model.ols.dis, police.data.t1$state_ut)
model.ols.dir <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + state_ut + as.factor(year), data = police.data.t1)
model.ols.dir.cl <- cl(police.data.t1, model.ols.dir, police.data.t1$state_ut)
model.ols.dir <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.invest_law + state_ut + as.factor(year), data = police.data.t1)
model.ols.dir.i1.cl <- cl(police.data.t1, model.ols.dir, police.data.t1$state_ut)
model.ols.dir <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.o_tenure + state_ut + as.factor(year), data = police.data.t1)
model.ols.dir.i2.cl <- cl(police.data.t1, model.ols.dir, police.data.t1$state_ut)
## Poisson Models
model.p.dis <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dis.cl <- cl(police.data.t1, model.p.dis, police.data.t1$state_ut)
model.p.dir <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dir.cl <- cl(police.data.t1, model.p.dir, police.data.t1$state_ut)
model.p.dir <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.invest_law + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dir.i1.cl <- cl(police.data.t1, model.p.dir, police.data.t1$state_ut)
model.p.dir <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + l.state_pca*l.o_tenure + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.dir.i2.cl <- cl(police.data.t1, model.p.dir, police.data.t1$state_ut)
stargazer(model.ols.dis.cl, model.ols.dir.cl, model.ols.dir.i1.cl, model.ols.dir.i2.cl,
model.p.dis.cl, model.p.dir.cl, model.p.dir.i1.cl, model.p.dir.i2.cl)
###############
###Table A17###
###############
## OLS Model with controls
## Dis
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dis.cl.c <- result.p.1
model.ols.dis.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.dis.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.dis.cl.c[2:5, 4] <- result.t3[, 3]
## Dir
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dir.cl.c <- result.p.1
model.ols.dir.cl.c[2:10, 1] <- result.t3[, 1]
model.ols.dir.cl.c[2:10, 2] <- result.t3[, 2]
model.ols.dir.cl.c[2:10, 4] <- result.t3[, 3]
## Dir.i1
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.invest_law +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dir.i1.cl.c <- result.p.1
model.ols.dir.i1.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.ols.dir.i1.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.ols.dir.i1.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
## Dir.i2
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.o_tenure +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.dir.i2.cl.c <- result.p.1
model.ols.dir.i2.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.ols.dir.i2.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.ols.dir.i2.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
## Poisson Model with controls
## Dis
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dis.cl.c <- result.p.1
model.p.dis.cl.c[2:5, 1] <- result.t3[, 1]
model.p.dis.cl.c[2:5, 2] <- result.t3[, 2]
model.p.dis.cl.c[2:5, 4] <- result.t3[, 3]
## Dir
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:10, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:10, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 9, ncol = 3)
for (i in 1:9){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dir.cl.c <- result.p.1
model.p.dir.cl.c[2:10, 1] <- result.t3[, 1]
model.p.dir.cl.c[2:10, 2] <- result.t3[, 2]
model.p.dir.cl.c[2:10, 4] <- result.t3[, 3]
## Dir.i1
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.invest_law +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dir.i1.cl.c <- result.p.1
model.p.dir.i1.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.p.dir.i1.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.p.dir.i1.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
## Dir.i2
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add directives
police.imp.1.l$l.ssc <- police.data.t1$l.ssc
police.imp.1.l$l.dgp_tenure <- police.data.t1$l.dgp_tenure
police.imp.1.l$l.o_tenure <- police.data.t1$l.o_tenure
police.imp.1.l$l.invest_law <- police.data.t1$l.invest_law
police.imp.1.l$l.peb <- police.data.t1$l.peb
police.imp.1.l$l.district_pca <- police.data.t1$l.district_pca
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + l.district_pca + l.ssc + l.dgp_tenure + l.o_tenure + l.invest_law + l.peb +
l.state_pca*l.o_tenure +gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family = "poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[c(2:10, 61), 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[c(2:10, 61), 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 10, ncol = 3)
for (i in 1:10){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.dir.i2.cl.c <- result.p.1
model.p.dir.i2.cl.c[c(2:10, 61), 1] <- result.t3[, 1]
model.p.dir.i2.cl.c[c(2:10, 61), 2] <- result.t3[, 2]
model.p.dir.i2.cl.c[c(2:10, 61), 4] <- result.t3[, 3]
stargazer(model.ols.dis.cl.c, model.ols.dir.cl.c, model.ols.dir.i1.cl.c, model.ols.dir.i2.cl.c,
model.p.dis.cl.c, model.p.dir.cl.c, model.p.dir.i1.cl.c, model.p.dir.i2.cl.c)
###############
###Table A18###
###############
## Add pca_bind
police.data.t1$pca_bind <- police.data$pca_bind
## Lag pca_bind
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.pca_bind = c(NA, pca_bind[-length(pca_bind)]))
## fill NA with 0
police.data.t1$l.pca_bind <- ifelse(is.na(police.data.t1$l.pca_bind), 0, police.data.t1$l.pca_bind)
## Poisson no control binding
model.poisson.bind <- glm(death_not_remanded ~ 1 + l.state_pca + l.pca_bind + as.factor(state_ut) + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.bind.cl <- cl(police.data.t1, model.poisson.bind, police.data.t1$state_ut)
stargazer(model.poisson.bind.cl)
## categorical variable
police.data.t1$bindinglvl <- ifelse(police.data.t1$l.pca_bind == 1 & police.data.t1$l.state_pca == 1, "Binding",
ifelse(police.data.t1$l.pca_bind == 0 & police.data.t1$l.state_pca == 1, "Regular", "No PCA"))
police.data.t1$bindinglvl <- as.factor(police.data.t1$bindinglvl)
police.data.t1$bindinglvl <- relevel(police.data.t1$bindinglvl, ref = "Regular")
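## "Regular" (a state PCA without binding recommendations) is the reference level, so the
## "Binding" and "No PCA" coefficients below are contrasts against that category.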
levels(police.data.t1$bindinglvl)
## Poisson no control binding categorical
model.poisson.bind.ca <- glm(death_not_remanded ~ 1 + bindinglvl + as.factor(state_ut) + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.bind.ca.cl <- cl(police.data.t1, model.poisson.bind.ca, police.data.t1$state_ut)
stargazer(model.poisson.bind.ca.cl)
##############
###Table A19##
##############
## Add media_women
police.data.t1$media_women <- police.data$media_women_0
police.data.media <- police.data.t1[-which(is.na(police.data.t1$media_women)), ]
police.data.media$state_ut <- as.factor(as.character(police.data.media$state_ut))
levels(police.data.media$state_ut)
## OLS Model with media_women
model.ols.media <- lm(death_not_remanded ~ 1 + l.state_pca + media_women + state_ut + as.factor(year), data = police.data.media)
model.ols.media.cl <- cl(police.data.media, model.ols.media, police.data.media$state_ut)
## Poisson Model with media_women
model.p.media <- glm(death_not_remanded ~ 1 + l.state_pca + media_women + state_ut + as.factor(year), data = police.data.media, family="poisson")
model.p.media.cl <- cl(police.data.media, model.p.media, police.data.media$state_ut)
## OLS Model with media
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add media women
police.imp.1.l$media_women <- police.data.t1$media_women
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$media_women)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
levels(police.imp.1.l$state_ut)
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + media_women + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.media.cl.c <- result.p.1
model.ols.media.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.media.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.media.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with media_women and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add media women
police.imp.1.l$media_women <- police.data.t1$media_women
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$media_women)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + media_women + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.media.cl.c <- result.p.1
model.p.media.cl.c[2:5, 1] <- result.t3[, 1]
model.p.media.cl.c[2:5, 2] <- result.t3[, 2]
model.p.media.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.media.cl, model.ols.media.cl.c, model.p.media.cl, model.p.media.cl.c)
##############
###Table A20##
##############
## Add literacy
police.data.t1$literacy <- police.data$literacy
police.data.liter <- police.data.t1[-which(is.na(police.data.t1$literacy)), ]
police.data.liter$state_ut <- as.factor(as.character(police.data.liter$state_ut))
levels(police.data.liter$state_ut)
## OLS Model with literacy
model.ols.liter <- lm(death_not_remanded ~ 1 + l.state_pca + literacy + state_ut + as.factor(year), data = police.data.liter)
model.ols.liter.cl <- cl(police.data.liter, model.ols.liter, police.data.liter$state_ut)
## Poisson Model with literacy
model.p.liter <- glm(death_not_remanded ~ 1 + l.state_pca + literacy + state_ut + as.factor(year), data = police.data.liter, family="poisson")
model.p.liter.cl <- cl(police.data.liter, model.p.liter, police.data.liter$state_ut)
## OLS Model with literacy
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add literacy
police.imp.1.l$literacy <- police.data.t1$literacy
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$literacy)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
levels(police.imp.1.l$state_ut)
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + literacy + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.liter.cl.c <- result.p.1
model.ols.liter.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.liter.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.liter.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with literacy and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add literacy
police.imp.1.l$literacy <- police.data.t1$literacy
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$literacy)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + literacy + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.liter.cl.c <- result.p.1
model.p.liter.cl.c[2:5, 1] <- result.t3[, 1]
model.p.liter.cl.c[2:5, 2] <- result.t3[, 2]
model.p.liter.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.liter.cl, model.ols.liter.cl.c, model.p.liter.cl, model.p.liter.cl.c)
##############
###Table A21##
##############
## Total Death
police.data.t1$total_death <- police.data.t1$death_not_remanded + police.data.t1$death_remanded
## OLS Model with logged total deaths
police.data.t1$total_death_ln <- log(police.data.t1$total_death + 1)
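## The +1 offset keeps state-years with zero deaths finite under the log transform.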
model.ols.total.l <- lm(total_death_ln ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.total.l.cl <- cl(police.data.t1, model.ols.total.l, police.data.t1$state_ut)
## OLS Model with total deaths
model.ols.total <- lm(total_death ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.total.cl <- cl(police.data.t1, model.ols.total, police.data.t1$state_ut)
## Poisson Model with total deaths
model.p.total <- glm(total_death ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.total.cl <- cl(police.data.t1, model.p.total, police.data.t1$state_ut)
## OLS Model with total deaths and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Total Death
police.imp.1.l$total_death <- police.imp.1.l$death_not_remanded + police.imp.1.l$death_remanded
## OLS with imputed dataset i
imp.1.p <- lm(total_death ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.total.cl.c <- result.p.1
model.ols.total.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.total.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.total.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with total deaths and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Total Death
police.imp.1.l$total_death <- police.imp.1.l$death_not_remanded + police.imp.1.l$death_remanded
## Poisson with outdata1.csv
imp.1.p <- glm(total_death ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.total.cl.c <- result.p.1
model.p.total.cl.c[2:5, 1] <- result.t3[, 1]
model.p.total.cl.c[2:5, 2] <- result.t3[, 2]
model.p.total.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.total.cl, model.ols.total.cl.c, model.p.total.cl, model.p.total.cl.c)
##############
###Table A22##
##############
## OLS Model
model.ols.remanded <- lm(death_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1)
model.ols.remanded.cl <- cl(police.data.t1, model.ols.remanded, police.data.t1$state_ut)
## Poisson Model
model.p.remanded <- glm(death_remanded ~ 1 + l.state_pca + state_ut + as.factor(year), data = police.data.t1, family="poisson")
model.p.remanded.cl <- cl(police.data.t1, model.p.remanded, police.data.t1$state_ut)
## OLS Model
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## OLS with imputed dataset i
imp.1.p <- lm(death_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.remanded.cl.c <- result.p.1
model.ols.remanded.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.remanded.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.remanded.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Poisson with outdata1.csv
imp.1.p <- glm(death_remanded ~ 1 + l.state_pca + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.remanded.cl.c <- result.p.1
model.p.remanded.cl.c[2:5, 1] <- result.t3[, 1]
model.p.remanded.cl.c[2:5, 2] <- result.t3[, 2]
model.p.remanded.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.remanded.cl, model.ols.remanded.cl.c, model.p.remanded.cl, model.p.remanded.cl.c)
###############
###Table A23###
###############
## Add pca_ind
police.data.t1$pca_ind <- police.data$pca_ind
## Lag pca_ind
police.data.t1 <- ddply(police.data.t1, .(state_ut), transform, l.pca_ind = c(NA, pca_ind[-length(pca_ind)]))
## fill NA with 0
police.data.t1$l.pca_ind <- ifelse(is.na(police.data.t1$l.pca_ind), 0, police.data.t1$l.pca_ind)
## categorical variable
police.data.t1$indlvl <- ifelse(police.data.t1$l.pca_ind == 1 & police.data.t1$l.state_pca == 1, "Ind",
ifelse(police.data.t1$l.pca_ind == 0 & police.data.t1$l.state_pca == 1, "Regular", "No PCA"))
police.data.t1$indlvl <- as.factor(police.data.t1$indlvl)
police.data.t1$indlvl <- relevel(police.data.t1$indlvl, ref = "Regular")
levels(police.data.t1$indlvl)
## Poisson no control independence categorical
model.poisson.ind.ca <- glm(death_not_remanded ~ 1 + indlvl + as.factor(state_ut) + as.factor(year), data = police.data.t1, family="poisson")
model.poisson.ind.ca.cl <- cl(police.data.t1, model.poisson.ind.ca, police.data.t1$state_ut)
stargazer(model.poisson.ind.ca.cl)
##############
###Table A24##
##############
## Add ngo
police.data.t1$ngo <- police.data$ngo*100
police.data.ngo <- police.data.t1[-which(is.na(police.data.t1$ngo)), ]
police.data.ngo$state_ut <- as.factor(as.character(police.data.ngo$state_ut))
levels(police.data.ngo$state_ut)
## OLS Model with ngo
model.ols.ngo <- lm(death_not_remanded ~ 1 + l.state_pca + ngo + state_ut + as.factor(year), data = police.data.ngo)
model.ols.ngo.cl <- cl(police.data.ngo, model.ols.ngo, police.data.ngo$state_ut)
## Poisson Model with ngo
model.p.ngo <- glm(death_not_remanded ~ 1 + l.state_pca + ngo + state_ut + as.factor(year), data = police.data.ngo, family="poisson")
model.p.ngo.cl <- cl(police.data.ngo, model.p.ngo, police.data.ngo$state_ut)
## OLS Model with ngo
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add ngo
police.imp.1.l$ngo <- police.data.t1$ngo # already rescaled (*100) when added to police.data.t1 above
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$ngo)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
levels(police.imp.1.l$state_ut)
## OLS with imputed dataset i
imp.1.p <- lm(death_not_remanded ~ 1 + l.state_pca + ngo + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l)
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print OLS results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.ols.ngo.cl.c <- result.p.1
model.ols.ngo.cl.c[2:5, 1] <- result.t3[, 1]
model.ols.ngo.cl.c[2:5, 2] <- result.t3[, 2]
model.ols.ngo.cl.c[2:5, 4] <- result.t3[, 3]
## Poisson Model with ngo and controls
## Loop models for 5 imputation datasets
for (i in c(1:5)){
filename <- paste("outdata", i, sep = "")
filename.csv <- paste(filename, "csv", sep = ".")
police.imp.1 <- read.csv(filename.csv)
## Lagged state_pca
police.imp.1.l <- ddply(police.imp.1, .(state_ut), transform, l.state_pca = c(NA, state_pca[-length(state_pca)]))
## fill NA with 0
police.imp.1.l$l.state_pca <- ifelse(is.na(police.imp.1.l$l.state_pca), 0, police.imp.1.l$l.state_pca)
## delete DAMAN & DIU 2001
police.imp.1.l <- police.imp.1.l[-500,]
## Rescale GDP
police.imp.1.l$gdp <- police.imp.1.l$gdp/1000000
## Add ngo
police.imp.1.l$ngo <- police.data.t1$ngo # already rescaled (*100) when added to police.data.t1 above
police.imp.1.l <- police.imp.1.l[-which(is.na(police.imp.1.l$ngo)), ]
police.imp.1.l$state_ut <- as.factor(as.character(police.imp.1.l$state_ut))
## Poisson with outdata1.csv
imp.1.p <- glm(death_not_remanded ~ 1 + l.state_pca + ngo + gdp +
head_trans + state_ut +
as.factor(year), data = police.imp.1.l, family="poisson")
result.p.1 <- cl(police.imp.1.l, imp.1.p, police.imp.1.l$state_ut)
nam.e <- paste("e", i, sep = "")
assign(nam.e, result.p.1[2:5, 1])
nam.se <- paste("se", i, sep = "")
assign(nam.se, result.p.1[2:5, 2])
}
beta.t <- cbind(e1, e2, e3, e4, e5)
beta.se <- cbind(se1, se2, se3, se4, se5)
## Calculate imputed beta and SEs
se_calc <- function(q, se){
part1 <- sum((se)^2)/length(se)
part2 <- sum((q - mean(q))^2)/(length(q)-1)*(1+1/length(q))
se.imp <- sqrt(part1 + part2)
q.imp <- mean(q)
p.value <- 2*pnorm(abs(q.imp/se.imp),lower.tail = FALSE)
return(c(q.imp, se.imp, p.value))
}
## Print poisson results
result.t3 <- matrix(NA, nrow = 4, ncol = 3)
for (i in 1:4){
result.t3[i, ]<- se_calc(q=beta.t[i, ], se = beta.se[i, ])
}
result.t3
## Replace results to model result
model.p.ngo.cl.c <- result.p.1
model.p.ngo.cl.c[2:5, 1] <- result.t3[, 1]
model.p.ngo.cl.c[2:5, 2] <- result.t3[, 2]
model.p.ngo.cl.c[2:5, 4] <- result.t3[, 3]
stargazer(model.ols.ngo.cl, model.ols.ngo.cl.c, model.p.ngo.cl, model.p.ngo.cl.c)
## packages
library(dplyr)
#library(NeuralNetTools)
library(neuralnet)
## import
raw_data <- read.csv("dataset.csv", sep=";", header=T)
df <- raw_data
## check data
# na per column
apply(df, 2, function(x) sum(is.na(x)))
names(df)
sum(is.na(df['default']))
dim(df)
### analytics vidhya
## process data
# drop all NA observations (note: this step is not actually implemented below)
# select first 999 rows
data <- df[1:999, ]
# Random sampling
samplesize = 0.60 * nrow(data)
set.seed(80)
index = sample( seq_len ( nrow ( data ) ), size = samplesize )
# Create training and test set
datatrain = data[ index, ]
datatest = data[ -index, ]
## beckmw
library(NeuralNetTools)
# create model
library(neuralnet)
AND <- c(rep(0, 7), 1)
OR <- c(0, rep(1, 7))
binary_data <- data.frame(expand.grid(c(0, 1), c(0, 1), c(0, 1)), AND, OR)
mod <- neuralnet(AND + OR ~ Var1 + Var2 + Var3, binary_data,
hidden = c(6, 12, 8), rep = 10, err.fct = 'ce', linear.output = FALSE)
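# Optional quick checks on the fitted network (not in the original notes; this assumes the
# standard neuralnet and NeuralNetTools interfaces):
plot(mod, rep = "best") # plot the repetition with the lowest error
plotnet(mod)            # neural interpretation diagram from NeuralNetTools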
library(hamlet)
### Name: extendsymrange
### Title: Extend range of variable limits while retaining a point of
### symmetricity
### Aliases: extendsymrange
### Keywords: dplot
### ** Examples
set.seed(1)
ex <- rnorm(10)+2
hist(ex, xlim=extendsymrange(ex, sym=0), breaks=100)
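# extendsymrange(ex, sym = 0) returns x-limits that are symmetric about 0 and wide enough
# to cover the data, so the histogram stays centred on zero even though the simulated
# values are shifted to around +2.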
# read data from csv and filter dates so that only data
# from feb 1, 2007 and feb 2, 2007 is used
options(stringsAsFactors = FALSE)
dat <- read.csv("data/household_power_consumption.txt", sep=";")
powerconsumption <- dat[dat$Date == "1/2/2007" | dat$Date == "2/2/2007", ]
# get the required columns from our dataset and convert the strings to numeric/time
activepower <- as.numeric(powerconsumption$Global_active_power)
# create a png file
png("plot2.png", bg="white", width=480, height=480)
# plot data
plot(activepower, type="l", ylab="Global Active Power (kilowatts)", xlab="",
axes=FALSE, ylim=c(0, 8), frame.plot=TRUE)
axis(1, at=c(0, 1440, 2880), labels=c("Thu", "Fri", "Sat"))
axis(2, at=c(0, 2, 4, 6))
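# Note: the data are minute-level, so 1440 observations span one day; ticks at 0, 1440
# and 2880 therefore mark the start of Thursday, Friday and Saturday.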
# close the file
dev.off()
max <- function(x,y)
{
if(x>y)
return(x)
else
return(y)
}
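# Usage: max(3, 7) returns 7. Defining max() this way masks base R's max(), which accepts
# any number of (vector) arguments; for an element-wise maximum of two vectors, base R's
# pmax(x, y) is the usual choice, so a different name (e.g. max2) may be safer.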
#These functions implement the inverting of a matrix, caching its inverse
#if that hasn't already been done and returning that inverse.
# this function creates helper functions that set and get a matrix,
# and set and get its cached inverse, which is generated by the next function.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(s) m <<- s
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
#this function either returns the value of the (cached) inverse of the matrix or
# calculates, caches and returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m # return(m) is better
}
# test code:
# > v<-c(9,1,2,3,4,5,6,7,8)
# > m= matrix(v, nrow=3, ncol=3)
# > m
# [,1] [,2] [,3]
# [1,] 9 3 6
# [2,] 1 4 7
# [3,] 2 5 8
#
# > n<-makeCacheMatrix(m)
#
# > n$get()
# [,1] [,2] [,3]
# [1,] 9 3 6
# [2,] 1 4 7
# [3,] 2 5 8
#
# > cacheSolve(n)
# [,1] [,2] [,3]
# [1,] 0.1111111 -0.2222222 0.1111111
# [2,] -0.2222222 -2.2222222 2.1111111
# [3,] 0.1111111 1.4444444 -1.2222222
#
# > cacheSolve(n)
# getting cached data
# [,1] [,2] [,3]
# [1,] 0.1111111 -0.2222222 0.1111111
# [2,] -0.2222222 -2.2222222 2.1111111
# [3,] 0.1111111 1.4444444 -1.2222222
#
# > v<-c(0.1111111,-0.2222222,0.1111111,-0.2222222,-2.2222222,1.4444444,0.1111111,2.1111111,-1.2222222)
# > v
# [1] 0.1111111 -0.2222222 0.1111111 -0.2222222 -2.2222222 1.4444444 0.1111111 2.1111111 -1.2222222
#
# > m= matrix(v, nrow=3, ncol=3)
#
# > n<-makeCacheMatrix(m)
# > n$get()
# [,1] [,2] [,3]
# [1,] 0.1111111 -0.2222222 0.1111111
# [2,] -0.2222222 -2.2222222 2.1111111
# [3,] 0.1111111 1.4444444 -1.2222222
#
# > cacheSolve(n)
# [,1] [,2] [,3]
# [1,] 9.000001 3 6.000001
# [2,] 1.000000 4 7.000001
# [3,] 2.000000 5 8.000001
#
# getting cached data
# [,1] [,2] [,3]
# [1,] 9.000001 3 6.000001
# [2,] 1.000000 4 7.000001
# [3,] 2.000000 5 8.000001
#!/usr/bin/env Rscript
library("R.utils")
library(ada)
library(mlbench) # BostonHousing, BreastCancer, Glass, Ionosphere, PimaIndiansDiabetes, Sonar, Soybean
num <- cmdArgs()
data_name <- as.character(num[[1]])
file_num <- as.integer(num[[2]])
range1 <- as.integer(num[[3]])
range2 <- as.integer(num[[4]])
num
data_name
file_num
range1
range2
# Load all of the binary datasets for testing. The datasets are called
# bc, Ionosphere, pid, sonar, lich, mullein, titanic
f1 <- "/scratch/general/lustre/u6007925/Grid/Data/"
f2 <- "_Data.R"
source(paste(f1, data_name, f2, sep = ""))
dat <- switch(data_name,
"BreastCancer" = bc,
"Ionosphere" = Ionosphere,
"Lichen" = lichen,
"Mullein" = mullein,
"Pima" = pid,
"Sonar" = Sonar)
# cv.pred function
source("/scratch/general/lustre/u6007925/Grid/Functions/ADAbin-cvpred.R")
# my own caret type function for a sparse grid.
tmp <- c(0.01, 0.1, 0.3, 0.5, 0.7, 1)
p <- NULL
for(i in 1:length(tmp)) {
p <- rbind(p, cbind(tmp[i], seq(100, 1400, 100)))
}
p2 <- NULL
tmp2 <- c(1:6, 8, 10, 12, 14, 16, 18, 20)
for(i in 1:length(tmp2)) {
p2 <- rbind(p2, cbind(p, tmp2[i]))
}
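# Note (added; not in the original script): the two nested rbind()/cbind() loops
# above enumerate every nu x iterations x maxdepth combination by hand. A hedged,
# equivalent sketch using base R's expand.grid() would yield the same set of
# combinations (as a data frame, in a different row order):
# grid <- expand.grid(Nu = c(0.01, 0.1, 0.3, 0.5, 0.7, 1),
#                     Iter = seq(100, 1400, 100),
#                     Maxdepth = c(1:6, 8, 10, 12, 14, 16, 18, 20))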
head(p2)
tail(p2)
param <- NULL
for(i in 1:10) {
param <- rbind(param, cbind(p2, NA, NA, NA))
}
colnames(param) <- c("Nu", "Iter", "Maxdepth", "Accuracy", "AccLCL", "Time")
# Results
param <- param[c(range1:range2), ]
for(i in 1:nrow(param)) {
pr <- NULL # reset so a failed cv.pred() run is not confused with the previous iteration's result
t1 <- Sys.time()
try(pr <- cv.pred(dat[, -1], as.numeric(as.factor(dat[, 1])) - 1,
fold = 3, nu = param[i, 1], iter = param[i, 2],
maxd = param[i, 3]))
if(!is.null(pr)) {
param[i, 4] <- pr$acc
param[i, 5] <- mean(pr$cv.acc) - 1.96 * sd(pr$cv.acc) / sqrt(length(pr$cv.acc))
}
t2 <- Sys.time()
param[i, 6] <- as.numeric(t2 - t1, units = "secs")
}
f3 <- "/scratch/general/lustre/u6007925/Grid/ADA/Binary/"
f4 <- "_Grid_"
ff <- paste(f3, data_name, "/", data_name, f4, file_num, ".csv", sep = "")
write.csv(param, ff, row.names = FALSE)
|
/Tuning Research/Examples of Grid Search Code/Adaboost/ADA_Binary.R
|
no_license
|
jillbo1000/Dissertation-code
|
R
| false | false | 2,143 |
r
|
#!/usr/bin/env Rscript
library("R.utils")
library(ada)
library(mlbench) # BostonHousing, BreastCancer, Glass, Ionosphere, PimaIndiansDiabetes, Sonar, Soybean
num <- cmdArgs()
data_name <- as.character(num[[1]])
file_num <- as.integer(num[[2]])
range1 <- as.integer(num[[3]])
range2 <- as.integer(num[[4]])
num
data_name
file_num
range1
range2
# Load all of the binary datasets for testing. The datasets are called
# bc, Ionosphere, pid, sonar, lich, mullein, titanic
f1 <- "/scratch/general/lustre/u6007925/Grid/Data/"
f2 <- "_Data.R"
source(paste(f1, data_name, f2, sep = ""))
dat <- switch(data_name,
"BreastCancer" = bc,
"Ionosphere" = Ionosphere,
"Lichen" = lichen,
"Mullein" = mullein,
"Pima" = pid,
"Sonar" = Sonar)
# cv.pred function
source("/scratch/general/lustre/u6007925/Grid/Functions/ADAbin-cvpred.R")
# my own caret type function for a sparse grid.
tmp <- c(0.01, 0.1, 0.3, 0.5, 0.7, 1)
p <- NULL
for(i in 1:length(tmp)) {
p <- rbind(p, cbind(tmp[i], seq(100, 1400, 100)))
}
p2 <- NULL
tmp2 <- c(1:6, 8, 10, 12, 14, 16, 18, 20)
for(i in 1:length(tmp2)) {
p2 <- rbind(p2, cbind(p, tmp2[i]))
}
head(p2)
tail(p2)
param <- NULL
for(i in 1:10) {
param <- rbind(param, cbind(p2, NA, NA, NA))
}
colnames(param) <- c("Nu", "Iter", "Maxdepth", "Accuracy", "AccLCL", "Time")
# Results
param <- param[c(range1:range2), ]
for(i in 1:nrow(param)) {
pr <- NULL # reset so a failed cv.pred() run is not confused with the previous iteration's result
t1 <- Sys.time()
try(pr <- cv.pred(dat[, -1], as.numeric(as.factor(dat[, 1])) - 1,
fold = 3, nu = param[i, 1], iter = param[i, 2],
maxd = param[i, 3]))
if(!is.null(pr)) {
param[i, 4] <- pr$acc
param[i, 5] <- mean(pr$cv.acc) - 1.96 * sd(pr$cv.acc) / sqrt(length(pr$cv.acc))
}
t2 <- Sys.time()
param[i, 6] <- as.numeric(t2 - t1, units = "secs")
}
f3 <- "/scratch/general/lustre/u6007925/Grid/ADA/Binary/"
f4 <- "_Grid_"
ff <- paste(f3, data_name, "/", data_name, f4, file_num, ".csv", sep = "")
write.csv(param, ff, row.names = FALSE)
|
library(plsVarSel)
### Name: bve_pls
### Title: Backward variable elimination PLS (BVE-PLS)
### Aliases: bve_pls
### ** Examples
data(gasoline, package = "pls")
with( gasoline, bve_pls(octane, NIR) )
|
/data/genthat_extracted_code/plsVarSel/examples/bve_pls.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 208 |
r
|
library(plsVarSel)
### Name: bve_pls
### Title: Backward variable elimination PLS (BVE-PLS)
### Aliases: bve_pls
### ** Examples
data(gasoline, package = "pls")
with( gasoline, bve_pls(octane, NIR) )
|
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
trainingCol = names(training)
#print(trainingCol)
ILCol <- c();
for(i in 1:length(trainingCol)) {
if(substr(trainingCol[i], 1, 2) == "IL") {
ILCol <- c(ILCol, i)
print(i)
}
}
trainingData <- training[,ILCol]
prComp <- prcomp(trainingData)
#preProc <- preProcess(log10(trainingData+1),method="pca",pcaComp=2)
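# Hedged continuation (added; not in the original script): using only the prComp
# object computed above, count how many principal components of the IL_* predictors
# are needed to capture 90% of the variance. The 0.90 cutoff is illustrative.
var_explained <- prComp$sdev^2 / sum(prComp$sdev^2)
cum_var <- cumsum(var_explained)
which(cum_var >= 0.90)[1] # index of the first component reaching 90% cumulative variance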
|
/ml/Questions4.R
|
permissive
|
krishnaiitd/Rprogramming
|
R
| false | false | 619 |
r
|
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
trainingCol = names(training)
#print(trainingCol)
ILCol <- c();
for(i in 1:length(trainingCol)) {
if(substr(trainingCol[i], 1, 2) == "IL") {
ILCol <- c(ILCol, i)
print(i)
}
}
trainingData <- training[,ILCol]
prComp <- prcomp(trainingData)
#preProc <- preProcess(log10(trainingData+1),method="pca",pcaComp=2)
|
################################
## Functions to do subsetting ##
################################
#' @title Subset factors
#' @name subsetFactors
#' @description Method to subset (or sort) factors
#' @param object a \code{\link{MOFAmodel}} object.
#' @param factors character vector with the factor names, or numeric vector with the index of the factors.
#' @param keep_intercept bool whether intercept is kept when subsetting (default TRUE).
#' @export
subsetFactors <- function(object, factors, keep_intercept=T) {
# Sanity checks
if (class(object) != "MOFAmodel") stop("'object' has to be an instance of MOFAmodel")
stopifnot(length(factors) <= object@Dimensions[["K"]])
# Get factors
if(is.numeric(factors)) {
if (object@ModelOptions$learnIntercept == T) factors <- factorNames(object)[factors+1]
else factors <- factorNames(object)[factors]
}
else{ stopifnot(all(factors %in% factorNames(object))) }
if (keep_intercept & object@ModelOptions$learnIntercept == T & !"intercept" %in% factors) {
factors <- c("intercept", factors)
}
# Subset relevant slots
object@Expectations$Z <- object@Expectations$Z[,factors, drop=F]
object@Expectations$AlphaW <- sapply(object@Expectations$AlphaW, function(x) x[factors], simplify = F, USE.NAMES = T)
object@Expectations$W <- sapply(object@Expectations$W, function(x) x[,factors, drop=F], simplify = F, USE.NAMES = T)
object@Expectations$Theta <- sapply(object@Expectations$Theta, function(x) x[factors], simplify = F, USE.NAMES = T)
# Modify dimensionality
object@Dimensions[["K"]] <- length(factors)
# Modify factor names
factorNames(object) <- as.character(factors)
return(object)
}
#' @title Subset samples
#' @name subsetSamples
#' @description Method to subset (or sort) samples
#' @param object a \code{\link{MOFAmodel}} object.
#' @param samples character vector with the sample names, numeric vector with the sample indices or logical vector with the samples to be kept as TRUE.
#' @export
subsetSamples <- function(object, samples) {
# Sanity checks
if (class(object) != "MOFAmodel") stop("'object' has to be an instance of MOFAmodel")
stopifnot(length(samples) <= object@Dimensions[["N"]])
warning("Removing samples is fine for an exploratory analysis, but we recommend removing them before training!\n")
# Get samples
if (is.character(samples)) {
stopifnot(all(samples %in% sampleNames(object)))
} else {
samples <- sampleNames(object)[samples]
}
# Subset relevant slots
object@Expectations$Z <- object@Expectations$Z[samples,, drop=F]
object@Expectations$Y <- sapply(object@Expectations$Y, function(x) x[samples,], simplify = F, USE.NAMES = T)
object@TrainData <- sapply(object@TrainData, function(x) x[,samples], simplify = F, USE.NAMES = T)
object@InputData <- object@InputData[,samples,]
if (length(object@ImputedData)!=0) { object@ImputedData <- sapply(object@ImputedData, function(x) x[,samples], simplify = F, USE.NAMES = T)}
# Modify dimensionality
object@Dimensions[["N"]] <- length(samples)
# Modify sample names in the MOFAobject
sampleNames(object) <- samples
return(object)
}
#' @title Subset views
#' @name subsetViews
#' @description Method to subset (or sort) views
#' @param object a \code{\link{MOFAmodel}} object.
#' @param views character vector with the view names, numeric vector with the view indices or logical vector with the view to be kept as TRUE.
#' @export
subsetViews <- function(object, views) {
# Sanity checks
if (class(object) != "MOFAmodel") stop("'object' has to be an instance of MOFAmodel")
stopifnot(length(views) <= object@Dimensions[["M"]])
warning("Removing views is fine for an exploratory analysis, but we recommend removing them before training!\n")
# Get views
if (is.character(views)) {
stopifnot(all(views %in% viewNames(object)))
} else {
views <- viewNames(object)[views]
}
# Subset relevant slots
object@Expectations$Y <- object@Expectations$Y[views]
object@Expectations$W <- object@Expectations$W[views]
object@TrainData <- object@TrainData[views]
if (length(object@ImputedData)!=0) { object@ImputedData <- object@ImputedData[views] }
# Modify dimensionality
object@Dimensions[["M"]] <- length(views)
# Modify sample names in the MOFAobject
viewNames(object) <- views
return(object)
}
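# Hedged usage sketch (added; not part of MOFAtools): `model` stands for an
# already-trained MOFAmodel object, and the factor indices, sample range and view
# name are illustrative only, so the calls are left commented out.
# model <- subsetFactors(model, factors = 1:3) # keep the first three factors
# model <- subsetSamples(model, samples = sampleNames(model)[1:50]) # keep 50 samples
# model <- subsetViews(model, views = "mRNA") # keep a single view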
|
/MOFAtools/R/subset.R
|
no_license
|
vd4mmind/MOFA
|
R
| false | false | 4,400 |
r
|
################################
## Functions to do subsetting ##
################################
#' @title Subset factors
#' @name subsetFactors
#' @description Method to subset (or sort) factors
#' @param object a \code{\link{MOFAmodel}} object.
#' @param factors character vector with the factor names, or numeric vector with the index of the factors.
#' @param keep_intercept bool whether intercept is kept when subsetting (default TRUE).
#' @export
subsetFactors <- function(object, factors, keep_intercept=T) {
# Sanity checks
if (class(object) != "MOFAmodel") stop("'object' has to be an instance of MOFAmodel")
stopifnot(length(factors) <= object@Dimensions[["K"]])
# Get factors
if(is.numeric(factors)) {
if (object@ModelOptions$learnIntercept == T) factors <- factorNames(object)[factors+1]
else factors <- factorNames(object)[factors]
}
else{ stopifnot(all(factors %in% factorNames(object))) }
if (keep_intercept & object@ModelOptions$learnIntercept == T & !"intercept" %in% factors) {
factors <- c("intercept", factors)
}
# Subset relevant slots
object@Expectations$Z <- object@Expectations$Z[,factors, drop=F]
object@Expectations$AlphaW <- sapply(object@Expectations$AlphaW, function(x) x[factors], simplify = F, USE.NAMES = T)
object@Expectations$W <- sapply(object@Expectations$W, function(x) x[,factors, drop=F], simplify = F, USE.NAMES = T)
object@Expectations$Theta <- sapply(object@Expectations$Theta, function(x) x[factors], simplify = F, USE.NAMES = T)
# Modify dimensionality
object@Dimensions[["K"]] <- length(factors)
# Modify factor names
factorNames(object) <- as.character(factors)
return(object)
}
#' @title Subset samples
#' @name subsetSamples
#' @description Method to subset (or sort) samples
#' @param object a \code{\link{MOFAmodel}} object.
#' @param samples character vector with the sample names, numeric vector with the sample indices or logical vector with the samples to be kept as TRUE.
#' @export
subsetSamples <- function(object, samples) {
# Sanity checks
if (class(object) != "MOFAmodel") stop("'object' has to be an instance of MOFAmodel")
stopifnot(length(samples) <= object@Dimensions[["N"]])
warning("Removing samples is fine for an exploratory analysis, but we recommend removing them before training!\n")
# Get samples
if (is.character(samples)) {
stopifnot(all(samples %in% sampleNames(object)))
} else {
samples <- sampleNames(object)[samples]
}
# Subset relevant slots
object@Expectations$Z <- object@Expectations$Z[samples,, drop=F]
object@Expectations$Y <- sapply(object@Expectations$Y, function(x) x[samples,], simplify = F, USE.NAMES = T)
object@TrainData <- sapply(object@TrainData, function(x) x[,samples], simplify = F, USE.NAMES = T)
object@InputData <- object@InputData[,samples,]
if (length(object@ImputedData)!=0) { object@ImputedData <- sapply(object@ImputedData, function(x) x[,samples], simplify = F, USE.NAMES = T)}
# Modify dimensionality
object@Dimensions[["N"]] <- length(samples)
# Modify sample names in the MOFAobject
sampleNames(object) <- samples
return(object)
}
#' @title Subset views
#' @name subsetViews
#' @description Method to subset (or sort) views
#' @param object a \code{\link{MOFAmodel}} object.
#' @param views character vector with the view names, numeric vector with the view indices or logical vector with the view to be kept as TRUE.
#' @export
subsetViews <- function(object, views) {
# Sanity checks
if (class(object) != "MOFAmodel") stop("'object' has to be an instance of MOFAmodel")
stopifnot(length(views) <= object@Dimensions[["M"]])
warning("Removing views is fine for an exploratory analysis, but we recommend removing them before training!\n")
# Get views
if (is.character(views)) {
stopifnot(all(views %in% viewNames(object)))
} else {
views <- viewNames(object)[views]
}
# Subset relevant slots
object@Expectations$Y <- object@Expectations$Y[views]
object@Expectations$W <- object@Expectations$W[views]
object@TrainData <- object@TrainData[views]
if (length(object@ImputedData)!=0) { object@ImputedData <- object@ImputedData[views] }
# Modify dimensionality
object@Dimensions[["M"]] <- length(views)
# Modify sample names in the MOFAobject
viewNames(object) <- views
return(object)
}
|
library(shiny)
library(shinycssloaders)
library(wordcloud)
library(ggplot2)
# get variables for shiny app
source("searches.R")
source("functions.R")
tweets_cp1 = process(tweets_covid$text)
tweets_tbl1 = get_tbl(tweets_cp1)
tweets_cp2 = process(tweets_covid19$text)
tweets_tbl2 = get_tbl(tweets_cp2)
tweets_cp3 = process(tweets_covid.19$text)
tweets_tbl3 = get_tbl(tweets_cp3)
tweets_cp4 = process(tweets_covid_19$text)
tweets_tbl4 = get_tbl(tweets_cp4)
# define ui
ui <- fluidPage(
titlePanel("Tweets about Coronavirus"),
sidebarLayout(
sidebarPanel(
selectInput("hashtag", "Pick a hashtag",
c("#COVID",
"#COVID19",
"#COVIDー19",
"#COVID_19"),
selected = "COVID"),
p(' The most recent 500 tweets were pulled for each hashtag. '),
p(' Tweets were processed in the following order:'),
p(' - Unicode was removed'),
p(' - Created corpus'),
p(' - URLs were removed'),
p(' - HTML converted into text'),
p(' - Tags were removed'),
p(' - Abbreviations and contractions were replaced'),
p(' - All words were converted to lowercase'),
p(' - All numbers were removed'),
p(' - All punctuation was removed'),
p(' - English stop words were removed, along with the following common terms
in virtually all tweets: "covid", "covid19", "covid-19", "covid_19",
"covid__19", "also", "can", "like", "coronavirus", "corona virus" '),
p(' - Excessive whitespace was removed'),
p(' - Words were lemmatized'),
p(' - DTM was created with unigrams and bigrams'),
p(' - Sparse terms were removed at .97')
),
mainPanel(
tabsetPanel(
tabPanel("Wordclouds", plotOutput("wordcloud")),
tabPanel("Top 20 Bargraph", withSpinner(plotOutput("bargraph"))),
tabPanel("Shared Terms", withSpinner(tableOutput("shared")))
)
)
)
)
# define server
server = function(input, output) {
output$wordcloud = renderPlot({
if(input$hashtag == "#COVID") {
wordcloud(tweets_tbl1$words, tweets_tbl1$freq,
max.words = 50, colors = "tomato",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
} else if(input$hashtag == "#COVID19") {
wordcloud(tweets_tbl2$words, tweets_tbl2$freq,
max.words = 50, colors = "slateblue",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
} else if(input$hashtag == "#COVIDー19") {
wordcloud(tweets_tbl3$words, tweets_tbl3$freq,
max.words = 50, colors = "royalblue",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
} else if(input$hashtag == "#COVID_19") {
wordcloud(tweets_tbl4$words, tweets_tbl4$freq,
max.words = 50, colors = "purple3",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
}
})
output$bargraph = renderPlot({
# top 20 terms across hashtags
tweets_covid$group = rep("covid", nrow(tweets_covid))
tweets_covid19$group = rep("covid19", nrow(tweets_covid19))
tweets_covid.19$group = rep("covid.19", nrow(tweets_covid.19))
tweets_covid_19$group = rep("covid_19", nrow(tweets_covid_19))
covid_tbl = rbind(tweets_covid, tweets_covid19, tweets_covid.19, tweets_covid_19)
covid_cp = process(covid_tbl$text)
top_tbl = get_tbl(covid_cp)
top_20 = top_tbl[1:20,]
ggplot(top_20, aes(x = reorder(words, freq), y = freq)) +
geom_col() +
coord_flip() +
labs(title = "Top 20 terms across hashtags",
x = "terms",
y = "raw count")
})
output$shared = renderTable({
all_covid = paste(tweets_covid$text, collapse = "")
all_covid19 = paste(tweets_covid19$text, collapse = "")
all_covid.19 = paste(tweets_covid.19$text, collapse = "")
all_covid_19 = paste(tweets_covid_19$text, collapse = "")
all_tweets = c(all_covid, all_covid19,
all_covid.19, all_covid_19)
all_cp = process(all_tweets)
all_tdm = tdm(all_cp)
colnames(all_tdm) = c("COVID","COVID19",
"COVIDー19", "COVID_19")
all_m = as.matrix(all_tdm)
shared = tibble(
hashtag_comparison = c("COVID - COVID19",
"COVID - COVIDー19",
"COVID - COVID_19",
"COVID19 - COVIDー19",
"COVID19 - COVID_19",
"COVIDー19 - COVID_19"),
shared_terms = c(
nrow(subset(all_m, all_m[, 1] > 5 & all_m[, 2] > 5)),
nrow(subset(all_m, all_m[, 1] > 5 & all_m[, 3] > 5)),
nrow(subset(all_m, all_m[, 1] > 5 & all_m[, 4] > 5)),
nrow(subset(all_m, all_m[, 2] > 5 & all_m[, 3] > 5)),
nrow(subset(all_m, all_m[, 2] > 5 & all_m[, 4] > 5)),
nrow(subset(all_m, all_m[, 3] > 5 & all_m[, 4] > 5))
)
)
shared
})
}
# run app
shinyApp(ui = ui, server = server)
#seabass.shinyapps.io/projectA
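# Hedged sketch (added; not part of the app): process() and get_tbl() are defined
# in the sourced functions.R, which is not shown here. Assuming they follow the
# cleaning steps listed in the sidebar, a minimal tm-based corpus cleaner might
# look like the commented function below; the name process_sketch and the extra
# stop words are illustrative, not the app's real implementation.
# process_sketch <- function(text) {
#   cp <- tm::VCorpus(tm::VectorSource(text))
#   cp <- tm::tm_map(cp, tm::content_transformer(tolower))
#   cp <- tm::tm_map(cp, tm::removeNumbers)
#   cp <- tm::tm_map(cp, tm::removePunctuation)
#   cp <- tm::tm_map(cp, tm::removeWords,
#                    c(tm::stopwords("en"), "covid", "covid19", "coronavirus"))
#   cp <- tm::tm_map(cp, tm::stripWhitespace)
#   cp
# }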
|
/projectA/coronavirus.R
|
no_license
|
sebastianmarinc/comprehensive-project
|
R
| false | false | 5,375 |
r
|
library(shiny)
library(shinycssloaders)
library(wordcloud)
library(ggplot2)
# get variables for shiny app
source("searches.R")
source("functions.R")
tweets_cp1 = process(tweets_covid$text)
tweets_tbl1 = get_tbl(tweets_cp1)
tweets_cp2 = process(tweets_covid19$text)
tweets_tbl2 = get_tbl(tweets_cp2)
tweets_cp3 = process(tweets_covid.19$text)
tweets_tbl3 = get_tbl(tweets_cp3)
tweets_cp4 = process(tweets_covid_19$text)
tweets_tbl4 = get_tbl(tweets_cp4)
# define ui
ui <- fluidPage(
titlePanel("Tweets about Coronavirus"),
sidebarLayout(
sidebarPanel(
selectInput("hashtag", "Pick a hashtag",
c("#COVID",
"#COVID19",
"#COVIDー19",
"#COVID_19"),
selected = "COVID"),
p(' The most recent 500 tweets were pulled for each hashtag. '),
p(' Tweets were processed in the following order:'),
p(' - Unicode was removed'),
p(' - Created corpus'),
p(' - URLs were removed'),
p(' - HTML converted into text'),
p(' - Tags were removed'),
p(' - Abbreviations and contractions were replaced'),
p(' - All words were converted to lowercase'),
p(' - All numbers were removed'),
p(' - All punctuation was removed'),
p(' - English stop words were removed, along with the following common terms
in virtually all tweets: "covid", "covid19", "covid-19", "covid_19",
"covid__19", "also", "can", "like", "coronavirus", "corona virus" '),
p(' - Excessive whitespace was removed'),
p(' - Words were lemmatized'),
p(' - DTM was created with unigrams and bigrams'),
p(' - Sparse terms were removed at .97')
),
mainPanel(
tabsetPanel(
tabPanel("Wordclouds", plotOutput("wordcloud")),
tabPanel("Top 20 Bargraph", withSpinner(plotOutput("bargraph"))),
tabPanel("Shared Terms", withSpinner(tableOutput("shared")))
)
)
)
)
# define server
server = function(input, output) {
output$wordcloud = renderPlot({
if(input$hashtag == "#COVID") {
wordcloud(tweets_tbl1$words, tweets_tbl1$freq,
max.words = 50, colors = "tomato",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
} else if(input$hashtag == "#COVID19") {
wordcloud(tweets_tbl2$words, tweets_tbl2$freq,
max.words = 50, colors = "slateblue",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
} else if(input$hashtag == "#COVIDー19") {
wordcloud(tweets_tbl3$words, tweets_tbl3$freq,
max.words = 50, colors = "royalblue",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
} else if(input$hashtag == "#COVID_19") {
wordcloud(tweets_tbl4$words, tweets_tbl4$freq,
max.words = 50, colors = "purple3",
main = "50 most popular words",
scale = c(4,.25),
random.order = F,
rot.per = 0)
}
})
output$bargraph = renderPlot({
# top 20 terms across hashtags
tweets_covid$group = rep("covid", nrow(tweets_covid))
tweets_covid19$group = rep("covid19", nrow(tweets_covid19))
tweets_covid.19$group = rep("covid.19", nrow(tweets_covid.19))
tweets_covid_19$group = rep("covid_19", nrow(tweets_covid_19))
covid_tbl = rbind(tweets_covid, tweets_covid19, tweets_covid.19, tweets_covid_19)
covid_cp = process(covid_tbl$text)
top_tbl = get_tbl(covid_cp)
top_20 = top_tbl[1:20,]
ggplot(top_20, aes(x = reorder(words, freq), y = freq)) +
geom_col() +
coord_flip() +
labs(title = "Top 20 terms across hashtags",
x = "terms",
y = "raw count")
})
output$shared = renderTable({
all_covid = paste(tweets_covid$text, collapse = "")
all_covid19 = paste(tweets_covid19$text, collapse = "")
all_covid.19 = paste(tweets_covid.19$text, collapse = "")
all_covid_19 = paste(tweets_covid_19$text, collapse = "")
all_tweets = c(all_covid, all_covid19,
all_covid.19, all_covid_19)
all_cp = process(all_tweets)
all_tdm = tdm(all_cp)
colnames(all_tdm) = c("COVID","COVID19",
"COVIDー19", "COVID_19")
all_m = as.matrix(all_tdm)
shared = tibble(
hashtag_comparison = c("COVID - COVID19",
"COVID - COVIDー19",
"COVID - COVID_19",
"COVID19 - COVIDー19",
"COVID19 - COVID_19",
"COVIDー19 - COVID_19"),
shared_terms = c(
nrow(subset(all_m, all_m[, 1] > 5 & all_m[, 2] > 5)),
nrow(subset(all_m, all_m[, 1] > 5 & all_m[, 3] > 5)),
nrow(subset(all_m, all_m[, 1] > 5 & all_m[, 4] > 5)),
nrow(subset(all_m, all_m[, 2] > 5 & all_m[, 3] > 5)),
nrow(subset(all_m, all_m[, 2] > 5 & all_m[, 4] > 5)),
nrow(subset(all_m, all_m[, 3] > 5 & all_m[, 4] > 5))
)
)
shared
})
}
# run app
shinyApp(ui = ui, server = server)
#seabass.shinyapps.io/projectA
|
#' InsuranceAgency
#'
#' An Insurance agency.
#'
#'
#' @param id identifier for the object (URI)
#' @param feesAndCommissionsSpecification (URL or Text or URL or Text type.) Description of fees, commissions, and other terms applied either to a class of financial product, or by a financial service organization.
#' @param priceRange (Text type.) The price range of the business, for example ```$$$```.
#' @param paymentAccepted (Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.
#' @param openingHours (Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.
#' @param currenciesAccepted (Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".
#' @param branchOf (Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].
#' @param telephone (Text or Text or Text or Text type.) The telephone number.
#' @param specialOpeningHoursSpecification (OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].
#' @param smokingAllowed (Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.
#' @param reviews (Review or Review or Review or Review or Review type.) Review of the item.
#' @param review (Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.
#' @param publicAccess (Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value
#' @param photos (Photograph or ImageObject type.) Photographs of this place.
#' @param photo (Photograph or ImageObject type.) A photograph of this place.
#' @param openingHoursSpecification (OpeningHoursSpecification type.) The opening hours of a certain place.
#' @param maximumAttendeeCapacity (Integer or Integer type.) The total number of individuals that may attend an event or venue.
#' @param maps (URL type.) A URL to a map of the place.
#' @param map (URL type.) A URL to a map of the place.
#' @param logo (URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.
#' @param isicV4 (Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.
#' @param isAccessibleForFree (Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.
#' @param hasMap (URL or Map type.) A URL to a map of the place.
#' @param globalLocationNumber (Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.
#' @param geo (GeoShape or GeoCoordinates type.) The geo coordinates of the place.
#' @param faxNumber (Text or Text or Text or Text type.) The fax number.
#' @param events (Event or Event type.) Upcoming or past events associated with this place or organization.
#' @param event (Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.
#' @param containsPlace (Place type.) The basic containment relation between a place and another that it contains.
#' @param containedInPlace (Place type.) The basic containment relation between a place and one that contains it.
#' @param containedIn (Place type.) The basic containment relation between a place and one that contains it.
#' @param branchCode (Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.
#' @param amenityFeature (LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.
#' @param aggregateRating (AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.
#' @param address (Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.
#' @param additionalProperty (PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristic of the entity, e.g. a product feature or another characteristic for which there is no matching property in schema.org. Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:InsuranceAgency
#'
#' @export
InsuranceAgency <- function(id = NULL,
feesAndCommissionsSpecification = NULL,
priceRange = NULL,
paymentAccepted = NULL,
openingHours = NULL,
currenciesAccepted = NULL,
branchOf = NULL,
telephone = NULL,
specialOpeningHoursSpecification = NULL,
smokingAllowed = NULL,
reviews = NULL,
review = NULL,
publicAccess = NULL,
photos = NULL,
photo = NULL,
openingHoursSpecification = NULL,
maximumAttendeeCapacity = NULL,
maps = NULL,
map = NULL,
logo = NULL,
isicV4 = NULL,
isAccessibleForFree = NULL,
hasMap = NULL,
globalLocationNumber = NULL,
geo = NULL,
faxNumber = NULL,
events = NULL,
event = NULL,
containsPlace = NULL,
containedInPlace = NULL,
containedIn = NULL,
branchCode = NULL,
amenityFeature = NULL,
aggregateRating = NULL,
address = NULL,
additionalProperty = NULL,
url = NULL,
sameAs = NULL,
potentialAction = NULL,
name = NULL,
mainEntityOfPage = NULL,
image = NULL,
identifier = NULL,
disambiguatingDescription = NULL,
description = NULL,
alternateName = NULL,
additionalType = NULL){
Filter(Negate(is.null),
list(
type = "InsuranceAgency",
id = id,
feesAndCommissionsSpecification = feesAndCommissionsSpecification,
priceRange = priceRange,
paymentAccepted = paymentAccepted,
openingHours = openingHours,
currenciesAccepted = currenciesAccepted,
branchOf = branchOf,
telephone = telephone,
specialOpeningHoursSpecification = specialOpeningHoursSpecification,
smokingAllowed = smokingAllowed,
reviews = reviews,
review = review,
publicAccess = publicAccess,
photos = photos,
photo = photo,
openingHoursSpecification = openingHoursSpecification,
maximumAttendeeCapacity = maximumAttendeeCapacity,
maps = maps,
map = map,
logo = logo,
isicV4 = isicV4,
isAccessibleForFree = isAccessibleForFree,
hasMap = hasMap,
globalLocationNumber = globalLocationNumber,
geo = geo,
faxNumber = faxNumber,
events = events,
event = event,
containsPlace = containsPlace,
containedInPlace = containedInPlace,
containedIn = containedIn,
branchCode = branchCode,
amenityFeature = amenityFeature,
aggregateRating = aggregateRating,
address = address,
additionalProperty = additionalProperty,
url = url,
sameAs = sameAs,
potentialAction = potentialAction,
name = name,
mainEntityOfPage = mainEntityOfPage,
image = image,
identifier = identifier,
disambiguatingDescription = disambiguatingDescription,
description = description,
alternateName = alternateName,
additionalType = additionalType))}
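# Hedged usage example (added; not part of the package): build a minimal
# schema:InsuranceAgency node with the constructor above. All field values are
# illustrative.
agency <- InsuranceAgency(
  id = "https://example.org/agency/1",
  name = "Example Insurance Agency",
  telephone = "+1-555-0100",
  url = "https://example.org",
  priceRange = "$$")
agency$type # "InsuranceAgency"; NULL arguments are dropped by Filter()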
|
/R/InsuranceAgency.R
|
no_license
|
cboettig/schemar
|
R
| false | false | 11,291 |
r
|
#' InsuranceAgency
#'
#' An Insurance agency.
#'
#'
#' @param id identifier for the object (URI)
#' @param feesAndCommissionsSpecification (URL or Text or URL or Text type.) Description of fees, commissions, and other terms applied either to a class of financial product, or by a financial service organization.
#' @param priceRange (Text type.) The price range of the business, for example ```$$$```.
#' @param paymentAccepted (Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.
#' @param openingHours (Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.
#' @param currenciesAccepted (Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".
#' @param branchOf (Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].
#' @param telephone (Text or Text or Text or Text type.) The telephone number.
#' @param specialOpeningHoursSpecification (OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].
#' @param smokingAllowed (Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.
#' @param reviews (Review or Review or Review or Review or Review type.) Review of the item.
#' @param review (Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.
#' @param publicAccess (Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value
#' @param photos (Photograph or ImageObject type.) Photographs of this place.
#' @param photo (Photograph or ImageObject type.) A photograph of this place.
#' @param openingHoursSpecification (OpeningHoursSpecification type.) The opening hours of a certain place.
#' @param maximumAttendeeCapacity (Integer or Integer type.) The total number of individuals that may attend an event or venue.
#' @param maps (URL type.) A URL to a map of the place.
#' @param map (URL type.) A URL to a map of the place.
#' @param logo (URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.
#' @param isicV4 (Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.
#' @param isAccessibleForFree (Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.
#' @param hasMap (URL or Map type.) A URL to a map of the place.
#' @param globalLocationNumber (Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.
#' @param geo (GeoShape or GeoCoordinates type.) The geo coordinates of the place.
#' @param faxNumber (Text or Text or Text or Text type.) The fax number.
#' @param events (Event or Event type.) Upcoming or past events associated with this place or organization.
#' @param event (Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.
#' @param containsPlace (Place type.) The basic containment relation between a place and another that it contains.
#' @param containedInPlace (Place type.) The basic containment relation between a place and one that contains it.
#' @param containedIn (Place type.) The basic containment relation between a place and one that contains it.
#' @param branchCode (Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.
#' @param amenityFeature (LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.
#' @param aggregateRating (AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.
#' @param address (Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.
#' @param additionalProperty (PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristic of the entity, e.g. a product feature or another characteristic for which there is no matching property in schema.org. Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#'
#' @return a list object corresponding to a schema:InsuranceAgency
#'
#' @export
InsuranceAgency <- function(id = NULL,
feesAndCommissionsSpecification = NULL,
priceRange = NULL,
paymentAccepted = NULL,
openingHours = NULL,
currenciesAccepted = NULL,
branchOf = NULL,
telephone = NULL,
specialOpeningHoursSpecification = NULL,
smokingAllowed = NULL,
reviews = NULL,
review = NULL,
publicAccess = NULL,
photos = NULL,
photo = NULL,
openingHoursSpecification = NULL,
maximumAttendeeCapacity = NULL,
maps = NULL,
map = NULL,
logo = NULL,
isicV4 = NULL,
isAccessibleForFree = NULL,
hasMap = NULL,
globalLocationNumber = NULL,
geo = NULL,
faxNumber = NULL,
events = NULL,
event = NULL,
containsPlace = NULL,
containedInPlace = NULL,
containedIn = NULL,
branchCode = NULL,
amenityFeature = NULL,
aggregateRating = NULL,
address = NULL,
additionalProperty = NULL,
url = NULL,
sameAs = NULL,
potentialAction = NULL,
name = NULL,
mainEntityOfPage = NULL,
image = NULL,
identifier = NULL,
disambiguatingDescription = NULL,
description = NULL,
alternateName = NULL,
additionalType = NULL){
Filter(Negate(is.null),
list(
type = "InsuranceAgency",
id = id,
feesAndCommissionsSpecification = feesAndCommissionsSpecification,
priceRange = priceRange,
paymentAccepted = paymentAccepted,
openingHours = openingHours,
currenciesAccepted = currenciesAccepted,
branchOf = branchOf,
telephone = telephone,
specialOpeningHoursSpecification = specialOpeningHoursSpecification,
smokingAllowed = smokingAllowed,
reviews = reviews,
review = review,
publicAccess = publicAccess,
photos = photos,
photo = photo,
openingHoursSpecification = openingHoursSpecification,
maximumAttendeeCapacity = maximumAttendeeCapacity,
maps = maps,
map = map,
logo = logo,
isicV4 = isicV4,
isAccessibleForFree = isAccessibleForFree,
hasMap = hasMap,
globalLocationNumber = globalLocationNumber,
geo = geo,
faxNumber = faxNumber,
events = events,
event = event,
containsPlace = containsPlace,
containedInPlace = containedInPlace,
containedIn = containedIn,
branchCode = branchCode,
amenityFeature = amenityFeature,
aggregateRating = aggregateRating,
address = address,
additionalProperty = additionalProperty,
url = url,
sameAs = sameAs,
potentialAction = potentialAction,
name = name,
mainEntityOfPage = mainEntityOfPage,
image = image,
identifier = identifier,
disambiguatingDescription = disambiguatingDescription,
description = description,
alternateName = alternateName,
additionalType = additionalType))}
|
covEllipses <-function(x, ...) {
UseMethod("covEllipses")
}
covEllipses.boxM <-
function(x, ...) {
cov <- c(x$cov, pooled=list(x$pooled))
mns <- x$means
df <- x$df
covEllipses.default(cov, mns, df, ...)
}
covEllipses.data.frame <-
function(x, group,
pooled=TRUE,
method = c("classical", "mve", "mcd"), ...) {
method <- match.arg(method)
if (missing(group)) {
group <- factor(rep(1, nrow(x)))
pooled <- FALSE
}
if (!is.factor(group)) {
warning(deparse(substitute(group)), " coerced to factor.")
group <- as.factor(group)
}
p <- ncol(x)
nlev <- nlevels(group)
lev <- levels(group)
dfs <- tapply(group, group, length) - 1
mats <- list()
means <- matrix(0, nrow=nlev, ncol=p)
for(i in 1:nlev) {
rcov <- MASS::cov.rob(x[group == lev[i], ], method=method)
mats[[i]] <- rcov$cov
means[i,] <- rcov$center
}
names(mats) <- lev
rownames(means) <- lev
colnames(means) <- colnames(x)
if(pooled) {
rcov <- MASS::cov.rob(x, method=method)
pooled <- rcov$cov
mats <- c(mats, pooled=list(pooled))
means <- rbind(means, pooled=rcov$center)
dfs <- c(dfs, sum(dfs))
}
covEllipses.default(mats, means, dfs, ...)
}
covEllipses.matrix <- covEllipses.data.frame
covEllipses.default <-
function (
x, # a list of covariance matrices
means, # a matrix of means
df, # vector of degrees of freedom
labels=NULL,
variables=1:2, # x,y variables for the plot [variable names or numbers]
level=0.68,
segments=40, # line segments in each ellipse
center = FALSE, # center the ellipses at c(0,0)?
center.pch="+",
center.cex=2,
col=getOption("heplot.colors", c("red", "blue", "black", "darkgreen", "darkcyan","magenta", "brown","darkgray")),
# colors for ellipses
lty=1,
lwd=2,
fill=FALSE, ## whether to draw filled ellipses (vectorized)
fill.alpha=0.3, ## alpha transparency for filled ellipses
label.pos=0, # label positions: NULL or 0:4
xlab,
ylab,
main="",
xlim, # min/max for X (override internal min/max calc)
ylim,
axes=TRUE, # whether to draw the axes
offset.axes, # if specified, the proportion by which to expand the axes on each end (e.g., .05)
add=FALSE, # add to existing plot?
warn.rank=FALSE,
...)
{
ell <- function(center, shape, radius) {
angles <- (0:segments)*2*pi/segments
circle <- radius * cbind( cos(angles), sin(angles))
if (!warn.rank){
warn <- options(warn=-1)
on.exit(options(warn))
}
Q <- chol(shape, pivot=TRUE)
order <- order(attr(Q, "pivot"))
t( c(center) + t( circle %*% Q[,order]))
}
if (!is.list(x)) stop("Argument 'x' must be a list of covariance matrices")
cov <- x
response.names <- colnames(cov[[1]])
p <- ncol(cov[[1]])
if (!is.numeric(variables)) {
vars <- variables
variables <- match(vars, response.names)
check <- is.na(variables)
if (any(check)) stop(paste(vars[check], collapse=", "),
" not among response variables.")
}
else {
if (any (variables > p)) stop("There are only ", p, " response variables among", variables)
vars <- response.names[variables]
}
n.ell <- length(cov)
if (n.ell == 0) stop("Nothing to plot.")
if (n.ell != nrow(means))
stop( paste0("number of covariance matrices (", n.ell, ") does not conform to rows of means (", nrow(means), ")") )
if (n.ell != length(df))
stop( paste0("number of covariance matrices (", n.ell, ") does not conform to df (", length(df), ")") )
if (missing(xlab)) xlab <- vars[1]
if (missing(ylab)) ylab <- vars[2]
# assign colors and line styles
rep_fun <- rep_len
col <- rep_fun(col, n.ell)
lty <- rep_fun(lty, n.ell)
lwd <- rep_fun(lwd, n.ell)
# handle filled ellipses
fill <- rep_fun(fill, n.ell)
fill.alpha <- rep_fun(fill.alpha, n.ell)
fill.col <- trans.colors(col, fill.alpha)
label.pos <- rep_fun(label.pos, n.ell)
fill.col <- ifelse(fill, fill.col, NA)
radius <- c(sqrt(2 * qf(level, 2, df)))
ellipses <- as.list(rep(0, n.ell))
for(i in 1:n.ell) {
S <- as.matrix(cov[[i]])
S <- S[vars, vars]
ctr <- if (center) c(0,0)
else as.numeric(means[i, vars])
ellipses[[i]] <- ell(ctr, S, radius[i])
}
if (!add){
max <- apply(sapply(ellipses, function(X) apply(X, 2, max)), 1, max)
min <- apply(sapply(ellipses, function(X) apply(X, 2, min)), 1, min)
if (!missing(offset.axes)){
range <- max - min
min <- min - offset.axes*range
max <- max + offset.axes*range
}
xlim <- if(missing(xlim)) c(min[1], max[1]) else xlim
ylim <- if(missing(ylim)) c(min[2], max[2]) else ylim
plot(xlim, ylim, type = "n", xlab=xlab, ylab=ylab, main=main, axes=axes, ...)
}
labels <- if (!is.null(labels)) labels
else names(cov)
for (i in 1:n.ell){
polygon(ellipses[[i]], col=fill.col[i], border=col[i], lty=lty[i], lwd=lwd[i])
label.ellipse(ellipses[[i]], labels[i], col=col[i], label.pos=label.pos[i], ...)
if (!center)
points(means[i,1], means[i,2], pch=center.pch, cex=center.cex, col=col[i], xpd=TRUE)
}
names(ellipses) <- labels
# result <- if (!add) list(ellipses, center=means, xlim=xlim, ylim=ylim, radius=radius)
# else list(H=ellipses, center=gmean, radius=radius)
result <- ellipses
class(result) <- "covEllipses"
invisible(result)
}
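# Hedged usage sketch (added; not part of the package): the methods above also rely
# on other heplots internals (trans.colors, label.ellipse), so this assumes the full
# heplots package is attached; iris is a built-in data set.
# library(heplots)
# covEllipses(iris[, c("Sepal.Length", "Petal.Length")], iris$Species)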
|
/heplots/R/covEllipses.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 5,562 |
r
|
covEllipses <-function(x, ...) {
UseMethod("covEllipses")
}
covEllipses.boxM <-
function(x, ...) {
cov <- c(x$cov, pooled=list(x$pooled))
mns <- x$means
df <- x$df
covEllipses.default(cov, mns, df, ...)
}
covEllipses.data.frame <-
function(x, group,
pooled=TRUE,
method = c("classical", "mve", "mcd"), ...) {
method <- match.arg(method)
if (missing(group)) {
group <- factor(rep(1, nrow(x)))
pooled <- FALSE
}
if (!is.factor(group)) {
warning(deparse(substitute(group)), " coerced to factor.")
group <- as.factor(group)
}
p <- ncol(x)
nlev <- nlevels(group)
lev <- levels(group)
dfs <- tapply(group, group, length) - 1
mats <- list()
means <- matrix(0, nrow=nlev, ncol=p)
for(i in 1:nlev) {
rcov <- MASS::cov.rob(x[group == lev[i], ], method=method)
mats[[i]] <- rcov$cov
means[i,] <- rcov$center
}
names(mats) <- lev
rownames(means) <- lev
colnames(means) <- colnames(x)
if(pooled) {
rcov <- MASS::cov.rob(x, method=method)
pooled <- rcov$cov
mats <- c(mats, pooled=list(pooled))
means <- rbind(means, pooled=rcov$center)
dfs <- c(dfs, sum(dfs))
}
covEllipses.default(mats, means, dfs, ...)
}
covEllipses.matrix <- covEllipses.data.frame
covEllipses.default <-
function (
x, # a list of covariance matrices
means, # a matrix of means
df, # vector of degrees of freedom
labels=NULL,
variables=1:2, # x,y variables for the plot [variable names or numbers]
level=0.68,
segments=40, # line segments in each ellipse
center = FALSE, # center the ellipses at c(0,0)?
center.pch="+",
center.cex=2,
col=getOption("heplot.colors", c("red", "blue", "black", "darkgreen", "darkcyan","magenta", "brown","darkgray")),
# colors for ellipses
lty=1,
lwd=2,
fill=FALSE, ## whether to draw filled ellipses (vectorized)
fill.alpha=0.3, ## alpha transparency for filled ellipses
label.pos=0, # label positions: NULL or 0:4
xlab,
ylab,
main="",
xlim, # min/max for X (override internal min/max calc)
ylim,
axes=TRUE, # whether to draw the axes
offset.axes, # if specified, the proportion by which to expand the axes on each end (e.g., .05)
add=FALSE, # add to existing plot?
warn.rank=FALSE,
...)
{
ell <- function(center, shape, radius) {
angles <- (0:segments)*2*pi/segments
circle <- radius * cbind( cos(angles), sin(angles))
if (!warn.rank){
warn <- options(warn=-1)
on.exit(options(warn))
}
Q <- chol(shape, pivot=TRUE)
order <- order(attr(Q, "pivot"))
t( c(center) + t( circle %*% Q[,order]))
}
if (!is.list(x)) stop("Argument 'x' must be a list of covariance matrices")
cov <- x
response.names <- colnames(cov[[1]])
p <- ncol(cov[[1]])
if (!is.numeric(variables)) {
vars <- variables
variables <- match(vars, response.names)
check <- is.na(variables)
if (any(check)) stop(paste(vars[check], collapse=", "),
" not among response variables.")
}
else {
if (any (variables > p)) stop("There are only ", p, " response variables among", variables)
vars <- response.names[variables]
}
n.ell <- length(cov)
if (n.ell == 0) stop("Nothing to plot.")
if (n.ell != nrow(means))
stop( paste0("number of covariance matrices (", n.ell, ") does not conform to rows of means (", nrow(means), ")") )
if (n.ell != length(df))
stop( paste0("number of covariance matrices (", n.ell, ") does not conform to df (", length(df), ")") )
if (missing(xlab)) xlab <- vars[1]
if (missing(ylab)) ylab <- vars[2]
# assign colors and line styles
rep_fun <- rep_len
col <- rep_fun(col, n.ell)
lty <- rep_fun(lty, n.ell)
lwd <- rep_fun(lwd, n.ell)
# handle filled ellipses
fill <- rep_fun(fill, n.ell)
fill.alpha <- rep_fun(fill.alpha, n.ell)
fill.col <- trans.colors(col, fill.alpha)
label.pos <- rep_fun(label.pos, n.ell)
fill.col <- ifelse(fill, fill.col, NA)
radius <- c(sqrt(2 * qf(level, 2, df)))
ellipses <- as.list(rep(0, n.ell))
for(i in 1:n.ell) {
S <- as.matrix(cov[[i]])
S <- S[vars, vars]
ctr <- if (center) c(0,0)
else as.numeric(means[i, vars])
ellipses[[i]] <- ell(ctr, S, radius[i])
}
if (!add){
max <- apply(sapply(ellipses, function(X) apply(X, 2, max)), 1, max)
min <- apply(sapply(ellipses, function(X) apply(X, 2, min)), 1, min)
if (!missing(offset.axes)){
range <- max - min
min <- min - offset.axes*range
max <- max + offset.axes*range
}
xlim <- if(missing(xlim)) c(min[1], max[1]) else xlim
ylim <- if(missing(ylim)) c(min[2], max[2]) else ylim
plot(xlim, ylim, type = "n", xlab=xlab, ylab=ylab, main=main, axes=axes, ...)
}
labels <- if (!is.null(labels)) labels
else names(cov)
for (i in 1:n.ell){
polygon(ellipses[[i]], col=fill.col[i], border=col[i], lty=lty[i], lwd=lwd[i])
label.ellipse(ellipses[[i]], labels[i], col=col[i], label.pos=label.pos[i], ...)
if (!center)
points(means[i,1], means[i,2], pch=center.pch, cex=center.cex, col=col[i], xpd=TRUE)
}
names(ellipses) <- labels
# result <- if (!add) list(ellipses, center=means, xlim=xlim, ylim=ylim, radius=radius)
# else list(H=ellipses, center=gmean, radius=radius)
result <- ellipses
class(result) <- "covEllipses"
invisible(result)
}
|
library(dplyr)
library(shiny)
library(ggplot2)
library(ROCR)
load(url('http://gmyrland.capstone.s3.amazonaws.com/df.Rdata'))
# some last minute tweaks
df$child_seat_present <- as.factor(df$child_seat_present)
df$displacement[df$displacement < 0] <- 0
df$fire <- as.factor(df$fire)
df$is_weeked <- as.factor(df$is_weeked)
df$special_use <- as.factor(df$special_use)
df$travel_lanes <- as.factor(df$travel_lanes)
df$vehicle_year[df$vehicle_year < 0] <- 0
set.seed(1234)
n <- nrow(df)
shuffled <- df[sample(n),]
train <- shuffled[1:round(0.7 * n),]
test <- shuffled[(round(0.7 * n) + 1):n,]
ggMMplot <- function(var1, var2) {
# Adapted from http://stackoverflow.com/questions/19233365/how-to-create-a-marimekko-mosaic-plot-in-ggplot2
levVar1 <- length(levels(var1))
levVar2 <- length(levels(var2))
jointTable <- prop.table(table(var1, var2))
plotData <- as.data.frame(jointTable)
plotData$marginVar1 <- prop.table(table(var1))
plotData$var2Height <- plotData$Freq / plotData$marginVar1
plotData$var1Center <- c(0, cumsum(plotData$marginVar1)[1:levVar1 -1]) + plotData$marginVar1 / 2
ggplot(plotData, aes(var1Center, var2Height)) +
geom_bar(stat = "identity", aes(width = marginVar1, fill = var2), col = "Black", size=0.25) +
geom_text(aes(label = as.character(var1), x = var1Center, y = 0.5, angle=90)) +
scale_x_continuous(expand=c(0,0)) + scale_y_continuous(expand=c(0,0)) +
xlab("Predictor Proportion") + ylab("Response Proportion") + guides(fill=guide_legend(title="Fatal"))
}
ggViolinplot <- function(var1, var2) {
ggplot(df, aes(factor(var2), var1)) + geom_violin(fill="darkgreen")
}
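# Hedged usage note (added; not part of the app): both helpers take the predictor
# as var1 and the response as var2. Assuming df$seatbelt_used and df$fatal are
# factors, calls such as
#   ggMMplot(df$seatbelt_used, df$fatal)  # categorical predictor -> mosaic plot
#   ggViolinplot(df$age, df$fatal)        # numeric predictor -> violin plot
# produce the mosaic / violin views the app's "correlation" panel displays.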
#terms <- names(df)[!names(df) %in% c('CaseId', 'CaseWeight', 'fatal')]
terms <- c()
lterms <- list(age = "age", airbag_available = "airbag_available", airbag_available_police = "airbag_available_police", airbag_deployment = "airbag_deployment", alcohol_present = "alcohol_present", alcohol_test = "alcohol_test", alcohol_test_result = "alcohol_test_result", average_track = "average_track", avoidance_maneuver = "avoidance_maneuver", body_category = "body_category", body_type = "body_type", cargo_weight = "cargo_weight", child_seat_present = "child_seat_present", compartment_integrity_loss = "compartment_integrity_loss", contacted = "contacted", contacted_area = "contacted_area", contacted_class = "contacted_class", crash_config = "crash_config", crash_type = "crash_type", crashtime = "crashtime", curb_weight = "curb_weight", cylinders = "cylinders", damage_area = "damage_area", damage_plane = "damage_plane", dayofweek = "dayofweek", displacement = "displacement", drive_Wheels = "drive_Wheels", driver_race = "driver_race", drugs_present = "drugs_present", entrapment = "entrapment", event_class = "event_class", eyewear = "eyewear", fire = "fire", front_overhang = "front_overhang", height = "height", is_weeked = "is_weeked", light = "light", make = "make", maximum_width = "maximum_width", model = "model", month = "month", odometer = "odometer", overall_length = "overall_length", posted_speed = "posted_speed", posture = "posture", precrash_category = "precrash_category", preevent_movement = "preevent_movement", preimpact_location = "preimpact_location", preimpact_stability = "preimpact_stability", race = "race", rear_overhang = "rear_overhang", roadway_alignment = "roadway_alignment", roadway_condition = "roadway_condition", roadway_profile = "roadway_profile", roadway_surface = "roadway_surface", role = "role", rollover = "rollover", rollover_contacted = "rollover_contacted", rollover_qtr_turns = "rollover_qtr_turns", seat_inclination = "seat_inclination", seat_location = "seat_location", seat_orientation = "seat_orientation", seat_position = "seat_position", seat_row = "seat_row", seatbelt_availability = "seatbelt_availability", seatbelt_used = "seatbelt_used", sex = "sex", special_use = "special_use", tire_tread_depth = "tire_tread_depth", towed_unit = "towed_unit", traffic_control_device = "traffic_control_device", traffic_control_device_functioning = "traffic_control_device_functioning", transmission = "transmission", travel_lanes = "travel_lanes", travel_speed = "travel_speed", undeformed_end_Width = "undeformed_end_Width", vehicle_year = "vehicle_year", vehicles_involved = "vehicles_involved", weather = "weather", weight = "weight", wheelbase = "wheelbase", year = "year")
## Shiny UI
ui <- fluidPage(
selectInput("select", label = h3("Select Term"), choices = lterms),
#actionButton("addterm", "Add Term"),
hr(),
column(12, align="center",
    ## mosaic / violin
plotOutput("correlation", height=300),
## confusion matrix
#tableOutput('conf'),
fluidRow(
## roc
column(6, plotOutput("roc", height=400, width=500), "ROC Curve"),
## rp
column(6, plotOutput("rp", height=400, width=500), "Recall-Precision Curve")
)
)
)
## Shiny Server
server <- function(input, output, session) {
#session$onSessionEnded(function() stopApp(returnValue=NULL))
vals <- reactive({
f <- reformulate(input$select, "fatal")
var <- as.character(input$select)
fit <- glm(f, family = binomial(link="logit"), data=train)
probs <- predict(fit, test, type="response")
conf <- table(test$fatal, as.integer(probs > 0.5))
pred <- prediction(probs, test$fatal)
list(fit=fit, var=var, probs=probs, conf=conf, pred=pred)
})
output$correlation <- renderPlot({
data <- df[, vals()$var]
print(class(data))
if (is.double(data)) {
ggViolinplot(data, df$fatal)
} else {
ggMMplot(data, df$fatal)
}
})
output$roc <- renderPlot({
plot(performance(vals()$pred, "tpr", "fpr"))
})
output$rp <- renderPlot({
plot (performance(vals()$pred, "prec", "rec"))
})
output$conf <- renderTable({
print(vals()$conf)
vals()$conf
})
}
shinyApp(ui, server)
|
/shiny/app.R
|
no_license
|
gmyrland/capstone_project
|
R
| false | false | 6,022 |
r
|
## ==========================
## Figure 8.5-2 on Page 402
## --------------------------
# n=25
mu = seq(60, 68, by=0.1)
K1 = 1-pnorm( (62-mu)/2 )
K2 = 1-pnorm( (63.29-mu)/2 )
#---------------------------------
plot (mu, K1)
lines(mu, K2)
#---------------------------------
plot (mu, K1, type="l", xlim=c(58,68), ylim=c(0,1), col="blue" )
lines(mu, K2, col="red")
#========================================================================
# n=100
K3 = 1-pnorm( 61.645-mu )
plot (mu, K1, type="l", xlim=c(58,68), ylim=c(0,1), col="blue" )
lines(mu, K2, col="red")
lines(mu, K3, col="black", lty=2)
#========================================================================
# Page 404 of Textbook
q1 = qnorm(0.05)
q2 = qnorm(0.975)
n = 4*(q2-q1)^2
n
c = ( 65*q2-60*q1) / (q2-q1)
c
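## For reference (sketch, assuming sigma = 10 and H0: mu = 60): the curves above
## are power functions K(mu) = 1 - pnorm((crit - mu) / (sigma / sqrt(n))), where
## crit is the critical value of the sample mean (62 and 63.29 for n = 25, 61.645 for n = 100).
power_fun <- function(mu, crit, n, sigma = 10) 1 - pnorm((crit - mu) / (sigma / sqrt(n)))
power_fun(65, crit = 63.29, n = 25)   # power at mu = 65 for the n = 25, alpha = 0.05 test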
|
/Stat/R/Figure-8-5-2.r
|
no_license
|
lee-sangjae/class
|
R
| false | false | 837 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qte.R
\name{computeSE}
\alias{computeSE}
\title{computeSE}
\usage{
computeSE(bootIters, qteobj, alp = 0.05)
}
\arguments{
\item{bootIters}{List of bootstrap iterations}
\item{alp}{The significance level used for constructing bootstrap
confidence intervals}
}
\value{
SEObj
}
\description{
Computes standard errors from bootstrap results. This function
is called by several functions in the qte package
}
\keyword{internal}
|
/man/computeSE.Rd
|
no_license
|
arlionn/qte-1
|
R
| false | true | 504 |
rd
|
#Author: herrj1
#SVM
#Read data from files
train <- read.csv("kddcup.data_10_percent_corrected")
test <- read.csv("kddcup.testdata.labeled_10_percent")
#Install and use SVM to train the model
install.packages("kernlab")
library(kernlab)
classifier <- ksvm(labels ~ ., data = train, kernel = "vanilladot")
#Evaluating model performance
predictions <- predict(classifier, test)
table(predictions, test$labels)
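## A quick overall-agreement check to accompany the cross-tabulation above
## (sketch; it assumes the class column really is named "labels", as in the formula):
agreement <- predictions == test$labels
prop.table(table(agreement))   # proportion of test records classified correctly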
|
/analytics/app6/svm.R
|
no_license
|
herrj1/R
|
R
| false | false | 426 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssh_functions.R
\name{run_project_on_cluster}
\alias{run_project_on_cluster}
\title{run pirouette example}
\usage{
run_project_on_cluster(
project_name,
function_name,
account = jap::your_account(),
session = NA,
fun_arguments
)
}
\arguments{
\item{account}{a peregrine account}
\item{session}{a ssh session}
}
\value{
nothing
}
\description{
NOT WORKING YET
}
\author{
Giovanni Laudanno
}
|
/man/run_project_on_cluster.Rd
|
no_license
|
TheoPannetier/jap
|
R
| false | true | 479 |
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{C3.design.plot}
\alias{C3.design.plot}
\title{Plot fraction of reads with particular quality or higher per position.}
\usage{
C3.design.plot(samples, fqc, design.table)
}
\arguments{
\item{samples}{ShortReadQ object from package ShortRead}
\item{fqc}{FastQA from package ShortRead}
\item{design.table}{data.frame holding information about the experimental design}
}
\value{
list of plot objects
}
\description{
4 line plots, for qualities 18, 20, 24 and 28, showing fraction
of reads with one of those qualities per position in the read.
One line per sample.
Samples are expected to be separate fastq files.
There is one plot per grouping factor.
Samples colored by grouping factor.
Grouping factors are extracted from design.table.
}
|
/man/C3.design.plot.Rd
|
no_license
|
vlpb3/faradr
|
R
| false | false | 792 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/onesim.R
\name{onesim}
\alias{onesim}
\title{Generate an output matrix for one simulation}
\usage{
onesim(
pHSinit = 0.8,
Kx = 100,
betax = 0.02,
wxtnormm = 0.8,
wxtnormsd = 0.3,
hx = 1,
mxtnormm = 1,
mxtnormsd = 0.1,
axtnormm = 1,
axtnormsd = 0.1,
rx = 0.1,
zxtnormm = 1,
zxtnormsd = 0.1,
gx = 4,
cx = 0.9,
phix = 0,
nseasons = 10,
HPcut = 0.5,
pHScut = 0.5,
maY = 100,
miY = 0,
thetax = 0.2,
Ex = 0
)
}
\arguments{
\item{pHSinit}{the initial proportion of healthy seed, numeric or numeric
vector.}
\item{Kx}{the total number of plants, positive integer, numeric or numeric
vector.}
\item{betax}{the maximum seasonal transmission rate, numeric or numeric
vector.}
\item{wxtnormm}{the environmental effect on transmission rate (mean of
underlying normal distribution prior to truncation), numeric or numeric
vector.}
\item{wxtnormsd}{the environmental effect on transmission rate (standard
deviation of underlying normal distribution prior to truncation), numeric
or numeric vector.}
\item{hx}{the host effect on transmission rate, numeric or numeric vector.}
\item{mxtnormm}{the vector management effect on transmission rate (mean of
underlying normal distribution prior to truncation), numeric or numeric
vector.}
\item{mxtnormsd}{the vector management effect on transmission rate (standard
deviation of underlying normal distribution prior to truncation), numeric
or numeric vector.}
\item{axtnormm}{the roguing effect in terms of decreased DP (mean of
underlying normal distribution prior to truncation), numeric or numeric
vector.}
\item{axtnormsd}{the roguing effect in terms of decreased DP (standard
deviation of underlying normal distribution prior to truncation), numeric
or numeric vector.}
\item{rx}{the reversion rate, numeric or numeric vector.}
\item{zxtnormm}{the proportional selection against diseased plants (mean of
underlying normal distribution prior to truncation), numeric or numeric
vector.}
\item{zxtnormsd}{the proportional selection against diseased plants (standard
deviation of underlying normal distribution prior to truncation), numeric
or numeric vector.}
\item{gx}{the seed production rate in healthy plants, numeric or numeric
vector.}
\item{cx}{the proportional seed production rate in diseased plants, numeric
or numeric vector.}
\item{phix}{the proportion clean seed purchased, numeric or numeric vector.}
\item{nseasons}{the number of seasons, numeric or numeric vector.}
\item{HPcut}{the proportion healthy plant number cutoff, numeric or numeric
vector.}
\item{pHScut}{the proportion healthy seed cutoff, numeric or numeric vector.}
\item{maY}{the maximum attainable yield, end of season, in the absence of
disease, numeric or numeric vector.}
\item{miY}{the minimum yield when all plants are diseased (useable yield
despite disease), numeric or numeric vector.}
\item{thetax}{the rate of decline of Yld with increasing disease incidence,
numeric or numeric vector.}
\item{Ex}{the amount of external inoculum around field, numeric or numeric
vector.}
}
\description{
This function simulates one parameter combination across nseasons once.
}
\details{
The truncated random normal variables are generated by
\code{\link{altruncnorm}}.
}
\examples{
onesim() # to be added
}
\keyword{health}
\keyword{seed}
|
/man/onesim.Rd
|
permissive
|
rucky151/seedHealth-1
|
R
| false | true | 3,391 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maply.r
\name{maply}
\alias{maply}
\title{Call function with arguments in array or data frame, returning an array.}
\usage{
maply(
.data,
.fun = NULL,
...,
.expand = TRUE,
.progress = "none",
.inform = FALSE,
.drop = TRUE,
.parallel = FALSE,
.paropts = NULL
)
}
\arguments{
\item{.data}{matrix or data frame to use as source of arguments}
\item{.fun}{function to apply to each piece}
\item{...}{other arguments passed on to \code{.fun}}
\item{.expand}{should output be 1d (expand = FALSE), with an element for
each row; or nd (expand = TRUE), with a dimension for each variable.}
\item{.progress}{name of the progress bar to use, see
\code{\link{create_progress_bar}}}
\item{.inform}{produce informative error messages? This is turned off
by default because it substantially slows processing speed, but is very
useful for debugging}
\item{.drop}{should extra dimensions of length 1 in the output be
dropped, simplifying the output. Defaults to \code{TRUE}}
\item{.parallel}{if \code{TRUE}, apply function in parallel, using parallel
backend provided by foreach}
\item{.paropts}{a list of additional options passed into
the \code{\link[foreach]{foreach}} function when parallel computation
is enabled. This is important if (for example) your code relies on
external data or packages: use the \code{.export} and \code{.packages}
arguments to supply them so that all cluster nodes have the correct
environment set up for computing.}
}
\value{
if results are atomic with same type and dimensionality, a
vector, matrix or array; otherwise, a list-array (a list with
dimensions)
}
\description{
Call a multi-argument function with values taken from columns of an
data frame or array, and combine results into an array
}
\details{
The \code{m*ply} functions are the \code{plyr} version of \code{mapply},
specialised according to the type of output they produce. These functions
are just a convenient wrapper around \code{a*ply} with \code{margins = 1}
and \code{.fun} wrapped in \code{\link{splat}}.
}
\section{Input}{
Call a multi-argument function with values taken from
columns of an data frame or array
}
\section{Output}{
If there are no results, then this function will return a vector of
length 0 (\code{vector()}).
}
\examples{
maply(cbind(mean = 1:5, sd = 1:5), rnorm, n = 5)
maply(expand.grid(mean = 1:5, sd = 1:5), rnorm, n = 5)
maply(cbind(1:5, 1:5), rnorm, n = 5)
}
\references{
Hadley Wickham (2011). The Split-Apply-Combine Strategy
for Data Analysis. Journal of Statistical Software, 40(1), 1-29.
\url{http://www.jstatsoft.org/v40/i01/}.
}
\seealso{
Other multiple arguments input:
\code{\link{m_ply}()},
\code{\link{mdply}()},
\code{\link{mlply}()}
Other array output:
\code{\link{aaply}()},
\code{\link{daply}()},
\code{\link{laply}()}
}
\concept{array output}
\concept{multiple arguments input}
\keyword{manip}
|
/man/maply.Rd
|
no_license
|
batpigandme/plyr
|
R
| false | true | 2,952 |
rd
|
library(lubridate)
#Read the data into memory
elec <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
#convert string to date
elec$Date <- as.Date(elec$Date, "%d/%m/%Y")
#subset only the days between 2007-02-01 and 2007-02-02
subelec <- (elec[(elec$Date >= as.Date("2007-02-01")) & (elec$Date <= as.Date("2007-02-02")),])
#set Time column
subelec$Time <- ymd_hms(paste(subelec$Date, subelec$Time))
#Draw the required plot
par(mfrow = c(2,2))
#First Plot
with(subelec, plot(Time, Global_active_power, type = "l",
xlab = "", ylab = "Global Active Power"))
#Second Plot
with(subelec, plot(Time, Voltage, type = "l", xlab = "datetime", ylab="Voltage"))
#third Plot
with(subelec, plot(Time, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering"))
with(subelec, lines(Time, Sub_metering_1, col="black"))
with(subelec, lines(Time, Sub_metering_2, col="red"))
with(subelec, lines(Time, Sub_metering_3, col="blue"))
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col = c("black", "red", "blue"), lty = 1, lwd = 1, pt.cex=1, cex=0.5)
#fourth plot
with(subelec, plot(Time, Global_reactive_power, type = "l",
xlab = "datetime", ylab="Global_recative_power"))
#Write to png file
dev.copy(png, filename = "plot4.png", width = 480, height = 480, units = "px")
dev.off()
|
/Plot4.R
|
no_license
|
Jayeshs81/ExData_Plotting1
|
R
| false | false | 1,426 |
r
|
## File Name: lsem_bootstrap_postproc_output.R
## File Version: 0.02
lsem_bootstrap_postproc_output <- function(parameters, parameters_boot,
fitstats_joint, fitstats_joint_boot, est_joint=FALSE)
{
#* parameters
res <- lsem_bootstrap_inference(parameters_boot=parameters_boot, est=parameters$est)
parameters$est_bc <- res$est_bc
parameters$se <- res$se_boot
parameters$z <- parameters$est / parameters$se
    parameters$pvalue <- 2*stats::pnorm(-abs(parameters$z))
quant <- stats::qnorm(.975)
parameters$ci.lower <- parameters$est - quant * parameters$se
parameters$ci.upper <- parameters$est + quant * parameters$se
#* fitstats_joint
if (est_joint){
res <- lsem_bootstrap_inference(parameters_boot=fitstats_joint_boot,
est=fitstats_joint$value)
fitstats_joint$value_bc <- res$est_bc
fitstats_joint$se <- res$se_boot
}
#-- output
res <- list(parameters=parameters, fitstats_joint=fitstats_joint)
return(res)
}
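## For reference, a rough sketch of the normal-approximation bootstrap summary
## that the fields above correspond to, for a single scalar estimate (illustration
## only; the bias correction and SE are actually computed by lsem_bootstrap_inference()):
boot_normal_summary <- function(est, est_boot, alpha=0.05)
{
    se_boot <- stats::sd(est_boot)            # bootstrap standard error
    est_bc <- 2*est - mean(est_boot)          # simple bias correction
    z <- est / se_boot
    q <- stats::qnorm(1 - alpha/2)
    c(est_bc=est_bc, se=se_boot, z=z, pvalue=2*stats::pnorm(-abs(z)),
        ci.lower=est - q*se_boot, ci.upper=est + q*se_boot)
}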
|
/sirt/R/lsem_bootstrap_postproc_output.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 1,044 |
r
|
source("downloadZip.R")
# Load the data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset Emission data by Baltimore's emission.
baltimoreEmissions <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotBalt <- aggregate(Emissions ~ year, baltimoreEmissions,sum)
png("plot2.png",width=480,height=480,units="px",bg="transparent")
barplot(
aggTotBalt$Emissions,
names.arg=aggTotBalt$year,
xlab="Year",
ylab="Emissions",
main="Total Emissions From Baltimore City"
)
dev.off()
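## Sketch: the same per-year aggregation expressed with dplyr, for comparison with aggregate()
library(dplyr)
aggTotBalt_dplyr <- baltimoreEmissions %>%
    group_by(year) %>%
    summarise(Emissions = sum(Emissions))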
|
/plot2.R
|
no_license
|
rhervey/Course_Project_2
|
R
| false | false | 609 |
r
|
source("downloadZip.R")
# Load the data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset Emission data by Baltimore's emission.
baltimoreEmissions <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotBalt <- aggregate(Emissions ~ year, baltimoreEmissions,sum)
png("plot2.png",width=480,height=480,units="px",bg="transparent")
barplot(
aggTotBalt$Emissions,
names.arg=aggTotBalt$year,
xlab="Year",
ylab="Emissions",
main="Total Emissions From Baltimore City"
)
dev.off()
|
testlist <- list(x = c(3.60034843160563e+228, 7.24452062230663e+165, 3.44802964582437e+159, 1.39065335312201e-309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result)
|
/diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609959402-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 431 |
r
|
# peak-scalability.R --- Test function peak.scalability
library(usl)
data(specsdm91)
u <- usl(throughput ~ load, specsdm91)
# Calculate where peak scalability is reached
stopifnot(all.equal(peak.scalability(u), 96.51956, 0.0001))
# Calculate scalability for different coefficients
stopifnot(all.equal(peak.scalability(u, 0.001, 0.00001), 316.0696, 0.0001))
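# For reference (sketch): under the Universal Scalability Law the load at peak
# throughput is N* = sqrt((1 - sigma) / kappa), which is what peak.scalability()
# evaluates for the fitted coefficients or for the overriding values supplied above.
usl_peak <- function(sigma, kappa) sqrt((1 - sigma) / kappa)
stopifnot(all.equal(usl_peak(0.001, 0.00001), 316.0696, 0.0001))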
|
/tests/peak-scalability.R
|
no_license
|
smoeding/usl
|
R
| false | false | 362 |
r
|
\name{NISTrevolutionPerMinTOradianPerSec}
\alias{NISTrevolutionPerMinTOradianPerSec}
\title{Convert revolution per minute to radian per second }
\usage{NISTrevolutionPerMinTOradianPerSec(revolutionPerMin)}
\description{\code{NISTrevolutionPerMinTOradianPerSec} converts from revolution per minute (rpm) (r/min) to radian per second (rad/s) }
\arguments{
\item{revolutionPerMin}{revolution per minute (rpm) (r/min) }
}
\value{radian per second (rad/s) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTrevolutionPerMinTOradianPerSec(10)
}
\keyword{programming}
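% For reference (independent of the package): the conversion factor applied is
% 2*pi/60, i.e. rad/s = (r/min) * 2*pi/60, so the example above is expected to
% return approximately 1.047198 rad/s.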
|
/man/NISTrevolutionPerMinTOradianPerSec.Rd
|
no_license
|
cran/NISTunits
|
R
| false | false | 936 |
rd
|
.libPaths("F:/Mes_Docs/WorkingDIR/Library")
# Load packages
library(rgdal)
library(raster)
library(plyr)
library(dplyr)
library(RStoolbox)
library(RColorBrewer)
library(ggplot2)
library(sp)
library(caret)
library(doParallel)
library(openxlsx)
library(pROC)
library(tidyverse)
library(CAST)
##################
####################################################################################
#Modeling without transformation of categorical variables and with variables selection
######################################################################################
setwd("F:/Mes_Docs/Souss/Floods/Data")
# Load training and testing data
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
# Convert categorical variables to factor
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variable selection for rf
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_rf <- ffs(predictors = trainDat[,2:14],
              response = trainDat$Floods,
              method = "rf",
              trControl = train_control,
              tuneLength = 3)
save(ffs_rf, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_rf.Rdata")
stopCluster(cl)
# Selected variables: Distance_to_rivers, Soil_type
trainDat = trainDat[,c("Floods","Distance_to_rivers", "Soil_type")]
testDat = testDat[,c("Floods","Distance_to_rivers", "Soil_type")]
# Train rf model
#Random search#####
control <- trainControl(method='repeatedcv',
number=10,
repeats=3,
search = 'random')
set.seed(1)
rf_random <- train(Floods~.,
data=trainDat,
method = 'rf',
metric = 'Accuracy',
trControl = control,
importance = TRUE)
save(rf_random, file = "fit_rf_select.RData")
# Final model
All_incidents <- merge(trainDat, testDat, all=TRUE)
set.seed(849)
fit.rfAll<- train(Floods~.,
data=All_incidents,
method = "rf",
metric = "Accuracy",
trControl = control,
importance = TRUE)
save(fit.rfAll, file = "fit_rfAll_select.Rdata")
# Load rasters dataframe
df_scaled = get(load(file = "df_scaled_OV.RData"))
# Convert categorical variables to factor
df_scaled[,c("Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(df_scaled[,c("Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# PRODUCE PROBABILITY MAP
p <- as.data.frame(predict(fit.rfAll, df_scaled[,-c(1,2)], type = "prob"))
df <- df_scaled[, c("x", "y")]   # coordinates to carry the predicted probabilities
df$Levels_yes <- p$yes
df$Levels_no <- p$no
x <- SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
# Load DEM
dem = raster("F:/Mes_Docs/Souss/Floods/Data/Rasters/Dem.tif")
# Assign the DEM coordinates to the prediction raster
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save rasters
writeRaster(r_yes,filename="Prediction_floods_rf_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_rf_select.tif", format="GTiff", overwrite=TRUE)
# Plot Maps
# Palette function
palfunc <- function (n, alpha = 1, begin = 0, end = 1, direction = 1)
{
colors <- rev(brewer.pal(11, "RdYlGn"))
if (direction < 0) colors <- rev(colors)
colorRampPalette(colors, alpha = alpha)(n)
}
palfunc2 <- function (n, alpha = 1, begin = 0, end = 1, direction = 1)
{
colors <- brewer.pal(11, "RdYlGn")
if (direction < 0) colors <- rev(colors)
colorRampPalette(colors, alpha = alpha)(n)
}
# Save plots
jpeg("Floods_SM_RF_select.jpg", width = 800, height = 500)
spplot(r_yes, main="Floods Susceptibility Mapping using RF",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_RF_select.jpg", width = 800, height = 500)
spplot(r_no, main="Non Floods RF",col.regions=palfunc2)
dev.off()
#Run XGBoost function ------------------------------------------------
# Load training and testing data
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
# Convert categorical variables to factor
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variable selection for XGBoost
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_xgb <- ffs(predictors = trainDat[,2:14],
response = trainDat$Floods,
method = "xgbTree",
trControl = train_control)
save(ffs_xgb, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_xgb.Rdata")
stopCluster(cl)
# Selected variables: Dem,Rainfall,Distance_to_rivers
trainDat = trainDat[,c("Floods","Dem","Rainfall","Distance_to_rivers")]
testDat = testDat[,c("Floods","Dem","Rainfall","Distance_to_rivers")]
#Run XGBoost function ------------------------------------------------
control <- trainControl(method='repeatedcv',
number=10,
repeats=3,
search = 'random')
set.seed(5)
fit.xgb_train <- train(Floods~.,
data=trainDat,
method = "xgbTree",
metric= "Accuracy",
preProc = c("center", "scale"),
trControl = control)
save(fit.xgb_train, file = "fit.xgb_select.RData")
# Final model
All_incidents <- merge(trainDat, testDat, all=TRUE)
set.seed(849)
fit.xgbAll <- train(Floods~.,
                    data=All_incidents,
                    method = "xgbTree",
                    metric = "Accuracy",
                    preProc = c("center", "scale"),
                    trControl = control)
save(fit.xgbAll, file = "fit_xgbAll_select.Rdata")
stopCluster(cl)
# Prediction
# PRODUCE PROBABILITY MAP
p <- as.data.frame(predict(fit.xgbAll, df_scaled[,-c(1,2)], type = "prob"))
df <- df_scaled[, c("x", "y")]   # coordinates to carry the predicted probabilities
df$Levels_yes <- p$yes
df$Levels_no <- p$no
x <- SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save prediction
writeRaster(r_yes,filename="Prediction_floods_xgb_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_xgb_select.tif", format="GTiff", overwrite=TRUE)
# Plot and save Maps
jpeg("Floods_SM_XGB_select.jpg", width = 800, height = 500)
spplot(r_yes, main="Floods Susceptibility Mapping using XGB",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_XGB_select.jpg", width = 800, height = 500)
spplot(r_no, main="Non Floods XGB",col.regions=palfunc2)
dev.off()
############################
#Run KNN function ------------------------------------------------
# Load training and testing data
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
# Convert categorical variables to factor
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variable selection for KNN
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_knn <- ffs(predictors = trainDat[,2:14],
response = trainDat$Floods,
method = "knn",
trControl = train_control)
save(ffs_knn, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_knn.Rdata")
stopCluster(cl)
# Selected variables: "Dem", "Distance_to_rivers","Drainage_density"
trainDat = trainDat[,c("Floods","Dem", "Distance_to_rivers","Drainage_density")]
testDat = testDat[,c("Floods","Dem", "Distance_to_rivers","Drainage_density")]
control <- trainControl(method='repeatedcv',
number=10,
repeats=3)
set.seed(1)
knn_default = train(Floods~.,
data=trainDat,
method = "knn",
trControl = control)
save(knn_default, file = "fit_knn_select.Rdata")
All_incidents <- merge(trainDat, testDat, all=TRUE)
# Train KNN model using all the data (train + test merged)
set.seed(849)
fit.KNNAll<- train(Floods~.,
data=All_incidents,
method = "knn",
trControl = control)
save(fit.KNNAll, file = "fit_knnAll_select.Rdata")
# PRODUCE PROBABILITY MAP
p <- as.data.frame(predict(fit.KNNAll, df_scaled[,-c(1,2)], type = "prob"))
df <- df_scaled[, c("x", "y")]   # coordinates to carry the predicted probabilities
df$Levels_yes <- p$yes
df$Levels_no <- p$no
x <- SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save rasters
writeRaster(r_yes,filename="Prediction_floods_knn_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_knn_select.tif", format="GTiff", overwrite=TRUE)
# Plot Maps
jpeg("Floods_SM_KNN_select.jpg", width = 800, height = 500)
spplot(r_yes, main="Floods Susceptibility Mapping using KNN",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_KNN_select.jpg", width = 800, height = 500)
spplot(r_no, main="Non Floods KNN",col.regions=palfunc2)
dev.off()
#######NNET
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variable selection for NNET
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_nnet <- ffs(predictors = trainDat[,2:14],
response = trainDat$Floods,
method = "nnet",
trControl = train_control)
save(ffs_nnet, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_nnet.Rdata")
stopCluster(cl)
# Selected variables: Dem,Distance_to_rivers
trainDat = trainDat[,c("Floods","Dem", "Distance_to_rivers")]
testDat = testDat[,c("Floods","Dem", "Distance_to_rivers")]
#Run nnet function ------------------------------------------------
control <- trainControl(method='repeatedcv',
number=10,
repeats=3)
set.seed(1)
nnet_default = train(Floods~.,
data=trainDat,
method = "nnet",
trControl = control)
save(nnet_default,file = "fit_nnet_select.RData")
All_incidents <- merge(trainDat, testDat, all=TRUE)
# Train nnet model using all the data (train + test merged)
set.seed(849)
fit.nnetAll<- train(Floods~.,
data=All_incidents,
method = "nnet",
trControl = control)
save(fit.nnetAll,file = "fit_nnetAll_select.RData")
# PRODUCE PROBABILITY MAP
p <- as.data.frame(predict(fit.nnetAll, df_scaled[,-c(1,2)], type = "prob"))
df <- df_scaled[, c("x", "y")]   # coordinates to carry the predicted probabilities
df$Levels_yes <- p$yes
df$Levels_no <- p$no
x <- SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save rasters
writeRaster(r_yes,filename="Prediction_floods_nnet_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_nnet_select.tif", format="GTiff", overwrite=TRUE)
# Plot Maps
jpeg("Floods_SM_nnet_select.jpg", width = 800, height = 500)
spplot(r_yes, main="Floods Susceptibility Mapping using nnet",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_nnet_select.jpg", width = 800, height = 500)
spplot(r_no, main="Non Floods nnet",col.regions=palfunc2)
dev.off()
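## Sketch of a held-out evaluation with pROC (loaded above but otherwise unused).
## It assumes Floods has levels "no"/"yes", as implied by the use of p$yes / p$no,
## and reuses the nnet model fitted on the training split with the current testDat.
test_probs <- predict(nnet_default, newdata = testDat, type = "prob")[, "yes"]
roc_obj <- pROC::roc(response = testDat$Floods, predictor = test_probs,
                     levels = c("no", "yes"), direction = "<")
pROC::auc(roc_obj)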
|
/Codes/3_modeling_with_selection_without_transformation.R
|
no_license
|
melmos44/Floods_Risks_mapping_ML
|
R
| false | false | 14,065 |
r
|
.libPaths("F:/Mes_Docs/WorkingDIR/Library")
# Load packages
library(rgdal)
library(raster)
library(plyr)
library(dplyr)
library(RStoolbox)
library(RColorBrewer)
library(ggplot2)
library(sp)
library(caret)
library(doParallel)
library(openxlsx)
library(pROC)
library(tidyverse)
library(CAST)
##################
####################################################################################
#Modeling without transformation of categorical variables and with variables selection
######################################################################################
setwd("F:/Mes_Docs/Souss/Floods/Data")
# Load training and testing data
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
# Convert categorical variables to factor
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variables selection for rf
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_rf <- ffs(trainDat,trainDat$Floods,
method="rf",
trControl = train_control,
tuneLength=3)
save(ffs_rf, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_rf.Rdata")
stopCluster(cl)
# Selected variables: Distance_to_rivers, Soil_type
trainDat = trainDat[,c("Floods","Distance_to_rivers", "Soil_type")]
testDat = testDat[,c("Floods","Distance_to_rivers", "Soil_type")]
# Train rf model
#Random search#####
control <- trainControl(method='repeatedcv',
number=10,
repeats=3,
search = 'random')
set.seed(1)
rf_random <- train(Floods~.,
data=trainDat,
method = 'rf',
metric = 'Accuracy',
trControl = control,
importance = TRUE)
save(rf_random, file = "fit_rf_select.RData")
# Final model
All_incidents <- merge(trainDat, testDat, all=TRUE)
set.seed(849)
fit.rfAll<- train(Floods~.,
data=All_incidents,
method = "rf",
metric = "Accuracy",
trControl = control,
importance = TRUE)
save(fit.rfAll, file = "fit_rfAll_select.Rdata")
# Load rasters dataframe
df_scaled = get(load(file = "df_scaled_OV.RData"))
# Convert categorical variables to factor
df_scaled[,c("Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(df_scaled[,c("Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# PRODUCE PROBABILITY MAP
p <-as.data.frame(predict(fit.rfAll, df_scaled[,-c(1,2)], type = "prob"))
df$Levels_yes<-p$yes
df$Levels_no<-p$no
x<-SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
# Load DEM
dem = raster("F:/Mes_Docs/Souss/Floods/Data/Rasters/Dem.tif")
# Assign the DEM coordinates to the prediction raster
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save rasters
writeRaster(r_yes,filename="Prediction_floods_rf_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_rf_select.tif", format="GTiff", overwrite=TRUE)
# Plot Maps
# Palette function
palfunc <- function (n, alpha = 1, begin = 0, end = 1, direction = 1)
{
colors <- rev(brewer.pal(11, "RdYlGn"))
if (direction < 0) colors <- rev(colors)
colorRampPalette(colors, alpha = alpha)(n)
}
palfunc2 <- function (n, alpha = 1, begin = 0, end = 1, direction = 1)
{
colors <- brewer.pal(11, "RdYlGn")
if (direction < 0) colors <- rev(colors)
colorRampPalette(colors, alpha = alpha)(n)
}
# Save plots
jpeg("Floods_SM_RF_select.jpg", width = 800, height = 500)
spplot(r_ave_yes, main="Floods Susceptibility Mapping using RF",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_RF_select.jpg", width = 800, height = 500)
spplot(r_ave_no, main="Non Floods RF",col.regions=palfunc2)
dev.off()
#Run XGBoost function ------------------------------------------------
# Load training and testing data
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
# Convert categorical variables to factor
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variables selection for XGBoost
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_xgb <- ffs(predictors = trainDat[,2:14],
response = trainDat$Floods,
method = "xgbTree",
trControl = train_control)
save(ffs_xgb, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_xgb.Rdata")
stopCluster(cl)
# Selected variables: Dem,Rainfall,Distance_to_rivers
trainDat = trainDat[,c("Floods","Dem","Rainfall","Distance_to_rivers")]
testDat = testDat[,c("Floods","Dem","Rainfall","Distance_to_rivers")]
#Run XGBoost function ------------------------------------------------
control <- trainControl(method='repeatedcv',
number=10,
repeats=3,
search = 'random')
set.seed(5)
fit.xgb_train <- train(Floods~.,
data=trainDat,
method = "xgbTree",
metric= "Accuracy",
preProc = c("center", "scale"),
trControl = control)
save(fit.xgb_train, file = "fit.xgb_select.RData")
# Final model
All_incidents <- merge(trainDat, testDat, all=TRUE)
set.seed(849)
fit.rfAll<- train(Floods~.,
data=All_incidents,
method = "rf",
metric = "Accuracy",
trControl = control,
importance = TRUE)
save(fit.xgbAll, file = "fit_xgbAll_select.Rdata")
stopCluster(cl)
# Prediction
# PRODUCE PROBABILITY MAP
p <-as.data.frame(predict(fit.rfAll, df_scaled[,-c(1,2)], type = "prob"))
df$Levels_yes<-p$yes
df$Levels_no<-p$no
x<-SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save prediction
writeRaster(r_yes,filename="Prediction_floods_xgb_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_xgb_select.tif", format="GTiff", overwrite=TRUE)
# Plot and save Maps
jpeg("Floods_SM_XGB_select.jpg", width = 800, height = 500)
spplot(r_ave_yes, main="Floods Susceptibility Mapping using XGB",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_XGB_select.jpg", width = 800, height = 500)
spplot(r_ave_no, main="Non Floods XGB",col.regions=palfunc2)
dev.off()
############################
#Run KNN function ------------------------------------------------
# Load training and testing data
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
# Convert categorical variables to factor
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variables selection for KNN
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_knn <- ffs(predictors = trainDat[,2:14],
response = trainDat$Floods,
method = "knn",
trControl = train_control)
save(ffs_knn, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_knn.Rdata")
stopCluster(cl)
# Selected variables: "Dem", "Distance_to_rivers","Drainage_density"
trainDat = trainDat[,c("Floods","Dem", "Distance_to_rivers","Drainage_density")]
testDat = testDat[,c("Floods","Dem", "Distance_to_rivers","Drainage_density")]
control <- trainControl(method='repeatedcv',
number=10,
repeats=3)
set.seed(1)
knn_default = train(Floods~.,
data=trainDat,
method = "knn",
trControl = control)
save(knn_default, file = "fit_knn_select.Rdata")
All_incidents <- merge(trainDat, testDat, all=TRUE)
#Train KNN model USING aLL dependent data
set.seed(849)
fit.KNNAll<- train(Floods~.,
data=All_incidents,
method = "knn",
trControl = control)
save(fit.KNNAll, file = "fit_knnAll_select.Rdata")
# PRODUCE PROBABILITY MAP
p <- as.data.frame(predict(fit.KNNAll, df_scaled[,-c(1,2)], type = "prob"))
df$Levels_yes<-p$yes
df$Levels_no<-p$no
x<-SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save rasters
writeRaster(r_yes,filename="Prediction_floods_knn_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_knn_select.tif", format="GTiff", overwrite=TRUE)
# Plot Maps
jpeg("Floods_SM_KNN_select.jpg", width = 800, height = 500)
spplot(r_ave_yes, main="Floods Susceptibility Mapping using KNN",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_KNN_select.jpg", width = 800, height = 500)
spplot(r_ave_no, main="Non Floods KNN",col.regions=palfunc2)
dev.off()
#######NNET
trainDat = read.xlsx("Train_Data.xlsx")
testDat = read.xlsx("Test_Data.xlsx")
trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(trainDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")] =
lapply(testDat[,c("Floods","Aspect","Flow_Direction","Geology","Landuse","Soil_type")],as.factor)
# Variables selection for NNET
cl <- makePSOCKcluster(15)
registerDoParallel(cl)
clusterEvalQ(cl, .libPaths("F:/Mes_Docs/WorkingDIR/Library"))
train_control <- trainControl(method = 'cv', number = 10, returnResamp = 'all',
classProbs = TRUE)
ffs_nnet <- ffs(predictors = trainDat[,2:14],
response = trainDat$Floods,
method = "nnet",
trControl = train_control)
save(ffs_nnet, file = "F:/Mes_Docs/Souss/Floods/Data/ffs_nnet.Rdata")
stopCluster(cl)
# Selected variables: Dem,Distance_to_rivers
trainDat = trainDat[,c("Floods","Dem", "Distance_to_rivers")]
testDat = testDat[,c("Floods","Dem", "Distance_to_rivers")]
#Run nnet function ------------------------------------------------
control <- trainControl(method='repeatedcv',
number=10,
repeats=3)
set.seed(1)
nnet_default = train(Floods~.,
data=trainDat,
method = "nnet",
trControl = control)
save(nnet_default,file = "fit_nnet_select.RData")
All_incidents <- merge(trainDat, testDat, all=TRUE)
#Train nnet model USING aLL dependent data
set.seed(849)
fit.nnetAll<- train(Floods~.,
data=All_incidents,
method = "nnet",
trControl = control)
save(fit.nnetAll,file = "fit_nnetAll_select.RData")
# PRODUCE PROBABILITY MAP
p <- as.data.frame(predict(fit.nnetAll, df_scaled[,-c(1,2)], type = "prob"))
df$Levels_yes<-p$yes
df$Levels_no<-p$no
x<-SpatialPointsDataFrame(as.data.frame(df)[, c("x", "y")], data = df)
r_yes <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_yes")])
r_no <- rasterFromXYZ(as.data.frame(x)[, c("x", "y", "Levels_no")])
proj4string(r_yes)=CRS(projection(dem))
proj4string(r_no)=CRS(projection(dem))
# Save rasters
writeRaster(r_yes,filename="Prediction_floods_nnet_select.tif", format="GTiff", overwrite=TRUE)
writeRaster(r_no,filename="Prediction_non_floods_nnet_select.tif", format="GTiff", overwrite=TRUE)
# Plot Maps
jpeg("Floods_SM_nnet_select.jpg", width = 800, height = 500)
spplot(r_ave_yes, main="Floods Susceptibility Mapping using nnet",col.regions=palfunc)
dev.off()
jpeg("Non_Floods_SM_nnet_select.jpg", width = 800, height = 500)
spplot(r_ave_no, main="Non Floods nnet",col.regions=palfunc2)
dev.off()
|
n <- 100
degrees <- 1:50
X <- runif(n, max=2 * pi / 4 * 3)
Y <- 0.1 + -3 * sin(X) + rnorm(n, sd=0.5)
new_Y <- 0.1 + -3 * sin(X) + rnorm(n, sd=0.5)
X_mat <- sapply(degrees, function(i) X^i)
MSEs <- rep(NA, length(degrees))
test_MSEs <- MSEs
for(i in seq_along(degrees)){
ols <- lm(Y ~ X_mat[, 1:i])
MSEs[i] <- mean(ols$residuals^2)
new_errors <- new_Y - ols$fitted.values
test_MSEs[i] <- mean(new_errors^2)
}
plot(degrees, MSEs, type="b",
ylim=c(0, max(test_MSEs)))
lines(degrees, test_MSEs, type="b", col="red")
legend("topright", legend=c("Test", "Train"),
fill=c("red", "black"))
plot(X, new_Y)
points(X, ols$fitted.values, col="red")
ols <- lm(Y ~ X_mat)
df <- data.frame(Y, X_mat)
ols <- lm(Y ~ ., df)
ols <- lm(Y ~ X_mat[, 1]:X_mat[, 2])
ols <- lm(Y ~ X_mat[, 1]*X_mat[, 2])
summary(ols)
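# Hedged addition (not in the original lab script): report the degree that
# minimises each curve; the gap between the two is the overfitting point the
# plot above illustrates.
which.min(MSEs)      # training MSE typically keeps shrinking as degree grows
which.min(test_MSEs) # test MSE bottoms out at a much smaller degree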
|
/courses/data_mining/labs/test_train_error_trade_off.R
|
no_license
|
leewtai/leewtai.github.io
|
R
| false | false | 821 |
r
|
library(ggpubr)
library(ggplot2)
library(dplyr) #add rows
library(lmridge) #for ridge regression
library(lars) # lasso regression
library(caret) # for cross validation
library(gdata) #to read xls file format
library(rsm) # for response surface
#Importing Dataset into energy (assumes ENB2012_data is already loaded, e.g. read in with gdata)
energy <- ENB2012_data
str(energy)
#Forward Selection
fwd_model_energy <- lm(X1 ~ 1, data = energy)
step(fwd_model_energy, direction = "forward", scope = formula(X1 ~ X2 + X3 + X4 + X5 + X6+X7+X8+Y1+Y2))
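#Hedged addition (not in the original): step() returns the selected model, so
#capturing it avoids retyping the formula reported in the console output.
fwd_selected <- step(fwd_model_energy, direction = "forward",
                     scope = formula(X1 ~ X2 + X3 + X4 + X5 + X6 + X7 + X8 + Y1 + Y2),
                     trace = FALSE)
formula(fwd_selected)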
#Lm Formula
lm(formula = X1 ~ X2 + X5 + X4 + Y2 + X7 + Y1, data = energy)
#MeanImputations
energy$X1[is.na(energy$X1)] <- mean(energy$X1, na.rm = TRUE)
energy$X2[is.na(energy$X2)] <- mean(energy$X2, na.rm = TRUE)
energy$X3[is.na(energy$X3)] <- mean(energy$X3, na.rm = TRUE)
energy$X4[is.na(energy$X4)] <- mean(energy$X4, na.rm = TRUE)
energy$X5[is.na(energy$X5)] <- mean(energy$X5, na.rm = TRUE)
energy$X6[is.na(energy$X6)] <- mean(energy$X6, na.rm = TRUE)
energy$X7[is.na(energy$X7)] <- mean(energy$X7, na.rm = TRUE)
energy$X8[is.na(energy$X8)] <- mean(energy$X8, na.rm = TRUE)
energy$Y1[is.na(energy$Y1)] <- mean(energy$Y1, na.rm = TRUE)
energy$Y2[is.na(energy$Y2)] <- mean(energy$Y2, na.rm = TRUE)
#Variables Remaining X3,X6,X8
#Linear Regression
lin_energy1 <- lm(X1 ~ X2, data = energy)
lin_energy2 <- lm(X1 ~ X2+X5, data = energy)
lin_energy3 <- lm(X1 ~ X2+X5+X4, data = energy)
lin_energy4 <- lm(X1 ~ X2+X5+X4+Y2, data = energy)
lin_energy5 <- lm(X1 ~ X2+X5+X4+Y2+X7, data = energy)
lin_energy6 <- lm(X1 ~ X2+X5+X4+Y2+X7+Y1, data = energy)
lin_energy7 <- lm(X1 ~ X2+X5+X4+Y2+X7+Y1+X3, data = energy)
lin_energy8 <- lm(X1 ~ X2+X5+X4+Y2+X7+Y1+X3+X6, data = energy)
lin_energy9 <- lm(X1 ~ X2+X5+X4+Y2+X7+Y1+X3+X6+X8, data = energy)
#Cross-Validation
train_control <- trainControl(method = "cv", number = 10)
cv_energymodel1 <- train(X1 ~ X2, data = energy, trControl = train_control, method = "lm")
cv_energymodel2 <- train(X1 ~ X2+X5, data = energy, trControl = train_control, method = "lm")
cv_energymodel3 <- train(X1 ~ X2+X5+X4, data = energy, trControl = train_control, method = "lm")
cv_energymodel4 <- train(X1 ~ X2+X5+X4+Y2, data = energy, trControl = train_control, method = "lm")
cv_energymodel5 <- train(X1 ~ X2+X5+X4+Y2+X7, data = energy, trControl = train_control, method = "lm")
cv_energymodel6 <- train(X1 ~ X2+X5+X4+Y2+X7+Y1, data = energy, trControl = train_control, method = "lm")
cv_energymodel7 <- train(X1 ~ X2+X5+X4+Y2+X7+Y1+X3, data = energy, trControl = train_control, method = "lm")
cv_energymodel8 <- train(X1 ~ X2+X5+X4+Y2+X7+Y1+X3+X6, data = energy, trControl = train_control, method = "lm")
cv_energymodel9 <- train(X1 ~ X2+X5+X4+Y2+X7+Y1+X3+X6+X8, data = energy, trControl = train_control, method = "lm")
#Dataframes
frame_energy<-data.frame("r_sq" = double(0), "adj_r_sq" = double(0), "cv_r_sq" = double(0))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy1)$r.squared, adj_r_sq = summary(lin_energy1)$adj.r.squared, cv_r_sq = mean(cv_energymodel1$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy2)$r.squared, adj_r_sq = summary(lin_energy2)$adj.r.squared, cv_r_sq = mean(cv_energymodel2$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy3)$r.squared, adj_r_sq = summary(lin_energy3)$adj.r.squared, cv_r_sq = mean(cv_energymodel3$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy4)$r.squared, adj_r_sq = summary(lin_energy4)$adj.r.squared, cv_r_sq = mean(cv_energymodel4$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy5)$r.squared, adj_r_sq = summary(lin_energy5)$adj.r.squared, cv_r_sq = mean(cv_energymodel5$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy6)$r.squared, adj_r_sq = summary(lin_energy6)$adj.r.squared, cv_r_sq = mean(cv_energymodel6$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy7)$r.squared, adj_r_sq = summary(lin_energy7)$adj.r.squared, cv_r_sq = mean(cv_energymodel7$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy8)$r.squared, adj_r_sq = summary(lin_energy8)$adj.r.squared, cv_r_sq = mean(cv_energymodel8$resample$Rsquared))
frame_energy<- add_row(frame_energy, r_sq = summary(lin_energy9)$r.squared, adj_r_sq = summary(lin_energy9)$adj.r.squared, cv_r_sq = mean(cv_energymodel9$resample$Rsquared))
#Plotting The Values:-
plot(frame_energy$r_sq, type = 'l', col = 'red', main = "Linear Plot", ylab = "R-squared", ylim = c(0.1,1))
lines(frame_energy$adj_r_sq, col = 'green' )
lines(frame_energy$cv_r_sq, col = 'blue')
legend(5,0.8, legend = c("R Squared","Adj R Squared","R Squared CV"),
col = c("red","green","blue"), lty = 1:2, cex = 0.8)
#Ridge Regression
ridge_ns1 <- lmridge(X1 ~ X2 + X5, energy, K = c(0.1, 0.001))
ridge_ns2 <- lmridge(X1 ~ X2 + X5 + X4, energy, K = c(0.1, 0.001))
ridge_ns3 <- lmridge(X1 ~ X2 + X5 + X4 +Y2, energy, K = c(0.1, 0.001))
ridge_ns4 <- lmridge(X1 ~ X2 + X5 + X4+Y2+X7, energy, K = c(0.1, 0.001))
ridge_ns5 <- lmridge(X1 ~ X2 + X5 + X4+Y2+X7+Y1, energy, K = c(0.1, 0.001))
ridge_ns6 <- lmridge(X1 ~ X2 + X5 + X4+Y2+X7+Y1+X3, energy, K = c(0.1, 0.001))
ridge_ns7 <- lmridge(X1 ~ X2 + X5 + X4+Y2+X7+Y1+X3+X6, energy, K = c(0.1, 0.001))
ridge_ns8 <- lmridge(X1 ~ X2 + X5 + X4+Y2+X7+Y1+X3+X6+X8, energy, K = c(0.1, 0.001))
#Cross-Validation
cv_ridge_ns1 <- train(X1 ~ X2 + X5, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns2 <- train(X1 ~ X2 + X5+X4, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns3 <- train(X1 ~ X2 + X5+X4+Y2, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns4 <- train(X1 ~ X2 + X5+X4+Y2+X7, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns5 <- train(X1 ~ X2 + X5+X4+Y2+X7+Y1, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns6 <- train(X1 ~ X2 + X5+X4+Y2+X7+Y1+X3, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns7 <- train(X1 ~ X2 + X5+X4+Y2+X7+Y1+X3+X6, data = energy, trControl = train_control, method = "ridge")
cv_ridge_ns8 <- train(X1 ~ X2 + X5+X4+Y2+X7+Y1+X3+X6+X8, data = energy, trControl = train_control, method = "ridge")
#DataFrames
frame_ns1 <-data.frame("r_sq" = double(0), "adj_r_sq" = double(0), "cv_r_sq" = double(0))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns1)$R2), adj_r_sq = max(rstats1(ridge_ns1)$adjR2), cv_r_sq = mean(cv_ridge_ns1$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns2)$R2), adj_r_sq = max(rstats1(ridge_ns2)$adjR2), cv_r_sq = mean(cv_ridge_ns2$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns3)$R2), adj_r_sq = max(rstats1(ridge_ns3)$adjR2), cv_r_sq = mean(cv_ridge_ns3$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns4)$R2), adj_r_sq = max(rstats1(ridge_ns4)$adjR2), cv_r_sq = mean(cv_ridge_ns4$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns5)$R2), adj_r_sq = max(rstats1(ridge_ns5)$adjR2), cv_r_sq = mean(cv_ridge_ns5$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns6)$R2), adj_r_sq = max(rstats1(ridge_ns6)$adjR2), cv_r_sq = mean(cv_ridge_ns6$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns7)$R2), adj_r_sq = max(rstats1(ridge_ns7)$adjR2), cv_r_sq = mean(cv_ridge_ns7$resample$Rsquared))
frame_ns1 <- add_row(frame_ns1, r_sq = max(rstats1(ridge_ns8)$R2), adj_r_sq = max(rstats1(ridge_ns8)$adjR2), cv_r_sq = mean(cv_ridge_ns8$resample$Rsquared))
#Plotting the values
plot(frame_ns1$adj_r_sq,type="l",col="red",main = "RidgePlot",ylab="Variation",ylim = c(0,1))
lines(frame_ns1$r_sq,col="green")
lines(frame_ns1$cv_r_sq,col="Blue")
legend(2,1,legend = c("adj_r_square","r_square","cv_r_square"),col=c("red","green","blue"),lty = 1:2,cex = 0.8)
#Lasso Regression
#X1 ~ X2 + X5+X4+Y2+X7+Y1+X3+X6+X8
x <-(energy$X2)
x <-cbind(energy$X5,x)
lasso_mod1 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
lasso_mod2 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
x <-cbind(energy$Y2,x)
lasso_mod3 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
x <-cbind(energy$Y2,x)
x <-cbind(energy$X7,x)
lasso_mod4 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
x <-cbind(energy$Y2,x)
x <-cbind(energy$X7,x)
x <-cbind(energy$Y1,x)
lasso_mod5 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
x <-cbind(energy$Y2,x)
x <-cbind(energy$X7,x)
x <-cbind(energy$Y1,x)
x <-cbind(energy$X3,x)
lasso_mod6 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
x <-cbind(energy$Y2,x)
x <-cbind(energy$X7,x)
x <-cbind(energy$Y1,x)
x <-cbind(energy$X3,x)
x <-cbind(energy$X8,x)
lasso_mod7 <- lars(x, energy$X1, type = 'lasso')
x <-(energy$X2)
x <-cbind(energy$X5,x)
x <-cbind(energy$X4,x)
x <-cbind(energy$Y2,x)
x <-cbind(energy$X7,x)
x <-cbind(energy$Y1,x)
x <-cbind(energy$X3,x)
x <-cbind(energy$X8,x)
x <-cbind(energy$X6,x)
lasso_mod8 <- lars(x, energy$X1, type = 'lasso')
#Cross-Validation
##X1 ~ X2 + X5+X4+Y2+X7+Y1+X3+X6+X8
cv_lasso_ns1<-train(X1~X2+X5,energy,method="lasso",trControl=train_control)
cv_lasso_ns2<-train(X1~X2+X5+X4,energy,method="lasso",trControl=train_control)
cv_lasso_ns3<-train(X1~X2+X5+X4+Y2,energy,method="lasso",trControl=train_control)
cv_lasso_ns4<-train(X1~X2+X5+X4+Y2+X7,energy,method="lasso",trControl=train_control)
cv_lasso_ns5<-train(X1~X2+X5+X4+Y2+X7+Y1,energy,method="lasso",trControl=train_control)
cv_lasso_ns6<-train(X1~X2+X5+X4+Y2+X7+Y1+X3,energy,method="lasso",trControl=train_control)
cv_lasso_ns7<-train(X1~X2+X5+X4+Y2+X7+Y1+X3+X6,energy,method="lasso",trControl=train_control)
cv_lasso_ns8<-train(X1~X2+X5+X4+Y2+X7+Y1+X3+X6+X8,energy,method="lasso",trControl=train_control)
#DataFrames:-
frame_ns2 <- data.frame("r_sq" = double(0), "cv_r_sq" = double(0))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod1)$R2), cv_r_sq = mean(cv_lasso_ns1$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod2)$R2), cv_r_sq = mean(cv_lasso_ns2$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod3)$R2), cv_r_sq = mean(cv_lasso_ns3$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod4)$R2), cv_r_sq = mean(cv_lasso_ns4$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod5)$R2), cv_r_sq = mean(cv_lasso_ns5$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod6)$R2), cv_r_sq = mean(cv_lasso_ns6$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod7)$R2), cv_r_sq = mean(cv_lasso_ns7$resample$Rsquared))
frame_ns2 <- add_row(frame_ns2, r_sq =max((lasso_mod8)$R2), cv_r_sq = mean(cv_lasso_ns8$resample$Rsquared))
#Plotting The Values
plot(frame_ns2$r_sq,type="l",col="red",main = "LassoPLOT",ylab="Variation",ylim = c(0,1))
lines(frame_ns2$cv_r_sq,col="Blue")
legend(2,1,legend = c("r_square","cv_r_square"),col=c("red","blue"),lty = 1:2,cex = 0.8)
#QuadPlot
q_energy<-energy
#Squaring The Values
#X1~X5+X4+Y2+X7+Y1+X3+X6+X8
q_energy$X1_sq<-q_energy$X1^2
q_energy$X2_sq<-q_energy$X2^2
q_energy$X3_sq<-q_energy$X3^2
q_energy$X4_sq<-q_energy$X4^2
q_energy$X5_sq<-q_energy$X5^2
q_energy$X6_sq<-q_energy$X6^2
q_energy$X7_sq<-q_energy$X7^2
q_energy$X8_sq<-q_energy$X8^2
q_energy$Y1_sq<-q_energy$Y1^2
q_energy$Y2_sq<-q_energy$Y2^2
#Linear Regression
#X1~X5+X4+Y2+X7+Y1+X3+X6+X8
Quad1 <- lm(X1~X5+X5_sq,data=q_energy)
Quad2 <- lm(X1~X5+X5_sq+X4+X4_sq,data=q_energy)
Quad3 <- lm(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq,data=q_energy)
Quad4 <- lm(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq,data=q_energy)
Quad5 <- lm(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq,data=q_energy)
Quad6 <- lm(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq+X3+X3_sq,data=q_energy)
Quad7 <- lm(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq+X3+X3_sq+X6+X6_sq,data=q_energy)
Quad8 <- lm(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq+X3+X3_sq+X6+X6_sq+X8+X8_sq,data=q_energy)
#Cross-Validation
cv_quad_ns1 <- train(X1~X5+X5_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns2 <- train(X1~X5+X5_sq+X4+X4_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns3 <- train(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns4 <- train(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns5 <- train(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns6 <- train(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq+X3+X3_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns7 <- train(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq+X3+X3_sq+X6+X6_sq, data = q_energy, trControl = train_control, method = "lm")
cv_quad_ns8 <- train(X1~X5+X5_sq+X4+X4_sq+Y2+Y2_sq+X7+X7_sq+Y1+Y1_sq+X3+X3_sq+X6+X6_sq+X8+X8_sq, data = q_energy, trControl = train_control, method = "lm")
#DataFrames:-
frame_ns3 <- data.frame("adj_r_sq" = double(0),"r_sq" = double(0), "cv_r_sq" = double(0))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad1)$r.squared, adj_r_sq = summary(Quad1)$adj.r.squared, cv_r_sq = mean(cv_quad_ns1$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad2)$r.squared, adj_r_sq = summary(Quad2)$adj.r.squared, cv_r_sq = mean(cv_quad_ns2$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad3)$r.squared, adj_r_sq = summary(Quad3)$adj.r.squared, cv_r_sq = mean(cv_quad_ns3$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad4)$r.squared, adj_r_sq = summary(Quad4)$adj.r.squared, cv_r_sq = mean(cv_quad_ns4$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad5)$r.squared, adj_r_sq = summary(Quad5)$adj.r.squared, cv_r_sq = mean(cv_quad_ns5$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad6)$r.squared, adj_r_sq = summary(Quad6)$adj.r.squared, cv_r_sq = mean(cv_quad_ns6$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad7)$r.squared, adj_r_sq = summary(Quad7)$adj.r.squared, cv_r_sq = mean(cv_quad_ns7$resample$Rsquared))
frame_ns3<- add_row(frame_ns3, r_sq = summary(Quad8)$r.squared, adj_r_sq = summary(Quad8)$adj.r.squared, cv_r_sq = mean(cv_quad_ns8$resample$Rsquared))
#Plotting The Values
plot(frame_ns3$adj_r_sq,type="l",col="red",main = "QuadPLOT",ylab="Variation",ylim = c(0,1))
lines(frame_ns3$r_sq,col="green")
lines(frame_ns3$cv_r_sq,col="Blue")
legend(2,1,legend = c("adj_r_square","r_square","cv_r_square"),col=c("red","green","blue"),lty = 1:2,cex = 0.8)
#Response Surface
#X1~X5+X4+Y2+X7+Y1+X3+X6+X8
resp<-energy
rs_md1<-rsm(X1~SO(X5,X4),data=resp)
rs_md2<-rsm(X1~SO(X5,X4,Y2),data=resp)
rs_md3<-rsm(X1~SO(X5,X4,Y2),data=resp) # NB: same formula as rs_md2
rs_md4<-rsm(X1~SO(X5,X4,Y2,X7),data=resp)
rs_md5<-rsm(X1~SO(X5,X4,Y2,X7,Y1),data=resp)
rs_md6<-rsm(X1~SO(X5,X4,Y2,X7,Y1,X3),data=resp)
rs_md7<-rsm(X1~SO(X5,X4,Y2,X7,Y1,X3,X6),data=resp)
rs_md8<-rsm(X1~SO(X5,X4,Y2,X7,Y1,X3,X6,X8),data=resp)
#DataFrame
frame_ns4 <-data.frame("adj_r_square" = double(0), "r_square" = double(0))
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md1)$adj.r.squared, r_square = summary(rs_md1)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md2)$adj.r.squared, r_square = summary(rs_md2)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md3)$adj.r.squared, r_square = summary(rs_md3)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md4)$adj.r.squared, r_square = summary(rs_md4)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md5)$adj.r.squared, r_square = summary(rs_md5)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md6)$adj.r.squared, r_square = summary(rs_md6)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md7)$adj.r.squared, r_square = summary(rs_md7)$r.squared)
frame_ns4 <- add_row(frame_ns4, adj_r_square = summary(rs_md8)$adj.r.squared, r_square = summary(rs_md8)$r.squared)
#Plotting The Values
plot(frame_ns4$adj_r_square,type="l",col="red",main = "ReponseSurfacePLOT",ylab="Variation",ylim = c(0,1))
lines(frame_ns4$r_square,col="green")
legend(2,1,legend = c("adj_r_square","r_square"),col=c("red","green"),lty = 1:2,cex = 0.8)
|
/Energy.r
|
no_license
|
singhrohit01/Regression-
|
R
| false | false | 16,356 |
r
|
## Load the full household power consumption dataset
power_consump_full_sub_data <- read.csv("./sub_data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
power_consump_full_sub_data$Date <- as.Date(power_consump_full_sub_data$Date, format="%d/%m/%Y")
## Subset the data covering 2007-02-01 to 2007-02-02
sub_data <- subset(power_consump_full_sub_data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(power_consump_full_sub_data)
## Now, we need to convert dates
datetime <- paste(as.Date(sub_data$Date), sub_data$Time)
sub_data$Datetime <- as.POSIXct(datetime)
## Plot and create png
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(sub_data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
})
## Saving phase
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
/plot4.r
|
no_license
|
jcolao/ExData_Plotting1
|
R
| false | false | 1,587 |
r
|
# Gist at: https://gist.github.com/rudeboybert/9905f44013c18d6add279cf13ab8e398
# RStudio Connect, then name: Seattle House Prices Interactive Plot
|
/docs/images/plotly/seattle_house_prices_interactive_plot.R
|
permissive
|
sachinbuzz/moderndive_book
|
R
| false | false | 148 |
r
|
## archivist package for R
##
#' @title Show Artifact's History
#'
#' @description
#' \code{ahistory} extracts artifact's history and creates a data frame with
#' history of calls and md5hashes of partial results. The overloaded
#' \code{print.ahistory} function prints this history in a concise way. The overloaded
#' \code{print.ahistoryKable} function prints this history in the same way as \link[knitr]{kable}.
#' When \code{alink=TRUE} one can create history table/kable with hooks to partial results (artifacts) as in the \link{alink} function.
#'
#' @details
#' All artifacts created with the \link[archivist]{\%a\%} operator are archivised with
#' detailed information about their source (both the call and the md5hash of the input).
#' The function \code{ahistory} reads all artifacts that
#' precede \code{artifact} and creates a description of the input flow.
#' The generic \code{print.ahistory} function prints the history in a human-readable way.
#'
#' @param artifact An artifact which history is supposed to be reconstructed.
#' It will be converted into md5hash.
#' @param md5hash If \code{artifact} is not specified then \code{md5hash} is used.
#' @param repoDir A character denoting an existing directory in which an artifact will be saved.
#' @param ... Further parameters passed to \link{alink} function. Used when \code{format = "kable"} and \code{alink = TRUE}.
#' @param format A character denoting whether to print history in either a \code{"regular"} (default) way or like in a \code{"kable"} function.
#' See Notes.
#' @param alink Whether to provide hooks to objects like in \link{alink}. See examples.
#'
#' @return A data frame with two columns - names of calls and md5hashes of partial results.
#'
#' @note There are provided functions (\code{print.ahistory} and \code{print.ahistoryKable}) to print the artifact's history.
#' History can be printed either in a \code{regular} way, which is friendly for console output, or in a \code{kable} format, which
#' prints the artifact's history the way the \link[knitr]{kable} function would. This is convenient when one prints history
#' in \code{.Rmd} files using \link[rmarkdown]{rmarkdown}.
#'
#' Moreover, when the user passes \code{format = 'kable'} and \code{alink = TRUE}, one can use links for a remote Repository.
#' Then md5hashes are taken from the Local Repository, so the user has to specify \code{repo}, \code{user} and \code{repoDir} even though
#' they are set globally, because \code{repo} is a substring of \code{repoDir} and during evaluation of \code{...} R treats \code{repo} as \code{repoDir}.
#'
#' Bug reports and feature requests can be sent to \href{https://github.com/pbiecek/archivist/issues}{https://github.com/pbiecek/archivist/issues}
#'
#' @author
#' Przemyslaw Biecek, \email{przemyslaw.biecek@@gmail.com}
#'
#' Marcin Kosinski, \email{m.p.kosinski@@gmail.com}
#'
#' @examples
#'
#' createLocalRepo("ahistory_check", default = TRUE)
#' library(dplyr)
#' iris %a%
#' filter(Sepal.Length < 6) %a%
#' lm(Petal.Length~Species, data=.) %a%
#' summary() -> artifact
#'
#' ahistory(artifact)
#' ahistory(artifact, format = "kable")
#' print(ahistory(artifact, format = "kable"), format = "latex")
#' ahistory(artifact, format = "kable", alink = TRUE, repoDir = "ahistory_check",
#' repo = "repo", user = "user")
#'
#'
#' repoDir <- file.path(getwd(), "ahistory_check")
#' deleteLocalRepo(repoDir, deleteRoot = TRUE)
#' aoptions('repoDir', NULL, unset = TRUE)
#'
#' @family archivist
#' @rdname ahistory
#' @export
ahistory <- function(artifact = NULL, md5hash = NULL, repoDir = aoptions('repoDir'), format = "regular", alink = FALSE, ...) {
# if artifact is set then calculate md5hash for it
if (!is.null(artifact))
md5hash = digest(artifact)
if (is.null(md5hash))
stop("Either artifact or md5hash has to be set")
stopifnot(length(format) == 1 & format %in% c("regular", "kable"))
res_names <- c()
res_md5 <- md5hash
ind <- 1
while (!is.null(md5hash) && length(md5hash)>0) {
tags <- getTagsLocal(md5hash, tag = "", repoDir=repoDir)
tmp <- grep(tags, pattern="^RHS:", value = TRUE)
if (length(tmp) > 0) {
res_names[ind] <- substr(tmp[1],5,nchar(tmp[1]))
} else {
# if not RHS then maybe name
tmp <- grep(tags, pattern="^name:", value = TRUE)
if (length(tmp) > 0) {
res_names[ind] <- substr(tmp[1],6,nchar(tmp[1]))
} else {
      # this should not happen
df <- data.frame(md5hash = res_md5, call = rep("", length(res_md5)), stringsAsFactors = FALSE)
if (format == "kable") {
class(df) = c("ahistoryKable", "data.frame")
if (alink) {
df$md5hash <- paste0("[",
df$md5hash,
"]",
sapply(df$md5hash, alink, ...) %>%
as.vector() %>%
strsplit(split = "]") %>%
lapply(`[[`, 2)
)
}
} else {
class(df) = c("ahistory", "data.frame")
}
return(df)
}
}
md5hash <- grep(tags, pattern="^LHS:", value = TRUE)
if (length(md5hash) > 0) {
md5hash <- substr(md5hash[1],5,nchar(md5hash[1]))
res_md5[ind+1] <- md5hash
}
ind <- ind + 1
}
if (length(res_md5) != length(res_names)) {
res_md5[max(length(res_md5), length(res_names))+1] = ""
res_names[max(length(res_md5), length(res_names))+1] = ""
}
df <- data.frame(md5hash = res_md5, call = res_names, stringsAsFactors = FALSE)
if (format == "kable") {
class(df) = c("ahistoryKable", "data.frame")
if (alink) {
df$md5hash <- paste0("[",
df$md5hash,
"]",
sapply(df$md5hash, alink, ...) %>%
as.vector() %>%
strsplit(split = "]") %>%
lapply(`[[`, 2)
)
}
} else {
class(df) = c("ahistory", "data.frame")
}
df
}
#' @export
print.ahistory <- function(x, ...) {
x[,2] <- paste0(x[,2], sapply(max(nchar(x[,2])) + 1 - nchar(x[,2]),
function(j) paste(rep(" ", j), collapse="")))
for (i in nrow(x):1) {
if (i < nrow(x)) cat("-> ") else cat(" ")
cat(x[i,2], " [", x[i,1], "]\n", sep = "")
}
}
#' @export
print.ahistoryKable <- function(x, ...){
if (!requireNamespace("knitr", quietly = TRUE)) {
stop("knitr package required for archivist:::print.ahistoryKable function")
}
cat(knitr::kable(x[nrow(x):1, 2:1], ...), sep="\n")
}
|
/R/ahistory.R
|
no_license
|
cha63506/archivist-3
|
R
| false | false | 6,674 |
r
|
### Generate keep lists for subsetted case-control analyses
setwd(Sys.getenv("inflation"))
#get cluster assignments
clusters = read.table("../../pca/mega_cluster_assignments_and_pcs.txt", header=TRUE, sep="\t")
#get set of ids included in case control analyses
unrelateds = read.table("../cc_analysis/unrelateds_all.txt", header=TRUE)
keep = clusters[clusters$IID %in% unrelateds$iid & !is.na(clusters$PHENO), c("IID","cluster_label", "PHENO")]
getRandomSet = function(ancestry, n, seed) {
set.seed(seed)
source_cases = keep$IID[keep$cluster_label==ancestry & keep$PHENO==2]
source_controls = keep$IID[keep$cluster_label==ancestry & keep$PHENO==1]
return(c(sample(source_cases, size=n, replace=FALSE), sample(source_controls, size=n, replace=FALSE)))
}
getKeepLists = function(ancestry, n_cases, n_subs) {
seeds = seq(1,n_subs)
subs = list()
for (i in 1:n_subs) {
subs[[i]] = getRandomSet(ancestry, n=n_cases, seed=seeds[i])
write.table(subs[[i]], file=paste0(ancestry,"_sub",i,".txt"), col.names=FALSE, row.names=FALSE, quote=FALSE)
}
subs
}
#get trio counts (read group as character so it can index the list below)
trio_counts = read.table(paste0(Sys.getenv("fam_assoc"),"/trio_count.txt"), header=FALSE, skip=1, stringsAsFactors=FALSE)
names(trio_counts) = c("affected_trios","group")
trio_counts_dict = list()
for (i in 1:nrow(trio_counts)) {
trio_counts_dict[[trio_counts$group[i]]] = trio_counts$affected_trios[i]
}
## EUR --> 4758 cases, 4758 controls
subs_EUR = getKeepLists("EUR",n_cases=trio_counts_dict[["EUR"]],n_subs=5)
## FIN --> 812 cases, 812 controls
subs_FIN = getKeepLists("FIN",n_cases=trio_counts_dict[["FIN"]],n_subs=5)
## AFR --> 122 cases, 122 controls
subs_AFR = getKeepLists("AFR",n_cases=trio_counts_dict[["AFR"]],n_subs=5)
## AMR --> 294 cases, 294 controls
subs_AMR = getKeepLists("AMR",n_cases=trio_counts_dict[["AMR"]],n_subs=5)
|
/scripts/inflation_analysis/subset_cohorts.R
|
no_license
|
ccrobertson/t1d-immunochip-2020
|
R
| false | false | 1,806 |
r
|
afl = read.csv("D:/Chrome Download/MetricsDataAFL.csv")
fuzz = lm(Performance ~ Files + Lines + Statements + X..Branches + X..Comments + Functions + Avg.stmts.Function + max.complexity + max.depth + avg.depth + avg.complexity, data = afl)
fuzz2 = lm(Performance ~ Files + Lines + Statements + X..Branches + X..Comments + Functions + max.complexity + avg.depth + avg.complexity, data = afl)
step(fuzz2)
fuzz3 = lm(Performance ~ Files + Statements + X..Branches + Functions + max.complexity + avg.depth + avg.complexity, data = afl)
summary(fuzz3)
hong = read.csv("D:/Chrome Download/MetricsDataHonggfuzz.csv")
fuzzy = lm(Performance ~ Files + Lines + Statements + X..Branches + X..Comments + Functions + Avg.stmts.Function + max.complexity + max.depth + avg.depth + avg.complexity, data = hong)
fuzzy2 = lm(Performance ~ Files + Lines + Statements + X..Branches + X..Comments + Functions + max.complexity + max.depth + avg.depth + avg.complexity, data = hong)
fuzzy3 = lm(Performance ~ Statements + X..Branches + X..Comments + Functions + max.complexity + avg.depth + avg.complexity, data = hong)
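## Sketch (mirroring the AFL branch above): reduce and inspect the Honggfuzz model as well
step(fuzzy2)
summary(fuzzy3)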
|
/fuzz.R
|
no_license
|
AIK13/Fuzz-Predictions
|
R
| false | false | 1,020 |
r
|
library(shiny)
shinyServer(function(input, output) {
  output$output_text <- renderText(paste("Petal.Width~", input$x)) # Title of the main panel
output$output_plot <- renderPlot(plot(as.formula(paste("Petal.Width~", input$x)),
data = iris) )
output$summary <- renderPrint({
summary(iris)
})
output$table <- renderTable({
data.frame(iris)
})
output$data_table <- renderDataTable({iris},
options = list(aLengthMenu = c(10,20,50),
iDisplayLength = 10))
})
|
/sesion_8/ejemplo/server.R
|
no_license
|
YaelRmz/bedu_fase_2_modulo_1
|
R
| false | false | 664 |
r
|
readname<-"ea203553-9919-490a-8efb-d6076706bb69"
readname<-"00e1b69e-ea0e-47c5-8c50-39b88049e1af"
readname<-"4fef7a01-72f5-481c-992b-90d6f996167d"
readname <-"f84f8a14-c966-495c-8a4b-3c256c0abfd9"
readname<-"97c60617-a585-4f8b-b09b-6782ee6ab954"
df_check <- subset(df,query =="cel-5s-unit1" & length >17 &subject == readname)
View(df_check)
readname<-"f0997810-05e4-4bbc-b8e8-fa9710ed980c"
|
/Rcode/N2L1_5S_ERC.R
|
no_license
|
qiutaoding/QD_nanoKit_py3
|
R
| false | false | 391 |
r
|
#Simulate random X,Y from a true (linear) model and plot the different regression lines.
#Does the average coefficient converge? Change the variance of the noise: how does this change
#the multiple lines plotted?
#sd=0.4
par(mfrow=c(1,1))
x=rnorm(50,mean=2,sd=4)
y=10+2*x
plot(x,y,type="l",col="red")
coef1=rep(0,5)
coef2=rep(0,5)
for(i in c(1:5))
{
y=10+2*x+rnorm(50,sd=0.4)
fit1=lm(y~x)
coeffs=coefficients(fit1)
coef1[i]=coeffs[1]
coef2[i]=coeffs[2]
lines(x,predict(fit1),col="blue")
}
mean(coef1)
mean(coef2)
#Do the coefficients converge on average?
?coefficients
#sd=1
par(mfrow=c(1,1))
x=rnorm(50,mean=2,sd=4)
y=10+2*x
plot(x,y,type="l",col="red")
coef1=rep(0,5)
coef2=rep(0,5)
for(i in c(1:5))
{
y=10+2*x+rnorm(50,sd=1)
fit1=lm(y~x)
coeffs=coefficients(fit1)
coef1[i]=coeffs[1]
coef2[i]=coeffs[2]
lines(x,predict(fit1),col="blue")
}
mean(coef1)
mean(coef2)
#sd=5
par(mfrow=c(1,1))
x=rnorm(50,mean=2,sd=4)
y=10+2*x
plot(x,y,type="l",col="red")
coef1=rep(0,50)
coef2=rep(0,50)
for(i in c(1:50))
{
y=10+2*x+rnorm(50,sd=5)
fit1=lm(y~x)
coeffs=coefficients(fit1)
coef1[i]=coeffs[1]
coef2[i]=coeffs[2]
lines(x,predict(fit1),col="blue")
}
mean(coef1)
mean(coef2)
#as the noise variance increases the lines deviate more from the original line,
#convergence to the true coefficients is also poorer; it improves when we fit more regression lines
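#Sketch: averaging over many replications (same x, sd=5; names below are illustrative) the fitted slope settles near the true value 2
coef2_many=replicate(500, coefficients(lm(10+2*x+rnorm(50,sd=5)~x))[2])
mean(coef2_many)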
##14)Centered x and check beta_0 should be same as mean
x=c(1,2,3,4,5)
x=x-mean(x)
x
mean(x)
y=3*x+5+rnorm(5)
fit1=lm(y~x)
coefficients(fit1)[1]
mean(y)
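#With centered x the fitted intercept equals mean(y) exactly; a quick check:
all.equal(unname(coefficients(fit1)[1]), mean(y))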
#2)RSS variation on adding features to the model
#Rss also tends to decrease on average on adding covariates
rssa=rep(0,47)
for(i in c(2:48))
{
X=rnorm(50*i)
X=matrix(X,nrow=50,ncol=i)
y=6+2*X[,1]+5*X[,2]+rnorm(50)
fit=lm(y~X)
pred_y=predict(fit)
rss=sum((y-pred_y)^2)
rssa[i-1]=rss
}
plot(c(2:48),rssa)
rssa
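#Sketch (illustrative names): with the response held fixed, adding even pure-noise columns never increases the in-sample RSS
X_fix=matrix(rnorm(50*48),nrow=50,ncol=48)
y_fix=6+2*X_fix[,1]+5*X_fix[,2]+rnorm(50)
rss_fix=sapply(2:48,function(p) sum(residuals(lm(y_fix~X_fix[,1:p]))^2))
plot(2:48,rss_fix)
all(diff(rss_fix)<=1e-8) #non-increasing up to numerical noise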
#13)More spread out the x's smaller is the standard error of the coefficients
#Spread out
x=c(10,30,40,50,100,200,300,400,500,600)
y=9+6*x+rnorm(10)
fit=lm(y~x)
sum=summary(fit)
sum$coefficients
#Not spread out
x=c(10,15,20,6,21,22,23,24,25,18)
y=9+6*x+rnorm(10)
fit=lm(y~x)
sum=summary(fit)
sum$coefficients
#even more concentrated
x=c(1,1.5,1.25,1.34,1.65,1.73,1.95,1.84,1.57,1.23)
y=9+6*x+rnorm(10)
fit=lm(y~x)
sum=summary(fit)
sum$coefficients
#verifies the intuition: the standard error of the coefficients increases when the x values are not spread out
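#Sketch: the closed form se(beta1)=sigma/sqrt(sum((x-mean(x))^2)) explains the pattern above
x_wide=c(10,30,40,50,100,200,300,400,500,600)
x_tight=c(1,1.5,1.25,1.34,1.65,1.73,1.95,1.84,1.57,1.23)
1/sqrt(sum((x_wide-mean(x_wide))^2)) #spread-out x -> small standard error (for a given sigma)
1/sqrt(sum((x_tight-mean(x_tight))^2)) #concentrated x -> much larger standard error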
#27)Non-constant variance of error terms / heteroscedasticity and using log transforms to mitigate it
#25)Simulate your own non-linear data and check the residual plot patterns
x=rnorm(100,mean=5,sd=5)
y=10+6*x+13*x^2+15*x^3+rnorm(100)
fit=lm(y~x)
plot(fit,which=1)#Tukey anscombe shows a clear pattern
#Reduce degree of polynomial
x=rnorm(100,mean=5,sd=5)
y=10+6*x+13*x^2+rnorm(100)
fit=lm(y~x)
plot(fit,which=1)#Tukey anscombe shows a clear pattern, narrower than earlier but still a bowl
#Now linear
x=rnorm(100,mean=5,sd=5)
y=10+6*x+rnorm(100)
fit=lm(y~x)
plot(fit,which=1)#approximately random
#Heteroskedastic errors
x=rnorm(100,mean=5)
dev=rep(0,100)
for (i in c(1:100))
{ dev[i]=2*abs(x[i]) }
y=4+2*x+rnorm(100,sd=dev)
fit=lm(y~x)
plot(fit,which=1)#residual spread grows with the fitted values (funnel shape), not random
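#Sketch for the log-transform remedy in 27) (illustrative data): multiplicative noise gives a funnel that log() removes
xh=rexp(100)+1
yh=exp(1+0.5*xh)*exp(rnorm(100,sd=0.3)) #multiplicative errors
plot(lm(yh~xh),which=1) #funnel-shaped residuals
plot(lm(log(yh)~xh),which=1) #roughly constant spread after taking logs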
#Correlated covariates
#multiple simulations
for (i in c(1:10)){
X1=rnorm(100)
X2=6+2*X1+rnorm(100)
#X2=rnorm(100)
#X3=rnorm(100)
full_X=cbind(X1,X2)
y=7+2*X1+rnorm(100)
#full_X
fit_cor=lm(y~full_X)
#summary(fit_all)$coefficients
print(cor(full_X))
#Not correlated covariates
#X1=rnorm(100)
#X2=6+2*X1+rnorm(100)
X2=rnorm(100)
#X3=rnorm(100)
full_X=cbind(X1,X2)
#y=7+2*X1+rnorm(100)
#full_X
fit_nocor=lm(y~full_X)
#summary(fit_all)$coefficients
print(cor(full_X))
print(summary(fit_cor)$coefficients)
print(summary(fit_nocor)$coefficients)}
#Almost always the standard error in the correlated case is larger
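#Arithmetic sketch: for two predictors the variance inflation factor is 1/(1-cor^2);
#with cor(X1,X2) around 0.9 the slope variance is inflated by roughly
1/(1-0.9^2)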
#12)Justify by simulation the more observations we have the smaller the standard error
for(i in c(1:10))
{
#increase n
x=rnorm(5000)
mat=matrix(x,nrow=500,ncol=10)
y=2*mat[,1]+9+rnorm(500)
fit1=lm(y[1:100]~mat[1:100,])
print(summary(fit1)$coefficients)
print(sqrt(sum((summary(fit1)$residuals)^2)/89))
fit2=lm(y~mat)
print(summary(fit2)$coefficients)
print(sqrt(sum((summary(fit2)$residuals)^2)/489))
}
#summary(fit1)
#sqrt(sum(fit1$residuals^2)/500)
#Standard error of the coefficients almost always lower when n increases
print(length(y[101:500]))
###ASK does rse always decrease when we increase the sample size
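#Rough rule: coefficient standard errors scale like 1/sqrt(n), while the residual standard error
#estimates sigma and therefore does not systematically shrink with n
sqrt(100/500) #expected shrink factor for the coefficient se when going from n=100 to n=500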
#Calculating F statistic by hand
x=rnorm(1000)
X=matrix(x,nrow=100,ncol=10)
y=4+6*X[,4]+rnorm(100)
fit1=lm(y~X)
summary(fit1)$fstatistic
#Calculation by hand
y_pred=predict(fit1)
f_num=sum((y_pred-mean(y))^2)/10
f_deno=sum((y_pred-y)^2)/89
F_stat=f_num/f_deno#matched the value returned by lm
F_stat
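#Optional check: the hand-computed statistic should agree with the value reported by lm
all.equal(unname(summary(fit1)$fstatistic["value"]),F_stat)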
#Calculate predictions by hand
X_new=cbind(rep(1,100),X)
y_pred2=X_new%*%solve(t(X_new)%*%(X_new))%*%t(X_new)%*%y
#y_pred=predict(fit1)
f_num=sum((y_pred2-mean(y))^2)/10
f_deno=sum((y_pred2-y)^2)/89
F_stat=f_num/f_deno#matched the value returned by lm
F_stat
#When calculating predictions by hand, do not forget to add the intercept column
#22)Verify intuition prediction intervals are always wider than confidence intervals
X=matrix(rnorm(500),nrow=50,ncol=10)
y=8+2*X[,1]+6*X[,3]+9*X[,4]+rnorm(50)
fit_1<-lm(y~X)
#Confidence intervals
s=summary(fit_1)
rse=sqrt(sum(s$residuals^2)/(50-11))
rse
#Prediction intervals of y
predict.lm(fit_1,interval ="prediction",level=0.95)
#rough upper bound by hand (ignores the leverage term, so it is slightly narrower than predict's)
y_pred_upr=predict(fit_1)+qt(0.975,39)*rse
y_pred_upr
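#Sketch comparing widths directly with predict(): prediction intervals come out wider than confidence intervals
conf_int=predict(fit_1,interval="confidence",level=0.95)
pred_int=predict(fit_1,interval="prediction",level=0.95)
mean(conf_int[,"upr"]-conf_int[,"lwr"]) #average confidence-interval width
mean(pred_int[,"upr"]-pred_int[,"lwr"]) #average prediction-interval width (larger)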
|
/R_code_concepts_Illustration/Regression_lines_multiple_coeff_convergence.R
|
no_license
|
rheasukthanker/Computational_Statistics_ETH
|
R
| false | false | 5,378 |
r
|
# tests designed to produce warnings
context("warnings")
test_that("Extra args for `str`", {
a <- "hello"
b <- "goodbye"
expect_warning(diffStr(a, b, extra=list(comp.str="^")), "Specifying")
expect_warning(diffStr(a, b, extra=list(comp="^")), "Specifying")
expect_warning(diffStr(a, b, extra=list(indent.str="...")), "Specifying")
expect_warning(diffStr(a, b, extra=list(indent="...")), "Specifying")
})
test_that("Max diffs", {
# Max limit warnings work properly; these are not fully fleshed out
A3 <- c("a b c", "d e f A B C D", "g h i", "f")
B3 <- c("a b c", "xd e f E Q L S", "g h i", "q")
expect_warning(diffChr(A3, B3, max.diffs=2), "Exceeded diff")
})
|
/tests/testthat/testthat.warnings.R
|
no_license
|
bestwpw/diffobj
|
R
| false | false | 684 |
r
|
navbarMenu("More",
tabPanel("Manual",
        HTML('<iframe src=\"https://jackolney.github.io/modelr/manual\" style=\"border: 0; position:absolute; top:50px; left:0; right:0; width:100%; height:100%\"></iframe>')
),
tabPanel("Diagnostics",
verbatimTextOutput(outputId = "diagnostics")
)
)
|
/inst/app/ui/more.R
|
permissive
|
strategist922/modelr
|
R
| false | false | 312 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/AvgRank/soft_tissue.csv",header=TRUE,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.8,family="gaussian",standardize=FALSE)
sink('./Model/EN/AvgRank/soft_tissue/soft_tissue_082.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
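## Optional sketch: the cross-validated penalty and its coefficients can also be inspected directly
print(glm$lambda.min)
print(coef(glm, s = "lambda.min"))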
|
/Model/EN/AvgRank/soft_tissue/soft_tissue_082.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 367 |
r
|
## some useful functions for geometric manipulations, for example finding
## neighbours on a ring, or manipulating sweq multiple fields.
## None of these should be used directly except for debugging and exploration purpose.
## the following are passed as argument to other functions: ring/sweq_neigbours/partition/GC
# ring geometry tools ---------------------------------------------------
#' Ring modulo
#' @description
#' Special modulo for indices on ring ( a mod b)
#' with special treatment for a==0
#' @param a,b a mod b
#' @examples
#' ring_mod(-2, 10)
ring_mod <- function(a,b){
c <- a %% b
ifelse(c==0, b, c)
}
#' Distance on ring
#' @description
#' Compute minimal distances (discrete) on a ring
#'
#' @param i from
#' @param j to
#' @param N size of the ring
#' @return distance on a ring between i and j
#' @examples
#' ring_dist(1, 9, 10)
ring_dist <- function(i, j, N){
abs(N/2 - abs(abs(i-j) - N/2))
}
#' Pairwise distances on a rings
#'
#' @param N the size of the ring
#' @return the matrix of pairwise distances on a ring of size N.
#' @examples
#' A <- ring_alldist(9)
ring_alldist <- function(N){
ind <- 1:N
A <- matrix(NA, nrow=N, ncol=N)
for (i in ind){
A[i,] <- ring_dist(i, ind, N)
}
return(A)
}
#' Neighborhood on a ring
#'
#' @description
#' get the l neighbours on each side of
#' i on a ring of size ndim
#'
#' @param i center
#' @param l number of neighbours in each direction
#' @param ndim size of the ring
#' @return list(x.ind, center, local.center)
#' local.center are the indices of the centers in the local space (x.local) to apply the update
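#' @examples
#' ring_neighbours(1, 2, 10) # wraps around the ring: x.ind is 9, 10, 1, 2, 3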
ring_neighbours <- function(i, l, ndim){
x.ind <- ring_mod((i-l):(i+l), ndim)
center <- i
local.center <- ring_mod(l + 1, ndim)
return(list(x.ind=unique(x.ind), center=center, local.center=local.center))
}
#' Ring partition for the block-LEnKPF
#'
#' @description
#' Partition the observations in blocks for assimilation with block-LEnKPF.
#' Takes into account the ring geometry.
#' We assume that R is diagonal and don't take it into account
#'
#' @param H the observation operator
#' @param taper the tapering matrix (qxq)
#' @param block.size chunk size in domain space to take into account for one block
#' @examples
#' ndim <- 100
#' taper <- ring_GC(ndim, 5)
#' H <- diag(ndim)
#' partition <- ring_partition(H, taper, 5)
#' partition <- ring_partition( matrix( c(rep(0,ndim-1),1),1,ndim), taper, 5)
#' H <- matrix(0, 1, ndim)
#' H[1,round(ndim/2)] <- 1
#' partition <- ring_partition(H, taper, 5)
ring_partition <- function(H, taper, block.size){
## split y in blocks of size ~ block.size
## and assign color such that they can
## be updated in parallel
## from ring geometry
ndim <- ncol(H)
## cut the domain in blocks of approximate size:
domain <- 1:ndim
n.blocks <- ceiling(ndim/block.size)
ind.blocks <- split(domain, cut(domain, n.blocks, labels = FALSE)) ## in domain space
## for each block, find the corresponding y's
yind.blocks <- vector('list', n.blocks)
for (i in 1:n.blocks){
x.ind <- ind.blocks[[i]]
yind.blocks[[i]] <- which(apply(H[, x.ind, drop=FALSE], 1, function(x) {!all(x==0)}))
}
## find x.ind, v.ind and w.ind:
all.ind <- vector('list', n.blocks)
for (i in 1:n.blocks){
y.ind <- yind.blocks[[i]]
x.ind <- which(apply(H[y.ind,, drop=F], 2, function(x){ any(x!=0)}))
v.ind <- setdiff( which(apply( taper[x.ind,,drop=FALSE], 2, function(x){ any(x!=0)})), x.ind )
all.ind[[i]]$x.ind <- x.ind
all.ind[[i]]$v.ind <- v.ind
all.ind[[i]]$y.ind <- y.ind
}
## assign the colors:
col.vec <- rep(1, n.blocks) ## original guess : all 1
## list of indices associated with each color:
col.pool <- list( c(all.ind[[1]]$x.ind,all.ind[[1]]$v.ind) )
ind_path <- 2:n.blocks
for (i in ind_path){
new.ind <- c(all.ind[[i]]$x.ind,all.ind[[i]]$v.ind,all.ind[[i]]$w.ind)
## is there a color with which there is no intersection of indices?
which.block.indep <- sapply( col.pool, function(x) { ! any( new.ind %in% x )} )
if (all(!which.block.indep)){ # if no such color:
col.vec[i] <- max(col.vec)+1 # assign new color
col.pool[[col.vec[i]]] <- new.ind # initate pool of indices for this color
} else {
col.vec[i] <- which(which.block.indep)[1] #take the first color matching the criterion
col.pool[[col.vec[i]]] <- c( col.pool[[col.vec[i]]], new.ind) # add indices to its color pool
}
}
return( list( ind=all.ind, colors=col.vec) )
}
# sweq geometry tools -----------------------------------------------------
#' SWEQ neighbours
#'
#' @description
#' Find the neighbours around i, l in each direction
#' It takes into account the ring structure of the space and the fact that the states has three components (h,u and r)
#'
#' @param i the reference position
#' @param l the number of neighbours in each direction
#' @return a list with x.ind the indices of the neighbours and center the position i for h, u and r (in the vector state)
#' local.center are the indices of the centers in the local space (x.local) to apply the update
#' @examples
#' sweq_neighbours(5, 2, 10)
sweq_neighbours <- function(i, l, ndim){
x.ind <- ring_mod((i-l):(i+l), ndim)
all.ind <- c(x.ind, x.ind + ndim, x.ind + 2*ndim)
center <- c(i, i + ndim, i + 2*ndim)
w.size <- 2*l + 1
local.center <- c(l+1, l+1 + w.size, l+1 + 2*w.size)
return(list(x.ind=all.ind, center=center, local.center=local.center))
}
#' SWEQ stack state
#'
#' @description
#' Stack a state vector from the h,u and r components.
#' Basically the opposite of sweq_split.
#'
#' @param h,u,r the h, u and r field components (vectors or ensemble matrices)
#' @return state vector or matrix
#' @examples
#' state0 <- .sweq_init(10, umean=0, unoise=FALSE, topo=1)
#' state <- sweq_stack(state0$h, state0$u, state0$r)
sweq_stack <- function(h, u, r){
if(is.null(dim(h))) { #vectors:
state=c(h, u, r)
} else { #matrix of ensembles:
state=rbind(h, u, r)
}
return(state)
}
#' SWEQ split state
#'
#' @description
#' Split a state vector into the h,u and r components.
#' Basically the opposite of sweq_stack
#'
#' @param state Vector (3ndim) or matrix (3ndim x N) if ensemble
#' @param ndim number of dimensions in physical space
#' @param names.only T/F create factors (useful in sweq_plot)
#' @return a list with components h, u and r, each of size ndim
#' @examples
#' state0 <- .sweq_init(10, umean=0, unoise=FALSE, topo=1)
#' hur <- sweq_split(state0$state, 10)
#' h <- hur$h
#' u <- hur$u
#' r <- hur$r
sweq_split <- function(state, ndim, names.only=FALSE){
if (names.only){
return( rep(c('h', 'u', 'r'), each=ndim) )
}
if (is.null(dim(state))){#vectors:
h <- state[1:ndim]
u <- state[(ndim+1):(2*ndim)]
r <- state[(2*ndim+1):(3*ndim)]
} else{# matrix of ensembles:
h <- state[1:ndim,]
u <- state[(ndim+1):(2*ndim),]
r <- state[(2*ndim+1):(3*ndim),]
}
return(list(h=h, u=u, r=r))
}
#' Project back onto the manifold where rain>0:
#'
#' @param xa matrix of analysis ensemble to project
#' @param xb matrix of background ensemble (currently not used)
#' @param model.run object (currently not used)
#' @param ndim domain dimension
#' @return xa with negative rain set to zero
sweq_proj <- function(xa, xb, model.run, ndim=model.run$ndim){
## project negative rain to zero:
rind <- sweq_split(xa, ndim, names.only = TRUE) == 'r'
xa[(xa < 0 & rind)] <- 0
xa
}
#' SWEQ partition for the block-LEnKPF
#'
#' @description
#' Partition the observations in blocks for assimilation with block-LEnKPF.
#' Takes into account the sweq geometry
#' We assume that R is diagonal and don't take it into account
#'
#' @param H the observation operator
#' @param taper the tapering matrix (qxq)
#' @param block.size chunk size in domain space to take into account for one block
#' @examples
#' ndim <- 168
#' ## observe all rain + some wind:
#' y.ind <- c( (ndim+1):(ndim+10), (2*ndim+1):(3*ndim) )
#' H <- diag(ndim*3)[y.ind, ]
#' taper <- sweq_GC(ndim, 5)
#' partition <- sweq_partition(H, taper, 21)
sweq_partition <- function(H, taper, block.size){
## split y in blocks of size ~ block.size
## and assign color such that they can
## be updated in parallel
## from SWEQ
ndim <- ncol(H)/3
## cut the domain in blocks of approximate size:
domain <- 1:ndim
n.blocks <- ceiling(ndim/block.size)
ind.blocks <- split(domain, cut(domain, n.blocks, labels = FALSE)) ## in domain space
## for each block, find the corresponding y's
yind.blocks <- vector('list', n.blocks)
for (i in 1:n.blocks){
## get the indices for h,r and w:
x.ind <- c( ind.blocks[[i]], ndim+ind.blocks[[i]], 2*ndim+ind.blocks[[i]])
yind.blocks[[i]] <- which(apply(H[, x.ind], 1, function(x) {!all(x==0)}))
}
## find x.ind, v.ind and w.ind:
all.ind <- vector('list', n.blocks)
for (i in 1:n.blocks){
y.ind <- yind.blocks[[i]]
x.ind <- which(apply(H[y.ind,, drop=F], 2, function(x){ any(x!=0)}))
v.ind <- setdiff( which(apply( taper[x.ind,,drop=FALSE], 2, function(x){ any(x!=0)})), x.ind )
all.ind[[i]]$x.ind <- x.ind
all.ind[[i]]$v.ind <- v.ind
all.ind[[i]]$y.ind <- y.ind
}
## assign the colors:
col.vec <- rep(1, n.blocks) ## original guess : all 1
## list of indices associated with each color:
col.pool <- list( c(all.ind[[1]]$x.ind,all.ind[[1]]$v.ind) )
for (i in 2:n.blocks){
new.ind <- c(all.ind[[i]]$x.ind,all.ind[[i]]$v.ind)
## is there a color with which there is no intersection of indices?
which.block.indep <- sapply( col.pool, function(x) { ! any( new.ind %in% x )} )
if (all(!which.block.indep)){ # if no such color:
col.vec[i] <- max(col.vec)+1 # assign new color
col.pool[[col.vec[i]]] <- new.ind # initate pool of indices for this color
} else {
col.vec[i] <- which(which.block.indep)[1] #take the first color matching the criterion
col.pool[[col.vec[i]]] <- c( col.pool[[col.vec[i]]], new.ind) # add indices to its color pool
}
}
return( list( ind=all.ind, colors=col.vec) )
}
#' Additive model error
#' @description
#' Experimental function for additive model error.
#' The values of pm_* should be chosen
#' as std of noise to add for each variables at a frequency of 360.
#'
#' @param rho inflation factor to change the std of additive noise
#' @param xa analysis ensemble
#' @param xb background ensemble
#' @param model.run as returned from cycling experiment
#' @return xa inflated
additive_error <- function(rho, xa, xb, model.run){
## values chosen for freq=360:
# pm_r <- 0.00075; pm_h <- 0.0075; pm_u <- 0.00075
# pm_r <- 0.0025; pm_h <- 0.025; pm_u <- 0.0025
pm_r <- 0.0025; pm_h <- 0.05; pm_u <- 0.0025
pm_r <- rho*pm_r; pm_h <- pm_h*rho; pm_u <- pm_u *rho
## adapt to other frequencies:
freqfac <- model.run$freq/360
pm_r <- freqfac * pm_r; pm_h <- freqfac * pm_h; pm_u <- freqfac * pm_u
## iid model error:
k <- ncol(xa)
q <- model.run$ndim
eps_r <- pm_r * matrix( rnorm(q*k), q, k)
eps_h <- pm_h * matrix( rnorm(q*k), q, k)
eps_u <- pm_u * matrix( rnorm(q*k), q, k)
## add to state:
state <- sweq_split(xa, model.run$ndim)
newr <- state$r + eps_r
newh <- state$h + eps_h
newu <- state$u + eps_u
xa <- sweq_stack(newh, newu,newr)
## based on cov of x: (introduces weird artefacts)
# Pb <- cov(t(xa))*taper
# Pb2 <- msqrt(Pb)
# p <- nrow(xa)
# xa <- ens0 + Pb2 %*% matrix(rnorm(p*k),p,k)
return(xa)
}
# Gaspari Cohn covariance tools -------------------------------------------
#' GC covariance function
#'
#' @description
#' correlation function from Gaspari & Cohn 1999, eq. 4.10
#' used in GC_taper
#'
#' @param z is a distance
#' @param c the support half-length
#' @family sweq.manip
#' @examples
#' GC_function(2, 5)
#' sapply(1:10, GC_function, c=5)
GC_function <- function(z, c){
## correlation function
##
##
## c is the support half-length
zc <- z/c
if (z >= 0 & z <= c){
-1/4 * zc^5 + 1/2 * zc^4 + 5/8 * zc^3 - 5/3 * zc^2 + 1
} else if (z > c & z <= 2*c) {
1/12 * zc^5 - 1/2 * zc^4 + 5/8 * zc^3 + 5/3 * zc^2 - 5 * zc + 4 - 2/3 * c/z
} else {
0
}
}
#' GC taper on ring
#'
#' @description
#' Compute the GC taper on a ring, with support half-length c
#'
#' @param ndim number of dimensions in physical space
#' @param c the support half-length
#' @return taper matrix
#' @examples
#' taper <- ring_GC(100, 10)
#' image_mat(taper)
ring_GC <- function(ndim, c){
if (c*4 > ndim) warning("no zeroes in taper")
dd <- ring_alldist(ndim)
apply(dd, c(1,2), GC_function, c=c)
}
#' GC taper on SWEQ
#'
#' @description
#' Compute the GC taper on the sweq geometry for h,u and r, with support half-length c
#' Cross correlations between fields can be defined (and should be non-zero)
#'
#' @param ndim number of dimensions in physical space
#' @param c the support half-length
#' @param cross.vec cross-correlation factors between the three fields (recycled if a single value is given)
#' @return taper matrix of size 3ndim x 3ndim
#' @examples
#' taper <- sweq_GC(100, 10)
#' image_mat(taper)
sweq_GC <- function(ndim, c, cross.vec=rep(0.9,3)){
if(length(cross.vec)==1) cross.vec <- rep(cross.vec, 3)
uni.taper <- ring_GC(ndim, c)
Tmat <- diag(3)
Tmat[1,2] <- Tmat[2,1] <- cross.vec[1]
Tmat[1,3] <- Tmat[3,1] <- cross.vec[3]
Tmat[2,3] <- Tmat[3,2] <- cross.vec[2]
global.taper <- kronecker(Tmat, uni.taper)
return(global.taper)
}
|
/R/utils_geom.R
|
no_license
|
robertsy/assimilr
|
R
| false | false | 13,231 |
r
|
df <- data.frame(
x = c(1, 2), width = c(2, 2),
y = c(1, 2), height = c(2, 2),
colour = c("A", "B")
)
base <- ggplot(df, aes(x = x, y = y, width = width, height = height,
fill = colour))
test_that("geom_tile_theme can change tile appearance", {
base <- base + geom_tile_theme()
case1 <- base + theme(elementalist.geom_rect = element_rect_wiggle())
case2 <- base + theme(elementalist.geom_rect = element_rect_multicolour())
ctrl <- base + theme(elementalist.geom_rect = element_rect_seq())
cases <- list(case1, case2, ctrl)
cases <- lapply(cases, ggplotGrob)
cases <- lapply(cases, function(x) {
is_panel <- grep("panel", x$layout$name)
panel <- x$grobs[[is_panel]]
names <- names(panel$children)
i <- which(startsWith(names, "rectseq") | startsWith(names, "GRID.polygon"))
panel$children[[i]]
})
expect_length(cases[[1]]$children[[1]]$x, 408)
expect_length(cases[[1]]$children[[1]]$gp$col, 1)
expect_length(cases[[2]]$children[[1]]$x, 416)
expect_length(cases[[2]]$children[[2]]$gp$col, 408)
expect_length(cases[[3]]$x, 16)
expect_length(cases[[3]]$gp$col, 2)
})
test_that("geom_tile_theme child elements inherits from theme", {
test <- base + geom_tile_theme(
element = element_rect_seq(colour = "blue")
) +
theme(
elementalist.geom_rect = element_rect_seq(linetype = 2)
)
gt <- ggplotGrob(test)
gt <- gt$grobs[[grep("panel", gt$layout$name)]]
gt <- gt$children[vapply(gt$children, inherits, logical(1), "polygon")][[1]]
expect_equal(gt$gp$col, c("blue", "blue"))
expect_equal(gt$gp$lty, c(2))
})
test_that("geom_tile_theme rejects inappropriate elements", {
case <- substitute(geom_tile_theme(element = NULL))
expect_silent(eval(case))
case <- substitute(geom_tile_theme(element = element_rect()))
expect_silent(eval(case))
case <- substitute(geom_tile_theme(element = element_line()))
expect_error(eval(case), "should be of type")
})
|
/tests/testthat/test-geom_tile_theme.R
|
permissive
|
gejielin/elementalist
|
R
| false | false | 1,970 |
r
|
### require or.prev
predict.mypaper <- function(x,traindata=NULL,testdata= NULL, method=c("glm","svm","lasso","randomforest", "knn"), kernal="linear", OOB=T,k=25,logit=F, ...){
corrected.c <- NULL
calibration.auc <- NULL
predicted_pro_2 <- NULL
model.data <-x$model$GLM
model.formula <- model.data$formula
  if(method=="lasso" ){
    dep_var_level2 <- levels(x$data[,x$dep_var])[[2]]
    # the 0/1 response is only needed when a new lasso model is fit from traindata below
    if(!is.null(traindata)) dep_var_0or1 <- ifelse(traindata[,x$dep_var] == dep_var_level2,1,0 )
    ind_var_str <- paste(c(as.character(x$ind_var)), collapse = "+ ")
    fmla <- as.formula( paste( x$dep_var,"~", ind_var_str))
  }
if(is.null(traindata)){
if(method=="glm"){model <- x$model$GLM}
if(method=="svm"){model <- x$model$SVM}
if(method=="randomforest"){model <- x$model$RF}
if(method=="lasso"){model <- x$model$lasso.glm
mod.matrix <- x$model$model.matrix
}
if(method=="knn"){model <- x$model$KNN}
}else{
if(method=="glm"){ model <- glm(model.formula, data = traindata, family = binomial(), method = "brglmFit")}
    if(method=="svm"){model <-e1071::svm(model.formula, data=traindata, probability=TRUE, scale=FALSE,
                                          kernel="radial",gamma=0.125, epsilon=0.125, cost=0.25 )
    } #hyperparameters after tuning: kernel="radial",gamma=0.125, epsilon=0.125, cost=0.25
if(method=="randomforest"){ model<- randomForest::randomForest(model.formula, data=traindata, ntree = 500)}
if(method=="lasso"){
mod.matrix <- model.matrix(fmla, traindata)
cv.glm.out <- cv.glmnet(x=mod.matrix, y= dep_var_0or1,
weights = NULL, offset = NULL, lambda = NULL,
type.measure = "auc",
nfolds = 10, foldid = NULL, alignment = c("lambda","fraction"), grouped = TRUE, keep = FALSE, parallel = FALSE,
gamma = c(0, 0.25, 0.5, 0.75, 1), relax = FALSE, trace.it = 0)
model <- glmnet(x=mod.matrix, y= dep_var_0or1, family = "binomial", alpha = 1, lambda = cv.glm.out$lambda.1se)
}
}
# below: prepare testdata--------------------
dep_var <- x$dep_var
ind_var <- x$ind_var
testdata_rf <- testdata
if(is.null(testdata)){testdata= x$data}
#testdata = testdata[,c(ind_var, dep_var)]
# above: prepare testdata-------------------------------------------
#dep_varname <- as.name(dep_var)
dep_var_level2 <- levels(x$data[,dep_var])[[2]]
dep_var_0or1 <- ifelse(testdata[,dep_var] == dep_var_level2,1,0 )
#data1<-mutate(data1, dep_var_0 = dep_var_0or1)
if(method=="glm"){
if(logit){ predicted_pro_2 <- predict(model,newdata= testdata, type = "link")
}else{predicted_pro_2 <- predict(model,newdata= testdata, type = "response")}
}
if(method=="svm"){
predicted_svm <- predict(model, newdata = testdata, probability=T)
predicted_svm <- attr(predicted_svm, "probabilities")[,2]
predicted_pro_2 <- predicted_svm
}
if(method=="randomforest"){
if(is.null(testdata_rf)){
if(OOB){
message(paste("out-of-bag auc"))
predicted_rf <- predict(model, type="prob")
}else{
message(paste("not out-of-bag but original auc"))
predicted_rf <- predict(model, newdata= x$data,
type="prob")
}
}else{
message(paste("not out-of-bag but original auc"))
predicted_rf <- predict(model, newdata= testdata_rf,
type="prob")
}
predicted_rf <- as.numeric(predicted_rf[,2])
predicted_pro_2 <- predicted_rf
}
if(method=="lasso"){
mod.matrix <- model.matrix(fmla, testdata)
predicted_lasso <- predict(model, newx= mod.matrix ,type="response",s=model$lambda)
predicted_lasso <- as.numeric(predicted_lasso)
predicted_pro_2 <- predicted_lasso
}
if(method=="knn"){model <- kknn::kknn(x$model$model.formula, train = x$data, test = testdata, k=k)
predicted_pro_2 <- model$prob[,2]
}
predicted <- data.frame( Predicted = predicted_pro_2, Observed = dep_var_0or1)
class(predicted) <- c("mypaper", "data.frame")
return(predicted)
}
#predglm <- predict.mypaper(model3.imp, traindata=model3.imp$data, method="glm")
#predsvm <- predict.mypaper(model3.imp, traindata=model3.imp$data, method="svm")
#predlasso <- predict.mypaper(model3.imp, traindata=model3.imp$data, method="lasso")
#data.test <- cbind(dataset_selected_imp,OBS=predglm$Observed, GLM=predglm$Predicted, SVM= predsvm$Predicted, lasso= predlasso$Predicted)
# tuning for the svm model
#d <- tune(svm, model1.imp$model$model.formula, data = model1.imp$data, ranges = list(gamma = 2^(-1:1), cost = 2^(-2:2), epsilon=2^(-2:2)),
# tunecontrol = tune.control(sampling = "bootstrap"), nboot=20)
#d <- tune(svm, model3.s$model$model.formula, data = model3.s$data, ranges = list(gamma = 2^(-2:2), cost = 2^(-2:2), epsilon=2^(-2:2)),
# tunecontrol = tune.control(sampling = "bootstrap"), nboot=20)
# tuning for the random forest model
#e <- tune.randomForest(model1.imp$model$model.formula, tunecontrol= tune.control(sampling = "bootstrap", nboot=20),
# data =model1.imp$data , nodesize = 1:5,
# mtry = 2:5, ntree = 500)
# tuning for the knn model
#knn.boot <- e1071::tune.knn( x= model1.imp$model$model.matrix, y=model1.imp$data[,model1.imp$dep_var], k=1:100, tunecontrol= tune.control(sampling = "boot"))
|
/predict_mypaper.R
|
no_license
|
jasonliao2jesus/bully_inv
|
R
| false | false | 5,643 |
r
|
### require or.prev
predict.mypaper <- function(x,traindata=NULL,testdata= NULL, method=c("glm","svm","lasso","randomforest", "knn"), kernal="linear", OOB=T,k=25,logit=F, ...){
corrected.c <- NULL
calibration.auc <- NULL
predicted_pro_2 <- NULL
model.data <-x$model$GLM
model.formula <- model.data$formula
if(method=="lasso" ){
dep_var_level2 <- levels(x$data[,x$dep_var])[[2]]
dep_var_0or1 <- ifelse(traindata[,x$dep_var] == dep_var_level2,1,0 )
ind_var_str <- paste(c(as.character(x$ind_var)), collapse = "+ ")
fmla <- as.formula( paste( x$dep_var,"~", ind_var_str))
}
if(is.null(traindata)){
if(method=="glm"){model <- x$model$GLM}
if(method=="svm"){model <- x$model$SVM}
if(method=="randomforest"){model <- x$model$RF}
if(method=="lasso"){model <- x$model$lasso.glm
mod.matrix <- x$model$model.matrix
}
if(method=="knn"){model <- x$model$KNN}
}else{
if(method=="glm"){ model <- glm(model.formula, data = traindata, family = binomial(), method = "brglmFit")}
if(method=="svm"){model <-e1071::svm(model.formula, data=traindata, probability=TRUE, scale=FALSE,
kernal="radial basis",gamma=0.125, epsilon=0.125, cost=0.25 )
} #hyperparameter after tunning: kernal="radial basis",gamma=0.125, epsilon=0.125, cost=0.25
if(method=="randomforest"){ model<- randomForest::randomForest(model.formula, data=traindata, ntree = 500)}
if(method=="lasso"){
mod.matrix <- model.matrix(fmla, traindata)
cv.glm.out <- cv.glmnet(x=mod.matrix, y= dep_var_0or1,
weights = NULL, offset = NULL, lambda = NULL,
type.measure = "auc",
nfolds = 10, foldid = NULL, alignment = c("lambda","fraction"), grouped = TRUE, keep = FALSE, parallel = FALSE,
gamma = c(0, 0.25, 0.5, 0.75, 1), relax = FALSE, trace.it = 0)
model <- glmnet(x=mod.matrix, y= dep_var_0or1, family = "binomial", alpha = 1, lambda = cv.glm.out$lambda.1se)
}
}
# below: prepare testdata--------------------
dep_var <- x$dep_var
ind_var <- x$ind_var
testdata_rf <- testdata
if(is.null(testdata)){testdata= x$data}
#testdata = testdata[,c(ind_var, dep_var)]
# above: prepare testdata-------------------------------------------
#dep_varname <- as.name(dep_var)
dep_var_level2 <- levels(x$data[,dep_var])[[2]]
dep_var_0or1 <- ifelse(testdata[,dep_var] == dep_var_level2,1,0 )
#data1<-mutate(data1, dep_var_0 = dep_var_0or1)
if(method=="glm"){
if(logit){ predicted_pro_2 <- predict(model,newdata= testdata, type = "link")
}else{predicted_pro_2 <- predict(model,newdata= testdata, type = "response")}
}
if(method=="svm"){
predicted_svm <- predict(model, newdata = testdata, probability=T)
predicted_svm <- attr(predicted_svm, "probabilities")[,2]
predicted_pro_2 <- predicted_svm
}
if(method=="randomforest"){
if(is.null(testdata_rf)){
if(OOB){
message(paste("out-of-bag auc"))
predicted_rf <- predict(model, type="prob")
}else{
message(paste("not out-of-bag but original auc"))
predicted_rf <- predict(model, newdata= x$data,
type="prob")
}
}else{
message(paste("not out-of-bag but original auc"))
predicted_rf <- predict(model, newdata= testdata_rf,
type="prob")
}
predicted_rf <- as.numeric(predicted_rf[,2])
predicted_pro_2 <- predicted_rf
}
if(method=="lasso"){
mod.matrix <- model.matrix(fmla, testdata)
predicted_lasso <- predict(model, newx= mod.matrix ,type="response",s=model$lambda)
predicted_lasso <- as.numeric(predicted_lasso)
predicted_pro_2 <- predicted_lasso
}
if(method=="knn"){model <- kknn::kknn(x$model$model.formula, train = x$data, test = testdata, k=k)
predicted_pro_2 <- model$prob[,2]
}
predicted <- data.frame( Predicted = predicted_pro_2, Observed = dep_var_0or1)
class(predicted) <- c("mypaper", "data.frame")
return(predicted)
}
#predglm <- predict.mypaper(model3.imp, traindata=model3.imp$data, method="glm")
#predsvm <- predict.mypaper(model3.imp, traindata=model3.imp$data, method="svm")
#predlasso <- predict.mypaper(model3.imp, traindata=model3.imp$data, method="lasso")
#data.test <- cbind(dataset_selected_imp,OBS=predglm$Observed, GLM=predglm$Predicted, SVM= predsvm$Predicted, lasso= predlasso$Predicted)
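# Editor's sketch (not part of the original script): given the Predicted/Observed columns
# returned by predict.mypaper, a discrimination AUC could be computed with the pROC package.
# Assumes pROC is installed and that model3.imp/predglm exist as in the examples above.
#roc_obj <- pROC::roc(response = predglm$Observed, predictor = predglm$Predicted)
#pROC::auc(roc_obj)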
# tuning for the svm model
#d <- tune(svm, model1.imp$model$model.formula, data = model1.imp$data, ranges = list(gamma = 2^(-1:1), cost = 2^(-2:2), epsilon=2^(-2:2)),
# tunecontrol = tune.control(sampling = "bootstrap"), nboot=20)
#d <- tune(svm, model3.s$model$model.formula, data = model3.s$data, ranges = list(gamma = 2^(-2:2), cost = 2^(-2:2), epsilon=2^(-2:2)),
# tunecontrol = tune.control(sampling = "bootstrap"), nboot=20)
# tuning for the random forest model
#e <- tune.randomForest(model1.imp$model$model.formula, tunecontrol= tune.control(sampling = "bootstrap", nboot=20),
# data =model1.imp$data , nodesize = 1:5,
# mtry = 2:5, ntree = 500)
# tuning for the knn model
#knn.boot <- e1071::tune.knn( x= model1.imp$model$model.matrix, y=model1.imp$data[,model1.imp$dep_var], k=1:100, tunecontrol= tune.control(sampling = "boot"))
|
library(ggplot2)
library(MASS)
library(dplyr)
library(lubridate)
library(fairness)
library(fairmodels)
library(DALEX)
library(ranger)
library(gbm)
setwd('~/GitHub/inds4997-datasciencecapstone')
dataTrain <- read.csv('./data/compas-scores-updated-training.csv')
dataTest <- read.csv('./data/compas-scores-updated-testing.csv')
# Order Score Text for graph processing later
dataTrain$score_text <- factor(dataTrain$score_text,
order = TRUE,
levels = c("Low", "Medium", "High"))
# Order Score Text for graph processing later
dataTest$score_text <- factor(dataTest$score_text,
order = TRUE,
levels = c("Low", "Medium", "High"))
# Create Ordinal Logistic Model
#Removed days_b_screening_arrest & c_days_from_compas because they did not contribute to algorithm and had NA values
model_fit <- polr(score_text ~ race + age + sex + juv_fel_count + juv_misd_count + juv_other_count + priors_count + c_charge_degree + c_charge_violent + c_time_in_jail, data = dataTrain, Hess = TRUE)
#Creating vector that will hold model predictions for each record in dataset
n<- nrow(dataTest)
temp=c()
i=1
while(i<n+1){
temp[i]<- if (predict(model_fit,dataTest[i,] , type = "p")[1] > predict(model_fit,dataTest[i,] ,type = "p")[2] & predict(model_fit,dataTest[i,] ,type = "p")[1] > predict(model_fit,dataTest[i,] ,type = "p")[3]) {
"Low"
} else if (predict(model_fit,dataTest[i,] ,type = "p")[2] > predict(model_fit,dataTest[i,] ,type = "p")[1] & predict(model_fit,dataTest[i,] ,type = "p")[2] > predict(model_fit,dataTest[i,] ,type = "p")[3]) {
"Medium"
} else {
"High"
}
i=i+1
}
#Appending predictions to df
dataTest$model<-temp
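# Editor's sketch (assumption, not in the original pipeline): predict.polr can return the most
# likely class for all rows at once, avoiding the row-by-row while loop above. The result
# should match the "temp" vector built above.
#dataTest$model <- as.character(predict(model_fit, newdata = dataTest, type = "class"))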
#####################################################
# Creating gbm model and applying reweighing
dataTrain <- read.csv('./data/compas-scores-updated-training.csv')
dataTrain <- dataTrain %>%
filter(!score_text == "Medium")
#Low prob of recidivism is the positive outcome so given 1, high risk given 0
dataTrain$score <- ifelse(dataTrain$score_text=="Low",1,0)
#Race is our protected class
protected <- as.factor(dataTrain$race)
dataTrain$c_charge_degree <- as.factor(dataTrain$c_charge_degree)
dataTrain$c_charge_violent <- as.factor(dataTrain$c_charge_violent)
# making model
set.seed(1)
gbm_model <-gbm(score ~ age + juv_fel_count + juv_misd_count + juv_other_count + priors_count + c_charge_degree + c_charge_violent + c_time_in_jail , data = dataTrain, distribution = "bernoulli")
# making explainer
gbm_explainer <- explain(gbm_model,
data = dataTrain,
y = dataTrain$score,
label = "original",
colorize = FALSE)
model_performance(gbm_explainer)
fobject <- fairness_check(gbm_explainer,
protected = protected,
privileged = "Caucasian",
colorize = FALSE)
weights <- reweight(protected = protected, y = dataTrain$score)
set.seed(1)
gbm_weighted <-gbm(score ~ age + juv_fel_count + juv_misd_count + juv_other_count + priors_count + c_charge_degree + c_charge_violent + c_time_in_jail , data = dataTrain, weights = weights, distribution = "bernoulli")
gbm_explainer_w <- explain(gbm_weighted,
data = dataTrain,
y = dataTrain$score,
label = "reweighed",
verbose = FALSE)
fobject <- fairness_check(fobject, gbm_explainer_w, verbose = FALSE)
plot(fobject)
#--------------
# ROC
gbm_explainer_r <- roc_pivot(gbm_explainer,
protected = protected,
privileged = "Caucasian")
fobject <- fairness_check(fobject, gbm_explainer_r,
label = "ROC", # label as vector for explainers
verbose = FALSE)
plot(fobject)
dataTest$c_charge_degree <- as.factor(dataTest$c_charge_degree)
dataTest$c_charge_violent <- as.factor(dataTest$c_charge_violent)
#Creating vector that will hold model predictions for each record in dataset
n<- nrow(dataTest)
out=c()
out2=c()
temp=c()
roc =c()
temp2=c()
temp3=c()
temp4=c()
i=1
#Testing for best cutoff
while(i<n+1){
out[i]<- predict(gbm_explainer,dataTest[i,])
temp[i]<- if (predict(gbm_explainer,dataTest[i,])>0.7) {
"Low"
} else if (predict(gbm_explainer,dataTest[i,])<0.4){
"High"
} else {
"Medium"
}
temp2[i]<- if (predict(gbm_explainer_w,dataTest[i,])>0.7) {
"Low"
} else if (predict(gbm_explainer_w,dataTest[i,])<0.4){
"High"
} else {
"Medium"
}
roc[i]<- if (predict(gbm_explainer_r,dataTest[i,])>0.7) {
"Low"
} else if (predict(gbm_explainer_r,dataTest[i,])<0.4){
"High"
} else {
"Medium"
}
  i=i+1
}
#Appending predictions to df
dataTest$gbm<-temp
dataTest$weighted<-temp2
dataTest$roc<-roc
#Saving to new CSV
write.csv(dataTest,'./data/compas-scores-predictions.csv', row.names = FALSE)
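# Editor's sketch (assumption, not in the original analysis): one quick way to compare the
# 0.4/0.7 cutoffs above against the observed COMPAS labels is a simple cross-tabulation.
#table(observed = dataTest$score_text, predicted = dataTest$gbm)
#table(observed = dataTest$score_text, predicted = dataTest$weighted)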
|
/code/model-predictions.R
|
permissive
|
Pburns525/inds4997-datasciencecapstone
|
R
| false | false | 5,272 |
r
|
library(ggplot2)
library(MASS)
library(dplyr)
library(lubridate)
library(fairness)
library(fairmodels)
library(DALEX)
library(ranger)
library(gbm)
setwd('~/GitHub/inds4997-datasciencecapstone')
dataTrain <- read.csv('./data/compas-scores-updated-training.csv')
dataTest <- read.csv('./data/compas-scores-updated-testing.csv')
# Order Score Text for graph processing later
dataTrain$score_text <- factor(dataTrain$score_text,
order = TRUE,
levels = c("Low", "Medium", "High"))
# Order Score Text for graph processing later
dataTest$score_text <- factor(dataTest$score_text,
order = TRUE,
levels = c("Low", "Medium", "High"))
# Create Ordinal Logistic Model
#Removed days_b_screening_arrest & c_days_from_compas because they did not contribute to algorithm and had NA values
model_fit <- polr(score_text ~ race + age + sex + juv_fel_count + juv_misd_count + juv_other_count + priors_count + c_charge_degree + c_charge_violent + c_time_in_jail, data = dataTrain, Hess = TRUE)
#Creating vector that will hold model predictions for each record in dataset
n<- nrow(dataTest)
temp=c()
i=1
while(i<n+1){
temp[i]<- if (predict(model_fit,dataTest[i,] , type = "p")[1] > predict(model_fit,dataTest[i,] ,type = "p")[2] & predict(model_fit,dataTest[i,] ,type = "p")[1] > predict(model_fit,dataTest[i,] ,type = "p")[3]) {
"Low"
} else if (predict(model_fit,dataTest[i,] ,type = "p")[2] > predict(model_fit,dataTest[i,] ,type = "p")[1] & predict(model_fit,dataTest[i,] ,type = "p")[2] > predict(model_fit,dataTest[i,] ,type = "p")[3]) {
"Medium"
} else {
"High"
}
i=i+1
}
#Appending predictions to df
dataTest$model<-temp
#####################################################
# Creating gbm model and applying reweighing
dataTrain <- read.csv('./data/compas-scores-updated-training.csv')
dataTrain <- dataTrain %>%
filter(!score_text == "Medium")
#Low prob of recidivism is the positive outcome so given 1, high risk given 0
dataTrain$score <- ifelse(dataTrain$score_text=="Low",1,0)
#Race is our protected class
protected <- as.factor(dataTrain$race)
dataTrain$c_charge_degree <- as.factor(dataTrain$c_charge_degree)
dataTrain$c_charge_violent <- as.factor(dataTrain$c_charge_violent)
# making model
set.seed(1)
gbm_model <-gbm(score ~ age + juv_fel_count + juv_misd_count + juv_other_count + priors_count + c_charge_degree + c_charge_violent + c_time_in_jail , data = dataTrain, distribution = "bernoulli")
# making explainer
gbm_explainer <- explain(gbm_model,
data = dataTrain,
y = dataTrain$score,
label = "original",
colorize = FALSE)
model_performance(gbm_explainer)
fobject <- fairness_check(gbm_explainer,
protected = protected,
privileged = "Caucasian",
colorize = FALSE)
weights <- reweight(protected = protected, y = dataTrain$score)
set.seed(1)
gbm_weighted <-gbm(score ~ age + juv_fel_count + juv_misd_count + juv_other_count + priors_count + c_charge_degree + c_charge_violent + c_time_in_jail , data = dataTrain, weights = weights, distribution = "bernoulli")
gbm_explainer_w <- explain(gbm_weighted,
data = dataTrain,
y = dataTrain$score,
label = "reweighed",
verbose = FALSE)
fobject <- fairness_check(fobject, gbm_explainer_w, verbose = FALSE)
plot(fobject)
#--------------
# ROC
gbm_explainer_r <- roc_pivot(gbm_explainer,
protected = protected,
privileged = "Caucasian")
fobject <- fairness_check(fobject, gbm_explainer_r,
label = "ROC", # label as vector for explainers
verbose = FALSE)
plot(fobject)
dataTest$c_charge_degree <- as.factor(dataTest$c_charge_degree)
dataTest$c_charge_violent <- as.factor(dataTest$c_charge_violent)
#Creating vector that will hold model predictions for each record in dataset
n<- nrow(dataTest)
out=c()
out2=c()
temp=c()
roc =c()
temp2=c()
temp3=c()
temp4=c()
i=1
#Testing for best cutoff
while(i<n+1){
out[i]<- predict(gbm_explainer,dataTest[i,])
temp[i]<- if (predict(gbm_explainer,dataTest[i,])>0.7) {
"Low"
} else if (predict(gbm_explainer,dataTest[i,])<0.4){
"High"
} else {
"Medium"
}
temp2[i]<- if (predict(gbm_explainer_w,dataTest[i,])>0.7) {
"Low"
} else if (predict(gbm_explainer_w,dataTest[i,])<0.4){
"High"
} else {
"Medium"
}
roc[i]<- if (predict(gbm_explainer_r,dataTest[i,])>0.7) {
"Low"
} else if (predict(gbm_explainer_r,dataTest[i,])<0.4){
"High"
} else {
"Medium"
}
  i=i+1
}
#Appending predictions to df
dataTest$gbm<-temp
dataTest$weighted<-temp2
dataTest$roc<-roc
#Saving to new CSV
write.csv(dataTest,'./data/compas-scores-predictions.csv', row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iscamaddexp.R
\name{iscamaddexp}
\alias{iscamaddexp}
\title{iscamaddexp Function}
\usage{
iscamaddexp(x)
}
\arguments{
\item{x}{a vector of numeric values.}
}
\description{
This function creates a histogram of the inputted variable and overlays an exponential density function with lambda = 1/mean.
}
\examples{
iscamaddexp(x)
}
\keyword{exponential}
|
/ISCAM2/man/iscamaddexp.Rd
|
no_license
|
shannonpileggi/SP--Pablo--RProgramming
|
R
| false | true | 429 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iscamaddexp.R
\name{iscamaddexp}
\alias{iscamaddexp}
\title{iscamaddexp Function}
\usage{
iscamaddexp(x)
}
\arguments{
\item{x}{a vector of numeric values.}
}
\description{
This function creates a histogram of the inputted variable and overlays an exponential density function with lambda = 1/mean.
}
\examples{
iscamaddexp(x)
}
\keyword{exponential}
|
library("ggplot2")
library("tidyr")
data <- read.csv("perm_data/index_inner_join.csv")
df_index <- subset(data, type=="index")
df_index <- df_index[c("num_rows", "time")]
constant <- mean(df_index$time/(df_index$num_rows * log(df_index$num_rows)))
df_index$fitted <- constant * df_index$num_rows * log(df_index$num_rows)
colnames(df_index) <- c("num_rows", "actual", "fitted")
df_tidy <- gather(df_index, type, time, -num_rows)
print(df_tidy)
ggplot(data = df_tidy, aes(x=num_rows, y= time, color = type)) +
geom_line() +
ggtitle("Index") +
ylab("time in seconds") +
scale_x_continuous(name="number rows", labels = scales::comma)
ggsave("data_out/index.png")
|
/big_data/index.r
|
permissive
|
paulhtremblay/big-data
|
R
| false | false | 670 |
r
|
library("ggplot2")
library("tidyr")
data <- read.csv("perm_data/index_inner_join.csv")
df_index <- subset(data, type=="index")
df_index <- df_index[c("num_rows", "time")]
constant <- mean(df_index$time/(df_index$num_rows * log(df_index$num_rows)))
df_index$fitted <- constant * df_index$num_rows * log(df_index$num_rows)
colnames(df_index) <- c("num_rows", "actual", "fitted")
df_tidy <- gather(df_index, type, time, -num_rows)
print(df_tidy)
ggplot(data = df_tidy, aes(x=num_rows, y= time, color = type)) +
geom_line() +
ggtitle("Index") +
ylab("time in seconds") +
scale_x_continuous(name="number rows", labels = scales::comma)
ggsave("data_out/index.png")
|
attach(test.data.vector)
data=mort_orig
plot(as.ts(data))
plot(as.ts(newdata))
case="foreclosures"
if(case=="mort_orig"){
#replicate 24 months of last value
lastval=data[length(data)]
piece1 = rep(lastval,24)
#for the next 36 months, fit a curve to the first part of the data
#then adjust the intercept to patch where piece 1 leaves-off
X=c(1:36)-1
Y=data[X+1]
model <- lm(Y ~ X + I(X^2))
int=as.numeric(model$coeff[1])
piece2=model$fit-int
#concatenate
projection = c(piece1,piece2)
newdata = c(data,projection)
}
if(case=="foreclosures"){
#the following number taken as "best case" cumulative foreclosures in 2008/2009
#assume recovery (less foreclosures each month) by Q3'09
max.cumulative=10000
cumsum(data)
(length(foreclosures)-6)/12
cumulative = cumsum(data[c((length(data)-6):length(data))])
lines(as.ts(data),col="blue")
plot(as.ts(data))
plot(as.ts(cumulative))
plot(as.ts(cumsum(data)))
X=c(1:length(cumulative))-1
Y=cumulative[X+1]
X=c(X,length(cumulative)+24)
Y=c(Y,max.cumulative)
#optional
X=c(X,length(cumulative)+48)
Y=c(Y,max.cumulative+diff(cumulative[c(4:5)]))
#tapering-off
X=c(X,length(cumulative)+60)
Y=c(Y,max.cumulative+3*diff(cumulative[c(5:6)]))
prediction = c(1:(length(cumulative)+60))-1
plot(X,Y)
convexity=rep(1,length(X))
newfit=concon(x=X,y=Y,v=convexity,k=X)
newx=prediction
lines(predict(newfit,newx))
newdata1=predict(newfit,newx)
lines(as.ts(newdata1))
data[c((length(data)-6):length(data))]
c(data[(length(data)-6)],diff(cumulative))
newdata2=predict(model,newdata=prediction)
heightadjust=data[(length(data))]-diff(newdata1)[7]
conversion = c(diff(newdata1)[-c(1:6)]+heightadjust)
newconverteddata = c(data ,conversion)
plot(as.ts(conversion))
plot(as.ts(newconverteddata))
newconverteddata[which(newconverteddata<0)]=0
newdata=newconverteddata
length(data)
length(newconverteddata)
}
if(case=="unemp_rate"){
X=c(40:length(data))-1
Y=data[X+1]
X=c(X,length(data)+60-13)
Y=c(Y,data[50])
#optional
X=c(X,length(data)+60-13+25)
Y=c(Y,data[25])
plot(X,Y)
model <- lm(Y ~ X + I(X^2) + I(X^3) + I(X^4) + I(X^5))
newx=c(114:(length(data)+50-13+25))-1
int = model$coefficients[1]
coeff1 = ifelse(is.na(model$coefficients[2]),0,model$coefficients[2])
coeff2 = ifelse(is.na(model$coefficients[3]),0,model$coefficients[3])
coeff3 = ifelse(is.na(model$coefficients[4]),0,model$coefficients[4])
coeff4 = ifelse(is.na(model$coefficients[5]),0,model$coefficients[5])
coeff5 = ifelse(is.na(model$coefficients[6]),0,model$coefficients[6])
coeff6 = ifelse(is.na(model$coefficients[7]),0,model$coefficients[7])
newdata2=int+coeff1*newx + coeff2*newx^2 + coeff3*newx^3 +coeff4*newx^4 + coeff5*newx^5 + coeff6*newx^6
newdata=c(data[c(1:113)],newdata2)
plot(as.ts(newdata),col="blue")
lines(as.ts(data),col="black")
}
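# Editor's note (sketch, assumption): instead of rebuilding the polynomial by hand from the
# coefficient vector as above, predict() on a new data frame gives the same fitted values:
#newdata2 <- predict(model, newdata = data.frame(X = newx))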
if(case=="median_income"){
plot(as.ts(data))
replicant=data[c((length(data)-60):length(data))]
newdata=c(data,replicant)
}
if(case=="pop_size"){
X=c(1:length(data))
Y=data
model <- lm(Y ~ X)
newx=c((length(data)+1):(length(data)+60))-1
int = model$coefficients[1]
coeff1 = ifelse(is.na(model$coefficients[2]),0,model$coefficients[2])
coeff2 = ifelse(is.na(model$coefficients[3]),0,model$coefficients[3])
coeff3 = ifelse(is.na(model$coefficients[4]),0,model$coefficients[4])
coeff4 = ifelse(is.na(model$coefficients[5]),0,model$coefficients[5])
coeff5 = ifelse(is.na(model$coefficients[6]),0,model$coefficients[6])
coeff6 = ifelse(is.na(model$coefficients[7]),0,model$coefficients[7])
newdata2=int+coeff1*newx + coeff2*newx^2 + coeff3*newx^3 +coeff4*newx^4 + coeff5*newx^5 + coeff6*newx^6
newdata=c(data,newdata2)
}
if(case=="building_permits"){
plot(as.ts(data))
replicant=data[c(1:60)]
newdata=c(data,replicant)
}
if(case=="ty_cr"){
garch.model.tycr = garchFit(formula= ~arma(1,0) + garch(1,1),data)  # garchFit() comes from the fGarch package
newdata = c(data,predict(garch.model.tycr,60)$meanForecast)
}
|
/SVNRepository/Code/Development/initial_analysis_r_code/curve.fit.trials.R
|
no_license
|
zhouzhenghui/svnrepository
|
R
| false | false | 4,030 |
r
|
attach(test.data.vector)
data=mort_orig
plot(as.ts(data))
plot(as.ts(newdata))
case="foreclosures"
if(case=="mort_orig"){
#replicate 24 months of last value
lastval=data[length(data)]
piece1 = rep(lastval,24)
#for the next 36 months, fit a curve to the first part of the data
#then adjust the intercept to patch where piece 1 leaves-off
X=c(1:36)-1
Y=data[X+1]
model <- lm(Y ~ X + I(X^2))
int=as.numeric(model$coeff[1])
piece2=model$fit-int
#concatenate
projection = c(piece1,piece2)
newdata = c(data,projection)
}
if(case=="foreclosures"){
#the following number taken as "best case" cumulative foreclosures in 2008/2009
#assume recovery (less foreclosures each month) by Q3'09
max.cumulative=10000
cumsum(data)
(length(foreclosures)-6)/12
cumulative = cumsum(data[c((length(data)-6):length(data))])
lines(as.ts(data),col="blue")
plot(as.ts(data))
plot(as.ts(cumulative))
plot(as.ts(cumsum(data)))
X=c(1:length(cumulative))-1
Y=cumulative[X+1]
X=c(X,length(cumulative)+24)
Y=c(Y,max.cumulative)
#optional
X=c(X,length(cumulative)+48)
Y=c(Y,max.cumulative+diff(cumulative[c(4:5)]))
#tapering-off
X=c(X,length(cumulative)+60)
Y=c(Y,max.cumulative+3*diff(cumulative[c(5:6)]))
prediction = c(1:(length(cumulative)+60))-1
plot(X,Y)
convexity=rep(1,length(X))
newfit=concon(x=X,y=Y,v=convexity,k=X)
newx=prediction
lines(predict(newfit,newx))
newdata1=predict(newfit,newx)
lines(as.ts(newdata1))
data[c((length(data)-6):length(data))]
c(data[(length(data)-6)],diff(cumulative))
newdata2=predict(model,newdata=prediction)
heightadjust=data[(length(data))]-diff(newdata1)[7]
conversion = c(diff(newdata1)[-c(1:6)]+heightadjust)
newconverteddata = c(data ,conversion)
plot(as.ts(conversion))
plot(as.ts(newconverteddata))
newconverteddata[which(newconverteddata<0)]=0
newdata=newconverteddata
length(data)
length(newconverteddata)
}
if(case=="unemp_rate"){
X=c(40:length(data))-1
Y=data[X+1]
X=c(X,length(data)+60-13)
Y=c(Y,data[50])
#optional
X=c(X,length(data)+60-13+25)
Y=c(Y,data[25])
plot(X,Y)
model <- lm(Y ~ X + I(X^2) + I(X^3) + I(X^4) + I(X^5))
newx=c(114:(length(data)+50-13+25))-1
int = model$coefficients[1]
coeff1 = ifelse(is.na(model$coefficients[2]),0,model$coefficients[2])
coeff2 = ifelse(is.na(model$coefficients[3]),0,model$coefficients[3])
coeff3 = ifelse(is.na(model$coefficients[4]),0,model$coefficients[4])
coeff4 = ifelse(is.na(model$coefficients[5]),0,model$coefficients[5])
coeff5 = ifelse(is.na(model$coefficients[6]),0,model$coefficients[6])
coeff6 = ifelse(is.na(model$coefficients[7]),0,model$coefficients[7])
newdata2=int+coeff1*newx + coeff2*newx^2 + coeff3*newx^3 +coeff4*newx^4 + coeff5*newx^5 + coeff6*newx^6
newdata=c(data[c(1:113)],newdata2)
plot(as.ts(newdata),col="blue")
lines(as.ts(data),col="black")
}
if(case=="median_income"){
plot(as.ts(data))
replicant=data[c((length(data)-60):length(data))]
newdata=c(data,replicant)
}
if(case=="pop_size"){
X=c(1:length(data))
Y=data
model <- lm(Y ~ X)
newx=c((length(data)+1):(length(data)+60))-1
int = model$coefficients[1]
coeff1 = ifelse(is.na(model$coefficients[2]),0,model$coefficients[2])
coeff2 = ifelse(is.na(model$coefficients[3]),0,model$coefficients[3])
coeff3 = ifelse(is.na(model$coefficients[4]),0,model$coefficients[4])
coeff4 = ifelse(is.na(model$coefficients[5]),0,model$coefficients[5])
coeff5 = ifelse(is.na(model$coefficients[6]),0,model$coefficients[6])
coeff6 = ifelse(is.na(model$coefficients[7]),0,model$coefficients[7])
newdata2=int+coeff1*newx + coeff2*newx^2 + coeff3*newx^3 +coeff4*newx^4 + coeff5*newx^5 + coeff6*newx^6
newdata=c(data,newdata2)
}
if(case=="building_permits"){
plot(as.ts(data))
replicant=data[c(1:60)]
newdata=c(data,replicant)
}
if(case=="ty_cr"){
garch.model.tycr = garchFit(formula= ~arma(1,0) + garch(1,1),data)  # garchFit() comes from the fGarch package
newdata = c(data,predict(garch.model.tycr,60)$meanForecast)
}
|
plot2 <- function() {
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),na.strings=c("?"))
d2<-data[data$Date=="1/2/2007"|data$Date=="2/2/2007",]
DT<-strptime(paste(d2$Date,d2$Time,sep=" "),format="%d/%m/%Y %H:%M:%S")
png("plot2.png")
plot(DT,d2$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off()
}
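# Minimal usage sketch (assumes household_power_consumption.txt is in the working directory):
#plot2()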
|
/plot2.R
|
no_license
|
vzaigrin/ExData_Plotting1
|
R
| false | false | 476 |
r
|
plot2 <- function() {
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),na.strings=c("?"))
d2<-data[data$Date=="1/2/2007"|data$Date=="2/2/2007",]
DT<-strptime(paste(d2$Date,d2$Time,sep=" "),format="%d/%m/%Y %H:%M:%S")
png("plot2.png")
plot(DT,d2$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off()
}
|
# create a quartic graph and see how much space it takes up
# use igraph to generate the shortest path tree
# then accumulate it up over the tree and repeat for each node
library(igraph)     # provides make_graph(), set_edge_attr() and distances() used below
library(magrittr)   # provides the %>% pipe used in square_graph()
# 1. Create matrix representing the pixel image
n_size <- 2^(1:4)
mem_use <- matrix(0, length(n_size), 1)
time_use <- matrix(0, length(n_size), 5)
for (i in seq_along(n_size)) {
n <- n_size[i]
time_use[i,1] <- system.time({m <- matrix(abs(rnorm(n*n)), n, n)})[3]
# 2. Turn this into a graph. This is massively inefficient
vnum <- function(x, y, n) {
(x-1)*n + y
}
square_graph <- function(m) {
edges <- matrix(0, (nrow(m)-1)*ncol(m) + (ncol(m)-1)*nrow(m), 3)
enum <- 1
for (x in 1:nrow(m)) {
for (y in 1:ncol(m)) {
if (x > 1) {
# add link between x-1 and x
edges[enum,] <- c(vnum(x, y, ncol(m)), vnum(x-1, y, ncol(m)), 1/(m[x,y]*m[x-1,y]))
enum <- enum + 1
}
if (y > 1) {
# add link between y-1 and y
edges[enum,] <- c(vnum(x, y, ncol(m)), vnum(x, y-1, ncol(m)), 1/(m[x,y]*m[x,y-1]))
enum <- enum + 1
}
}
}
make_graph(t(edges[,1:2]), dir=FALSE) %>% set_edge_attr('weight', value = edges[,3])
}
time_use[i,3] <- system.time({
g <- square_graph(m)
})[3]
# now let's see how large it is
mem_use[i,1] <- object.size(g)
# compute the endemic risk to each vertex
endemic_risk <- numeric(n*n)
time_use[i,4] <- system.time({
for (v in 1:(n*n)) {
#cat('processing', v, 'of', n*n, '\n')
# find shortest paths from v to all other vertices. Note this isn't symmetric
# TODO: This would be scaled by the risk at v
risk_contrib <- distances(g, v=v, algorithm='dijkstra')
# add on to our endemic risk
endemic_risk <- endemic_risk + risk_contrib[1,]
}})[3]
endemic_risk1 <- endemic_risk
my_dist <- function(m, x, y) {
# JM version of Dijkstra's algorithm for images. Returns distance
# from (x,y) to all values in the image.
# nodes are coded with (x,y) to make things easier to track
# (may need to replace this later for efficiency purposes)
# initialise our vertex distances to infinity, except for our source node
dist <- matrix(Inf, nrow(m), ncol(m)); dist[x,y] <- 0;
# prev <- array(NA, dim=c(nrow(m), ncol(m), 2))
Q <- 1:(nrow(m)*ncol(m))
# run over and iteratively improve
while(length(Q) > 0) {
# find node with smallest dist in the array inside Q.
cat("length(Q) =", length(Q), "\n")
row <- which.min(dist[Q]) # row in Q
cat("smallest row is", row, "with dist", dist[Q[row]], "\n")
v <- Q[row] # vertex on the grid
# find the neighbours of v...
nb <- function(v) {
x <- (v - 1) %% nrow(m) + 1
y <- (v - 1) %/% nrow(m) + 1
nb <- NULL
if (x > 1)
nb <- c(nb, vnum(x-1, y, nrow(m)))
if (y > 1)
nb <- c(nb, vnum(x, y-1, nrow(m))) #v - nrow(m))
if (x < nrow(m))
nb <- c(nb, vnum(x+1, y, nrow(m))) #v + 1)
if (y < ncol(m))
nb <- c(nb, vnum(x, y+1, nrow(m))) #v + nrow(m))
nb
}
# cat("this corresponds to x=", x, ", y=", y, "with dist", dist[x,y], "\n")
md <- dist[v]
Q <- Q[-row]; # removes the vertex from Q (super inefficient)
# find and update the neighbours
n <- nb(v)
for (i in n) { # TODO: Only really need to check within Q
d = dist[v] + 1/(m[v]*m[i])
if (d < dist[i]) {
dist[i] <- d
}
}
}
dist
}
endemic_risk <- numeric(n*n)
time_use[i,5] <- system.time({
d <- 20
for (x in 1:n) {
for (y in 1:n) {
# find shortest paths from v to all other vertices. Note this isn't symmetric
# TODO: This would be scaled by the risk at v
risk_contrib <- my_dist(m, x, y)
# add on to our endemic risk
endemic_risk <- endemic_risk + as.numeric(t(risk_contrib))
}
}})[3]
# print(all.equal(endemic_risk, endemic_risk1))
}
# repeat for each vertex
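# Editor's sketch (assumption, not part of the benchmark above): once the loop has run,
# my_dist() is available in the workspace and can be tried on a small matrix, mirroring the
# commented-out all.equal() consistency check above.
#m_small <- matrix(abs(rnorm(9)), 3, 3)
#d_small <- my_dist(m_small, 1, 1)   # distances from pixel (1,1) to every pixel
#d_graph <- distances(square_graph(m_small), v = vnum(1, 1, ncol(m_small)))
#all.equal(as.numeric(t(d_small)), as.numeric(d_graph))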
|
/endemic/shortest_path.R
|
no_license
|
jmarshallnz/encroachr
|
R
| false | false | 4,079 |
r
|
# create a quartic graph and see how much space it takes up
# use igraph to generate the shortest path tree
# then accumulate it up over the tree and repeat for each node
library(igraph)     # provides make_graph(), set_edge_attr() and distances() used below
library(magrittr)   # provides the %>% pipe used in square_graph()
# 1. Create matrix representing the pixel image
n_size <- 2^(1:4)
mem_use <- matrix(0, length(n_size), 1)
time_use <- matrix(0, length(n_size), 5)
for (i in seq_along(n_size)) {
n <- n_size[i]
time_use[i,1] <- system.time({m <- matrix(abs(rnorm(n*n)), n, n)})[3]
# 2. Turn this into a graph. This is massively inefficient
vnum <- function(x, y, n) {
(x-1)*n + y
}
square_graph <- function(m) {
edges <- matrix(0, (nrow(m)-1)*ncol(m) + (ncol(m)-1)*nrow(m), 3)
enum <- 1
for (x in 1:nrow(m)) {
for (y in 1:ncol(m)) {
if (x > 1) {
# add link between x-1 and x
edges[enum,] <- c(vnum(x, y, ncol(m)), vnum(x-1, y, ncol(m)), 1/(m[x,y]*m[x-1,y]))
enum <- enum + 1
}
if (y > 1) {
# add link between y-1 and y
edges[enum,] <- c(vnum(x, y, ncol(m)), vnum(x, y-1, ncol(m)), 1/(m[x,y]*m[x,y-1]))
enum <- enum + 1
}
}
}
make_graph(t(edges[,1:2]), dir=FALSE) %>% set_edge_attr('weight', value = edges[,3])
}
time_use[i,3] <- system.time({
g <- square_graph(m)
})[3]
# now let's see how large it is
mem_use[i,1] <- object.size(g)
# compute the endemic risk to each vertex
endemic_risk <- numeric(n*n)
time_use[i,4] <- system.time({
for (v in 1:(n*n)) {
#cat('processing', v, 'of', n*n, '\n')
# find shortest paths from v to all other vertices. Note this isn't symmetric
# TODO: This would be scaled by the risk at v
risk_contrib <- distances(g, v=v, algorithm='dijkstra')
# add on to our endemic risk
endemic_risk <- endemic_risk + risk_contrib[1,]
}})[3]
endemic_risk1 <- endemic_risk
my_dist <- function(m, x, y) {
# JM version of Dijkstra's algorithm for images. Returns distance
# from (x,y) to all values in the image.
# nodes are coded with (x,y) to make things easier to track
# (may need to replace this later for efficiency purposes)
# initialise our vertex distances to infinity, except for our source node
dist <- matrix(Inf, nrow(m), ncol(m)); dist[x,y] <- 0;
# prev <- array(NA, dim=c(nrow(m), ncol(m), 2))
Q <- 1:(nrow(m)*ncol(m))
# run over and iteratively improve
while(length(Q) > 0) {
# find node with smallest dist in the array inside Q.
cat("length(Q) =", length(Q), "\n")
row <- which.min(dist[Q]) # row in Q
cat("smallest row is", row, "with dist", dist[Q[row]], "\n")
v <- Q[row] # vertex on the grid
# find the neighbours of v...
nb <- function(v) {
x <- (v - 1) %% nrow(m) + 1
y <- (v - 1) %/% nrow(m) + 1
nb <- NULL
if (x > 1)
nb <- c(nb, vnum(x-1, y, nrow(m)))
if (y > 1)
nb <- c(nb, vnum(x, y-1, nrow(m))) #v - nrow(m))
if (x < nrow(m))
nb <- c(nb, vnum(x+1, y, nrow(m))) #v + 1)
if (y < ncol(m))
nb <- c(nb, vnum(x, y+1, nrow(m))) #v + nrow(m))
nb
}
# cat("this corresponds to x=", x, ", y=", y, "with dist", dist[x,y], "\n")
md <- dist[v]
Q <- Q[-row]; # removes the vertex from Q (super inefficient)
# find and update the neighbours
n <- nb(v)
for (i in n) { # TODO: Only really need to check within Q
d = dist[v] + 1/(m[v]*m[i])
if (d < dist[i]) {
dist[i] <- d
}
}
}
dist
}
endemic_risk <- numeric(n*n)
time_use[i,5] <- system.time({
d <- 20
for (x in 1:n) {
for (y in 1:n) {
# find shortest paths from v to all other vertices. Note this isn't symmetric
# TODO: This would be scaled by the risk at v
risk_contrib <- my_dist(m, x, y)
# add on to our endemic risk
endemic_risk <- endemic_risk + as.numeric(t(risk_contrib))
}
}})[3]
# print(all.equal(endemic_risk, endemic_risk1))
}
# repeat for each vertex
|
library(data.table)
library(dplyr)
library(tidyr)
library(xgboost)
library(ggplot2)
library(gridExtra)
library(cowplot)
# xgb <- xgb.load('C:\\Users\\Guillaume\\Documents\\Nes\\Excitebike\\xgb.model')
# xgb.importance(feature_names = c('ypos', 'angle', 'status', 'lane', 'futurepos0', 'futurepos1', 'futurepos2', 'futurepos3', 'action'), model = xgb)
for(i in 1:500){
df.Game_Decision = fread("C:\\Users\\Guillaume\\Documents\\Nes\\Excitebike\\xgbresults.csv")
g1 = ggplot(df.Game_Decision) +
geom_hline(yintercept = 5408.14, size = 1.5, linetype = 'dashed', color = 'red')+
geom_ribbon(aes(x = game), ymin = 4831.8, ymax = 6124.0, alpha = 0.05, fill = 'red')+
geom_point(aes(x = game, y = 4*frame)) +
geom_smooth(aes(x = game, y = 4*frame), span = 1/4, alpha = 0) +
scale_x_continuous(expand = c(0, 0))+
scale_y_continuous(expand = c(0, 0))+
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g2 = ggplot(df.Game_Decision) +
geom_point(aes(x = game, y = err)) +
geom_smooth(aes(x = game, y = err)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g3 = ggplot(df.Game_Decision %>% mutate(actionh = ifelse(is.na(actionh), 0, actionh))) +
geom_point(aes(x = game, y = actionh)) +
geom_smooth(aes(x = game, y = actionh)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g4 = ggplot(df.Game_Decision %>% mutate(actionv = ifelse(is.na(actionv), 0, actionv))) +
geom_point(aes(x = game, y = actionv)) +
geom_smooth(aes(x = game, y = actionv)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g5 =
ggplot(gather(df.Game_Decision, key = 'Feature', Gain, 1:13) %>% mutate(Gain = ifelse(is.na(Gain), 0, Gain))) +
geom_area(aes(x = game, y = Gain, fill = Feature)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank(),
legend.position=c(.8,.75)
)
g6 = ggplot(df.Game_Decision) +
geom_point(aes(x = game, y = niter)) +
geom_smooth(aes(x = game, y = niter)) +
theme_bw()
print(plot_grid(g1, g2, g3, g4, g5, g6, align = 'v', nrow = 6, rel_heights = c(1/6, 1/6, 1/6, 1/6, 1/6, 1/6)))
# print(plot_grid(g1, g2, g3, g4, align = 'v', nrow = 4, rel_heights = c(2/6, 1/6, 1/6, 1/5)))
Sys.sleep(30)
}
|
/Excitebike Results.r
|
no_license
|
guiguilegui/ExciteBike_XGBoost
|
R
| false | false | 2,535 |
r
|
library(data.table)
library(dplyr)
library(tidyr)
library(xgboost)
library(ggplot2)
library(gridExtra)
library(cowplot)
# xgb <- xgb.load('C:\\Users\\Guillaume\\Documents\\Nes\\Excitebike\\xgb.model')
# xgb.importance(feature_names = c('ypos', 'angle', 'status', 'lane', 'futurepos0', 'futurepos1', 'futurepos2', 'futurepos3', 'action'), model = xgb)
for(i in 1:500){
df.Game_Decision = fread("C:\\Users\\Guillaume\\Documents\\Nes\\Excitebike\\xgbresults.csv")
g1 = ggplot(df.Game_Decision) +
geom_hline(yintercept = 5408.14, size = 1.5, linetype = 'dashed', color = 'red')+
geom_ribbon(aes(x = game), ymin = 4831.8, ymax = 6124.0, alpha = 0.05, fill = 'red')+
geom_point(aes(x = game, y = 4*frame)) +
geom_smooth(aes(x = game, y = 4*frame), span = 1/4, alpha = 0) +
scale_x_continuous(expand = c(0, 0))+
scale_y_continuous(expand = c(0, 0))+
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g2 = ggplot(df.Game_Decision) +
geom_point(aes(x = game, y = err)) +
geom_smooth(aes(x = game, y = err)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g3 = ggplot(df.Game_Decision %>% mutate(actionh = ifelse(is.na(actionh), 0, actionh))) +
geom_point(aes(x = game, y = actionh)) +
geom_smooth(aes(x = game, y = actionh)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g4 = ggplot(df.Game_Decision %>% mutate(actionv = ifelse(is.na(actionv), 0, actionv))) +
geom_point(aes(x = game, y = actionv)) +
geom_smooth(aes(x = game, y = actionv)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank()
)
g5 =
ggplot(gather(df.Game_Decision, key = 'Feature', Gain, 1:13) %>% mutate(Gain = ifelse(is.na(Gain), 0, Gain))) +
geom_area(aes(x = game, y = Gain, fill = Feature)) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.ticks.x = element_blank(),
legend.position=c(.8,.75)
)
g6 = ggplot(df.Game_Decision) +
geom_point(aes(x = game, y = niter)) +
geom_smooth(aes(x = game, y = niter)) +
theme_bw()
print(plot_grid(g1, g2, g3, g4, g5, g6, align = 'v', nrow = 6, rel_heights = c(1/6, 1/6, 1/6, 1/6, 1/6, 1/6)))
# print(plot_grid(g1, g2, g3, g4, align = 'v', nrow = 4, rel_heights = c(2/6, 1/6, 1/6, 1/5)))
Sys.sleep(30)
}
|
context('reprs of lists')
test_that('plain lists display correctly', {
expect_identical(repr_html(list(1, 2)), '<ol>
\t<li>1</li>
\t<li>2</li>
</ol>
')
})
test_that('named lists display correctly', {
expect_identical(repr_html(list(a = 1, b = 2)), '<dl>
\t<dt>$a</dt>
\t\t<dd>1</dd>
\t<dt>$b</dt>
\t\t<dd>2</dd>
</dl>
')
})
test_that('lists with unknown element types don’t display', {
methods::setClass('__unknown', methods::representation(n = 'character'))
expect_identical(repr_html(list(1, methods::new('__unknown'))), NULL)
})
test_that('lists with nonexistent names work', {
expect_identical(repr_html(list(a = 0, 1)), '<dl>
\t<dt>$a</dt>
\t\t<dd>0</dd>
\t<dt>[[2]]</dt>
\t\t<dd>1</dd>
</dl>
')
})
|
/tests/testthat/test_repr_list.r
|
no_license
|
daf/repr
|
R
| false | false | 715 |
r
|
context('reprs of lists')
test_that('plain lists display correctly', {
expect_identical(repr_html(list(1, 2)), '<ol>
\t<li>1</li>
\t<li>2</li>
</ol>
')
})
test_that('named lists display correctly', {
expect_identical(repr_html(list(a = 1, b = 2)), '<dl>
\t<dt>$a</dt>
\t\t<dd>1</dd>
\t<dt>$b</dt>
\t\t<dd>2</dd>
</dl>
')
})
test_that('lists with unknown element types don’t display', {
methods::setClass('__unknown', methods::representation(n = 'character'))
expect_identical(repr_html(list(1, methods::new('__unknown'))), NULL)
})
test_that('lists with nonexistent names work', {
expect_identical(repr_html(list(a = 0, 1)), '<dl>
\t<dt>$a</dt>
\t\t<dd>0</dd>
\t<dt>[[2]]</dt>
\t\t<dd>1</dd>
</dl>
')
})
|
#Running around function
wandering<-function(agents){
#Select each agent randomly
randIndex = sample(nrow(agents))
for(i in 1:nrow(agents)){
#For each agent
ind<-randIndex[i]
#Decide a direction
angle<-runif(1,min= 0,max= 2)*pi
#Compute where they are going (assuming a distance to be travelled equal to 1)
agents$x[ind]<-agents$x[ind]+cos(angle)
agents$y[ind]<-agents$y[ind]+sin(angle)
#Make sure that the agents stay in the matrix
if(agents$x[ind]>10)
agents$x[ind]<-agents$x[ind]-20
if(agents$x[ind]<(-10))
agents$x[ind]<-agents$x[ind]+20
if(agents$y[ind]>10)
agents$y[ind]<-agents$y[ind]-20
if(agents$y[ind]<(-10))
agents$y[ind]<-agents$y[ind]+20
}
#Sends the agents matrix back (after having changed their characteristics)
return(agents)
}
runModel<-function(nTime=100,nAgents=10){
#Creates agent matrix
agents<-as.data.frame(matrix(NA,nrow=nAgents,ncol=2,dimnames=list(NULL,c("x","y"))))
#Initialise agents randomly in the matrix
agents$x<-runif(nAgents,-10,10)
agents$y<-runif(nAgents,-10,10)
#Simulations
for(t in 1:nTime){
#Have agents running around
agents<-wandering(agents)
#Show where they are
layout(matrix(c(1), 1, 1))
layout.show(1)
plot(agents$x,agents$y,type='p',main="Agents position",xlab="",ylab="",ylim = c(-10,10))
Sys.sleep(0.1)
}
}
runModel(nTime=20,nAgents=10)
|
/Homeworks/Solution.R
|
no_license
|
antoinegodin/AB-Lectures
|
R
| false | false | 1,421 |
r
|
#Running around function
wandering<-function(agents){
#Select each agent randomly
randIndex = sample(nrow(agents))
for(i in 1:nrow(agents)){
#For each agent
ind<-randIndex[i]
#Decide a direction
angle<-runif(1,min= 0,max= 2)*pi
#Compute where they are going (assuming a distance to be travelled equal to 1)
agents$x[ind]<-agents$x[ind]+cos(angle)
agents$y[ind]<-agents$y[ind]+sin(angle)
#Make sure that the agents stay in the matrix
if(agents$x[ind]>10)
agents$x[ind]<-agents$x[ind]-20
if(agents$x[ind]<(-10))
agents$x[ind]<-agents$x[ind]+20
if(agents$y[ind]>10)
agents$y[ind]<-agents$y[ind]-20
if(agents$y[ind]<(-10))
agents$y[ind]<-agents$y[ind]+20
}
#Sends the agents matrix back (after having changed their characteristics)
return(agents)
}
runModel<-function(nTime=100,nAgents=10){
#Creates agent matrix
agents<-as.data.frame(matrix(NA,nrow=nAgents,ncol=2,dimnames=list(NULL,c("x","y"))))
#Initialise agents randomly in the matrix
agents$x<-runif(nAgents,-10,10)
agents$y<-runif(nAgents,-10,10)
#Simulations
for(t in 1:nTime){
#Have agents running around
agents<-wandering(agents)
#Show where they are
layout(matrix(c(1), 1, 1))
layout.show(1)
plot(agents$x,agents$y,type='p',main="Agents position",xlab="",ylab="",ylim = c(-10,10))
Sys.sleep(0.1)
}
}
runModel(nTime=20,nAgents=10)
|
testlist <- list(c = 167772170L, r = 168435978L)
result <- do.call(landscapemetrics:::triangular_index,testlist)
str(result)
|
/landscapemetrics/inst/testfiles/triangular_index/libFuzzer_triangular_index/triangular_index_valgrind_files/1609955245-test.R
|
no_license
|
akhikolla/newtestfiles-2
|
R
| false | false | 124 |
r
|
testlist <- list(c = 167772170L, r = 168435978L)
result <- do.call(landscapemetrics:::triangular_index,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deviance.bess.R
\name{deviance.bess}
\alias{deviance.bess}
\title{Extract the deviance from a "bess.one" object.}
\usage{
\method{deviance}{bess}(object, best.model = TRUE, ...)
}
\arguments{
\item{object}{A "\code{bess}" object.}
\item{best.model}{Whether only return the loglikelihood of the best model. Default is \code{TRUE}.
If \code{best.model = FALSE}, the loglikelihood of the best models with model size and
\eqn{\lambda} in the original \code{s.list} and \code{lambda.list} (for \code{method = "sequential"})
or in the iteration path (for \code{method = "gsection"}, \code{method = "pgsection"},
and \code{method = "psequential"}) is returned.}
\item{\dots}{additional arguments}
}
\value{
A matrix or vector containing the deviance for each model is returned.
For a \code{bess} object fitted by the \code{sequential} method, each row of the
returned matrix corresponds to a model size in \code{s.list}, and each column to a shrinkage parameter
in \code{lambda.list}.
For a \code{bess} object fitted by \code{gsection}, \code{pgsection} or \code{psequential}, the returned vector
contains the deviance of the fitted model at each iteration. The coefficients of those models can be extracted
from \code{beta.all} and \code{coef0.all} in the \code{bess} object.
}
\description{
Similar to other deviance methods, this function returns the deviance from a fitted "\code{bess.one}" object.
}
\examples{
# Generate simulated data
n <- 200
p <- 20
k <- 5
rho <- 0.4
seed <- 10
Tbeta <- rep(0, p)
Tbeta[1:k*floor(p/k):floor(p/k)] <- rep(1, k)
Data <- gen.data(n, p, k, rho, family = "gaussian", seed = seed)
lm.bss <- bess(Data$x, Data$y, method = "sequential")
deviance(lm.bss)
deviance(lm.bss, best.model = FALSE)
}
\references{
Wen, C., Zhang, A., Quan, S. and Wang, X. (2020). BeSS: An R
Package for Best Subset Selection in Linear, Logistic and Cox Proportional
Hazards Models, \emph{Journal of Statistical Software}, Vol. 94(4).
doi:10.18637/jss.v094.i04.
}
\seealso{
\code{\link{bess}}, \code{\link{summary.bess}}.
}
\author{
Canhong Wen, Aijun Zhang, Shijie Quan, Liyuan Hu, Kangkang Jiang, Yanhang Zhang, Jin Zhu and Xueqin Wang.
}
|
/R/man/deviance.bess.Rd
|
no_license
|
Mamba413/bess
|
R
| false | true | 2,218 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deviance.bess.R
\name{deviance.bess}
\alias{deviance.bess}
\title{Extract the deviance from a "bess.one" object.}
\usage{
\method{deviance}{bess}(object, best.model = TRUE, ...)
}
\arguments{
\item{object}{A "\code{bess}" object.}
\item{best.model}{Whether only return the loglikelihood of the best model. Default is \code{TRUE}.
If \code{best.model = FALSE}, the loglikelihood of the best models with model size and
\eqn{\lambda} in the original \code{s.list} and \code{lambda.list} (for \code{method = "sequential"})
or in the iteration path (for \code{method = "gsection"}, \code{method = "pgsection"},
and \code{method = "psequential"}) is returned.}
\item{\dots}{additional arguments}
}
\value{
A matrix or vector containing the deviance for each model is returned.
For a \code{bess} object fitted by the \code{sequential} method, each row of the
returned matrix corresponds to a model size in \code{s.list}, and each column to a shrinkage parameter
in \code{lambda.list}.
For a \code{bess} object fitted by \code{gsection}, \code{pgsection} or \code{psequential}, the returned vector
contains the deviance of the fitted model at each iteration. The coefficients of those models can be extracted
from \code{beta.all} and \code{coef0.all} in the \code{bess} object.
}
\description{
Similar to other deviance methods, this function returns the deviance from a fitted "\code{bess.one}" object.
}
\examples{
# Generate simulated data
n <- 200
p <- 20
k <- 5
rho <- 0.4
seed <- 10
Tbeta <- rep(0, p)
Tbeta[1:k*floor(p/k):floor(p/k)] <- rep(1, k)
Data <- gen.data(n, p, k, rho, family = "gaussian", seed = seed)
lm.bss <- bess(Data$x, Data$y, method = "sequential")
deviance(lm.bss)
deviance(lm.bss, best.model = FALSE)
}
\references{
Wen, C., Zhang, A., Quan, S. and Wang, X. (2020). BeSS: An R
Package for Best Subset Selection in Linear, Logistic and Cox Proportional
Hazards Models, \emph{Journal of Statistical Software}, Vol. 94(4).
doi:10.18637/jss.v094.i04.
}
\seealso{
\code{\link{bess}}, \code{\link{summary.bess}}.
}
\author{
Canhong Wen, Aijun Zhang, Shijie Quan, Liyuan Hu, Kangkang Jiang, Yanhang Zhang, Jin Zhu and Xueqin Wang.
}
|
source(file.path(path.package('swirl'), 'Courses', 'r_programming_dbs', 'basis.R'))
dbs_on_demand <- function() {
return(submit_dbs_on_demand('r_sequences_of_numbers'))
}
|
/Sequences_of_Numbers/customTests.R
|
no_license
|
darrenredmond/r_programming_dbs
|
R
| false | false | 174 |
r
|
source(file.path(path.package('swirl'), 'Courses', 'r_programming_dbs', 'basis.R'))
dbs_on_demand <- function() {
return(submit_dbs_on_demand('r_sequences_of_numbers'))
}
|
# ========================================================================================================
# Load Libraries
# ========================================================================================================
library(reshape2)
library(data.table)
# download and extract the zip file if not already downloaded
zipFile <- "UCI_HAR_Dataset.zip"  # local download destination (file name assumed; the original script never defined zipFile)
if(!file.exists("UCI HAR Dataset")){
dataURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(dataURL, zipFile)
unzip(zipFile, files = NULL, list = FALSE, overwrite = TRUE, junkpaths = FALSE, exdir = ".", unzip = "internal", setTimes = FALSE)
}
# load test data files X_test.txt and y_test.txt
test.x <- read.table("./UCI HAR Dataset/test/X_test.txt")
test.y <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject.test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# load training data files X_train.txt and y_train.txt
train.x <- read.table("./UCI HAR Dataset/train/X_train.txt")
train.y <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject.train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# ========================================================================================================
# part 1 - Merges the training and the test sets to create one data set (vertically).
# ========================================================================================================
merged.x <- rbind(test.x, train.x)
merged.y <- rbind(test.y, train.y)
merged.subject <- rbind(subject.test, subject.train)
# add feature names to columns
features.names <- read.table("./UCI HAR Dataset/features.txt")
features.names <- features.names$V2
colnames(merged.x) <- features.names
# ========================================================================================================
# part 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
# ========================================================================================================
merged.subset <- merged.x[ , grep("mean|std", colnames(merged.x))]
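# Editor's note (sketch): "mean|std" also matches meanFreq() and angle(...) features; if only
# the mean() and std() measurements are wanted, a stricter pattern could be used instead:
#merged.subset <- merged.x[ , grep("mean\\(\\)|std\\(\\)", colnames(merged.x))]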
# ========================================================================================================
# part 3 - Uses descriptive activity names to name the activities in the data set
# ========================================================================================================
# load activity labels data file
activity.labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
merged.y$activity <- activity.labels[merged.y$V1, 2]
# ========================================================================================================
# part 4 - Appropriately labels the data set with descriptive variable names.
# ========================================================================================================
names(merged.y) <- c("ActivityID", "ActivityLabel")
names(merged.subject) <- "Subject"
# ========================================================================================================
# part 5 - From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
# ========================================================================================================
# merge (bind) all data to a single data set
merged.all <- cbind(merged.subject, merged.y, merged.x)
labels.all <- c("Subject", "ActivityID", "ActivityLabel")
data.labels = setdiff(colnames(merged.all), labels.all)
melted.data = melt(merged.all, id = labels.all, measure.vars = data.labels, na.rm=TRUE)
tidy.data = dcast(melted.data, Subject + ActivityLabel ~ variable, mean)
write.table(tidy.data, file = "./tidy_data.txt", row.names = FALSE)
ThisIsTheEnd <- "ThisIsTheEnd"
|
/Project/run_analysis.R
|
no_license
|
nouhailabensalah/datasciencecoursera
|
R
| false | false | 3,814 |
r
|
# ========================================================================================================
# Load Libraries
# ========================================================================================================
library(reshape2)
library(data.table)
# download and extract the zip file if not already downloaded
zipFile <- "UCI_HAR_Dataset.zip"  # local download destination (file name assumed; the original script never defined zipFile)
if(!file.exists("UCI HAR Dataset")){
dataURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(dataURL, zipFile)
unzip(zipFile, files = NULL, list = FALSE, overwrite = TRUE, junkpaths = FALSE, exdir = ".", unzip = "internal", setTimes = FALSE)
}
# load test data files X_test.txt and y_test.txt
test.x <- read.table("./UCI HAR Dataset/test/X_test.txt")
test.y <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject.test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# load training data files X_train.txt and y_train.txt
train.x <- read.table("./UCI HAR Dataset/train/X_train.txt")
train.y <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject.train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# ========================================================================================================
# part 1 - Merges the training and the test sets to create one data set (vertically).
# ========================================================================================================
merged.x <- rbind(test.x, train.x)
merged.y <- rbind(test.y, train.y)
merged.subject <- rbind(subject.test, subject.train)
# add feature names to columns
features.names <- read.table("./UCI HAR Dataset/features.txt")
features.names <- features.names$V2
colnames(merged.x) <- features.names
# ========================================================================================================
# part 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
# ========================================================================================================
merged.subset <- merged.x[ , grep("mean|std", colnames(merged.x))]
# ========================================================================================================
# part 3 - Uses descriptive activity names to name the activities in the data set
# ========================================================================================================
# load activity labels data file
activity.labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
merged.y$activity <- activity.labels[merged.y$V1, 2]
# ========================================================================================================
# part 4 - Appropriately labels the data set with descriptive variable names.
# ========================================================================================================
names(merged.y) <- c("ActivityID", "ActivityLabel")
names(merged.subject) <- "Subject"
# ========================================================================================================
# part 5 - From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
# ========================================================================================================
# merge (bind) all data to a single data set
merged.all <- cbind(merged.subject, merged.y, merged.x)
labels.all <- c("Subject", "ActivityID", "ActivityLabel")
data.labels = setdiff(colnames(merged.all), labels.all)
melted.data = melt(merged.all, id = labels.all, measure.vars = data.labels, na.rm=TRUE)
tidy.data = dcast(melted.data, Subject + ActivityLabel ~ variable, mean)
write.table(tidy.data, file = "./tidy_data.txt", row.names = FALSE)
ThisIsTheEnd <- "ThisIsTheEnd"
|
# TBMIBIquantifyCellAbundanceinMEPerPatient.R
# Author: Erin McCaffrey
# Date created: 191205
# Overview: This script reads in the csv for the topic loadings for each cell. It assigns each cell to a topic
# (max loading) and then plots the abundance of each cell type in each topic across each patient (ie. one subplot per patient and topic)
library(ggplot2)
library(dplyr)
##...Load in data..##
setwd("/Volumes/GoogleDrive/My Drive/Angelo Lab/MIBIProjects/Human_ATB_paper-cohort/topic_analysis")
topic_data<-read.csv('all_TB_topic_annotation.csv')
##...Quantify cell type abundances in each topic...##
topic_freqs<-as.data.frame(table(topic_data$cell_type, topic_data$topic, topic_data$SampleID))
colnames(topic_freqs)<-c('cell_type','topic','SampleID','count')
##...Plot broken down by cell type..##
imm_order <- unique(topic_freqs$cell_type)
setwd("/Volumes/GoogleDrive/My Drive/Angelo Lab/MIBIProjects/Human_ATB_paper-cohort/all-fields/denoised/dataPerCell_3px")
colorkey<-read.csv('colorkey_R.csv')
colorkey_imm<-droplevels(colorkey[colorkey$imm_order %in% imm_order,])
colorkey_imm$imm_order<-factor(colorkey_imm$imm_order, levels = imm_order)
colorkey_imm<-colorkey_imm[order(colorkey_imm$imm_order),]
color<-as.vector(colorkey_imm$code)
point_order_gran<-c(64, 65, 21, 84, 42, 88, 28, 89, 85, 13, 35, 36, 14, 15, 57, 58, 19, 87, 6, 7, 33, 34, 26, 27, 40, 61, 47, 48, 54, 55)
topic_freqs$SampleID <- factor(topic_freqs$SampleID, levels=point_order_gran)
ggplot(data = topic_freqs, aes(x = SampleID, y = count, fill = cell_type)) +
geom_col() +
theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
labs(x = 'SampleID') +
labs(y = 'Cell Count') +
scale_fill_manual(name = 'Topic', values = color) +
facet_wrap(.~topic, scale='free_y', ncol = 2)
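# Editor's sketch (assumption, not in the original script): the faceted plot could be written
# to disk with ggsave; the file name here is illustrative only.
#ggsave("cell_abundance_per_topic_per_patient.pdf", width = 8, height = 12)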
|
/R/TBMIBIquantifyCellAbundanceinMEPerPatient.R
|
no_license
|
efrancis28/TBMIBI
|
R
| false | false | 1,876 |
r
|
# TBMIBIquantifyCellAbundanceinMEPerPatient.R
# Author: Erin McCaffrey
# Date created: 191205
# Overview: This script reads in the csv for the topic loadings for each cell. It assigns each cell to a topic
# (max loading) and then plots the abundance of each cell type in each topic across each patient (ie. one subplot per patient and topic)
library(ggplot2)
library(dplyr)
##...Load in data..##
setwd("/Volumes/GoogleDrive/My Drive/Angelo Lab/MIBIProjects/Human_ATB_paper-cohort/topic_analysis")
topic_data<-read.csv('all_TB_topic_annotation.csv')
##...Quantify cell type abundances in each topic...##
topic_freqs<-as.data.frame(table(topic_data$cell_type, topic_data$topic, topic_data$SampleID))
colnames(topic_freqs)<-c('cell_type','topic','SampleID','count')
##...Plot broken down by cell type..##
imm_order <- unique(topic_freqs$cell_type)
setwd("/Volumes/GoogleDrive/My Drive/Angelo Lab/MIBIProjects/Human_ATB_paper-cohort/all-fields/denoised/dataPerCell_3px")
colorkey<-read.csv('colorkey_R.csv')
colorkey_imm<-droplevels(colorkey[colorkey$imm_order %in% imm_order,])
colorkey_imm$imm_order<-factor(colorkey_imm$imm_order, levels = imm_order)
colorkey_imm<-colorkey_imm[order(colorkey_imm$imm_order),]
color<-as.vector(colorkey_imm$code)
point_order_gran<-c(64, 65, 21, 84, 42, 88, 28, 89, 85, 13, 35, 36, 14, 15, 57, 58, 19, 87, 6, 7, 33, 34, 26, 27, 40, 61, 47, 48, 54, 55)
topic_freqs$SampleID <- factor(topic_freqs$SampleID, levels=point_order_gran)
ggplot(data = topic_freqs, aes(x = SampleID, y = count, fill = cell_type)) +
geom_col() +
theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
labs(x = 'SampleID') +
labs(y = 'Cell Count') +
scale_fill_manual(name = 'Topic', values = color) +
facet_wrap(.~topic, scale='free_y', ncol = 2)
|
# Practical 2
install.packages("Rcpp")
install.packages("RcppArmadillo")
install.packages("quadprog")
install.packages("fracdiff")
install.packages("curl")
install.packages("forecast")
install.packages("rstan")
install.packages("prophet")
install.packages("mblm")
install.packages("rq")
install.packages("coin")
install.packages("ggpubr")
library(forecast)
library(Rcpp)
library(prophet)
library(Hmisc)
library(dplyr)
library(plyr)
# library(readr)
# library(outliers)
# library(graphics)
##### 1 Description of the datasets #####
item_categories <- read.csv("item_categories.csv")
shops <- read.csv("shops.csv")
items <- read.csv("items.csv")
sample_submission <- read.csv("sample_submission.csv")
test <- read.csv("test.csv")
# vendes (sales data)
vendes <- read.csv("sales_train_v2.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
vendes <- read.csv("sales_train_v2_data_format.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
vendes <- read.csv("dades_totals.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
attach(vendes)
dim(vendes)
summary(vendes)
describe(vendes)
str(vendes)
head(vendes)
str(shops)
head(shops)
str(items)
head(items)
##### 3 Data cleaning ######
# data types assigned to each field
sapply(vendes, function(x) class(x))
# with the describe function from the Hmisc package we can analyse the number of variables in the data.frame
describe(vendes)
describe(item_categories)
describe(items)
# merge sales and item_categories
library(raster)
# merge the two data frames by the common variable, here called 'item_id'
m <- merge(vendes, items, by='item_id')
# check for NA values
sapply(vendes, function(x) sum(is.na(x)))
# check for zero values
subset(vendes, date_block_num==0)
length(subset(vendes, shop_id==0))
subset(vendes, shop_id==0)
subset(vendes, item_id==0)
subset(vendes, item_price==0)
subset(vendes, item_cnt_day==0)
subset(vendes, item_price>40000)
subset(vendes, item_id==13403)
subset(items, item_id==13403)
subset(item_categories, item_category_id==16)
subset(vendes, item_cnt_day>300)
binnedCounts(subset(vendes, item_price<10000)[,"item_price", drop=FALSE])
hist(subset(vendes, item_price<3000 & item_cnt_day<6 & item_cnt_day>0)$item_price) # hist() needs a numeric vector; item_price is plotted here
hist(subset(vendes, item_price<3000 & item_cnt_day==1)$item_price)
# check for outliers
par(mfrow=c(1,5))
boxplot(vendes$date_block_num)
boxplot(vendes$shop_id)
boxplot(vendes$item_id)
boxplot(vendes$item_price)
boxplot(vendes$item_cnt_day)
boxplot (vendes$item_price)
boxplot (vendes$item_cnt_day)
outliersItemId <-boxplot.stats(item_id)$out
indexItemId <- which( item_id %in% outliersItemId)
length(indexItemId)
# (outliersItemCnt is computed further below, in the item_cnt_day section)
# item_price binned into intervals
binnedCounts(vendes[,"item_price", drop=FALSE])
with(vendes, Hist(item_price, scale="frequency", breaks="Sturges", col="darkgray"))
# use boxplot.stats to inspect the outlier values of ITEM_PRICE
outliersItemPrice <-boxplot.stats(item_price)$out
outliersItemPrice
table(outliersItemPrice)
indexItemPrice <- which( item_price %in% outliersItemPrice)
length(indexItemPrice)
# drop the records that contain ITEM_PRICE outliers
vendes<-vendes[-indexItemPrice,]
dim(vendes)
subset(vendes, item_price>2124)
# item_cnt_day binned into intervals
binnedCounts(vendes[,"item_cnt_day", drop=FALSE])
with(vendes, Hist(item_cnt_day, scale="frequency", breaks="Sturges", col="darkgray"))
# use boxplot.stats to inspect the outlier values of ITEM_CNT_DAY
outliersItemCnt <-boxplot.stats(item_cnt_day)$out
outliersItemCnt
indexItemCnt <- which( item_cnt_day %in% outliersItemCnt)
length(indexItemCnt)
# drop the records that contain ITEM_CNT_DAY outliers
vendes<-vendes[-indexItemCnt,]
dim(vendes)
subset(vendes, item_cnt_day>1)
##### 4 Data analysis ######
### Normality check ###
# estimate the parameters of a normal distribution with fitdistr() from the MASS package, for item_price
require(MASS)
ajust <- fitdistr(item_price,"normal")
ajust
# Kolmogorov-Smirnov test for normality; if p < 0.05, normality is rejected
Ks<- ks.test(item_price, "pnorm", mean =ajust$estimate[1], sd= ajust$estimate[2])
Ks
# estimate the parameters of a normal distribution with fitdistr() from the MASS package, for item_cnt_day
require(MASS)
ajust <- fitdistr(item_cnt_day,"normal")
ajust
# Kolmogorov-Smirnov test for normality; if p < 0.05, normality is rejected
Ks<- ks.test(item_cnt_day, "pnorm", mean =ajust$estimate[1], sd= ajust$estimate[2])
Ks
par(mfrow = c(1,1))
# shapiro.test(item_price)   # not run: shapiro.test() requires a sample size between 3 and 5000
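# Workaround sketch (added, not in the original script): shapiro.test() only accepts between
# 3 and 5000 observations, so run it on a random subsample of item_price.
set.seed(123)
shapiro.test(sample(item_price, size = min(5000, length(item_price))))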
###### Objective 1: Which variables influence product price the most? ######
vendes <- read.csv("dades_totals.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
attach(vendes)
str(vendes)
dim(vendes)
head(vendes)
# sample_frac() from the dplyr package
require(dplyr)
mostra <- sample_frac(vendes, 0.05, replace = FALSE)
dim(mostra)
cor.test(x = vendes$ITEM_PRICE, y = vendes$ITEM_CNT_DAY,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_PRICE, y = vendes$DATE_BLOCK_NUM,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_PRICE, y = vendes$SHOP_ID,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_PRICE, y = vendes$ITEM_CATEGORY_ID,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_PRICE, y = vendes$ITEM_ID,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_CNT_DAY, y = vendes$ITEM_PRICE,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_CNT_DAY, y = vendes$DATE_BLOCK_NUM,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_CNT_DAY, y = vendes$SHOP_ID,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_CNT_DAY, y = vendes$ITEM_CATEGORY_ID,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_CNT_DAY, y = vendes$ITEM_ID,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
# use boxplot.stats to inspect the outlier values of ITEM_PRICE
outliersCnt <-boxplot.stats(vendes$ITEM_PRICE)$out
table(outliersCnt)
indexCnt <- which( vendes$ITEM_PRICE %in% outliersCnt)
length(indexCnt)
# drop the records that contain ITEM_PRICE outliers
vendes<-vendes[-indexCnt,]
dim(vendes)
cor.test(x = vendes$ITEM_PRICE, y = vendes$ITEM_CNT_DAY,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_PRICE, y = vendes$DATE_BLOCK_NUM,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
cor.test(x = vendes$ITEM_CNT_DAY, y = vendes$DATE_BLOCK_NUM,
alternative = "two.sided", conf.level = 0.95, method = "spearman")
# Using scatter plots, check whether a linear or monotonic relationship exists.
# If there is no relationship, computing this kind of correlation makes no sense.
require(MASS)
require(ggplot2)
ggplot(data = vendes, aes(x = ITEM_PRICE, y = DATE_BLOCK_NUM)) +
geom_point(colour = "red4") +
ggtitle("Diagrama de dispersió") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))
ggplot(data = vendes, aes(x = ITEM_PRICE, y = ITEM_CNT_DAY)) +
geom_point(colour = "red4") +
ggtitle("Diagrama de dispersió") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))
ggplot(data = vendes, aes(x = ITEM_CNT_DAY, y = DATE_BLOCK_NUM)) +
geom_point(colour = "red4") +
ggtitle("Diagrama de dispersió") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))
###### Objective 2: Is revenue higher during the second half of the year? ######
vendes_1semestre <- read.csv("dataset_primer_semestre.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
vendes_2semestre <- read.csv("dataset_segon_semestre.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
str(vendes_1semestre)
str(vendes_2semestre)
head(vendes_1semestre)
head(vendes_2semestre)
# check normality of the datasets
# estimate the parameters of a normal distribution with fitdistr() from the MASS package, for vendes_1semestre$TOTAL
require(MASS)
ajust <- fitdistr(vendes_1semestre$TOTAL,"normal")
ajust
# Kolmogorov-Smirnov test for normality; if p < 0.05, normality is rejected
Ks<- ks.test(vendes_1semestre$TOTAL, "pnorm", mean =ajust$estimate[1], sd= ajust$estimate[2])
Ks
par(mfrow=c(1,3))
hist(vendes_1semestre$TOTAL, xlab="Semestre 1 - Total", ylab="Freqüència", las=1, main="")
plot(density(vendes_1semestre$TOTAL), xlab="Semestre 1 - Total", ylab="Densitat", las=1, main="")
qqnorm(vendes_1semestre$TOTAL, xlab="Quantils teòrics", ylab="Quantils mostrals", las=1,main="")
qqline(vendes_1semestre$TOTAL)
# estimate the parameters of a normal distribution with fitdistr() from the MASS package, for vendes_2semestre$TOTAL
require(MASS)
ajust <- fitdistr(vendes_2semestre$TOTAL,"normal")
ajust
# Kolmogorov-Smirnov test for normality; if p < 0.05, normality is rejected
Ks<- ks.test(vendes_2semestre$TOTAL, "pnorm", mean =ajust$estimate[1], sd= ajust$estimate[2])
Ks
par(mfrow=c(1,3))
hist(vendes_2semestre$TOTAL, xlab="Semestre 2 - Total", ylab="Freqüència", las=1, main="")
plot(density(vendes_2semestre$TOTAL), xlab="Semestre 2 - Total", ylab="Densitat", las=1, main="")
qqnorm(vendes_2semestre$TOTAL, xlab="Quantils teòrics", ylab="Quantils mostrals", las=1,main="")
qqline(vendes_2semestre$TOTAL)
# compare the variances with the Fligner-Killeen test
fligner.test(x = list(vendes_1semestre$TOTAL,vendes_2semestre$TOTAL))
#test Mann–Whitney–Wilcoxon
wilcox.test(x = vendes_1semestre$TOTAL, y = vendes_2semestre$TOTAL, alternative = "less", mu = 0, paired = FALSE, conf.int = 0.95)
# remove the outliers from vendes_1semestre
# use boxplot.stats to inspect the outlier values of vendes_1semestre$TOTAL
outliers1sem <-boxplot.stats(vendes_1semestre$TOTAL)$out
index1sem <- which( vendes_1semestre$TOTAL %in% outliers1sem)
length(index1sem)
# drop the records of vendes_1semestre that contain outliers
vendes_1semestre<-vendes_1semestre[-index1sem,]
# remove the outliers from vendes_2semestre
# use boxplot.stats to inspect the outlier values of vendes_2semestre$TOTAL
outliers2sem <-boxplot.stats(vendes_2semestre$TOTAL)$out
index2sem <- which( vendes_2semestre$TOTAL %in% outliers2sem)
length(index2sem)
# drop the records of vendes_2semestre that contain outliers
vendes_2semestre<-vendes_2semestre[-index2sem,]
# compare the variances with the Fligner-Killeen test
fligner.test(x = list(vendes_1semestre$TOTAL,vendes_2semestre$TOTAL))
#test Mann–Whitney–Wilcoxon
wilcox.test(x = vendes_1semestre$TOTAL, y = vendes_2semestre$TOTAL, alternative = "less", mu = 0, paired = FALSE, conf.int = 0.95)
boxplot(vendes_1semestre$TOTAL,vendes_2semestre$TOTAL, names=c("semestre1","semestre2"))
# also try the t.test
t.test(vendes_1semestre$TOTAL,vendes_2semestre$TOTAL,alternative = "less")
t.test(vendes_1semestre$TOTAL,vendes_2semestre$TOTAL,alternative = "great")
###### Objective 3: Build regression models to predict product price ######
vendes <- read.csv("dades_totals.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
attach(vendes)
str(vendes)
dim(vendes)
head(vendes)
# sample_frac() from the dplyr package, to draw a sample
require(dplyr)
mostra <- sample_frac(vendes, 0.001, replace = FALSE)
dim(mostra)
#Kendall–Theil Sen Siegel nonparametric linear regression
library(mblm)
set.seed(1234)
model1.k <- mblm(ITEM_PRICE ~ DATE_BLOCK_NUM, data=mostra)
summary(model1.k)
model2.k <- mblm(ITEM_PRICE ~ SHOP_ID, data=mostra)
summary(model2.k)
model3.k <- mblm(ITEM_PRICE ~ ITEM_CATEGORY_ID, data=mostra)
summary(model3.k)
model4.k <- mblm(ITEM_PRICE ~ ITEM_CNT_DAY, data=mostra)
summary(model4.k)
model5.k <- mblm(ITEM_PRICE ~ ITEM_CNT_DAY + DATE_BLOCK_NUM, data=mostra)
summary(model5.k)
# Error returned: "Only linear models are accepted" (mblm() only supports a single predictor)
model6.k <- mblm(ITEM_PRICE ~ ITEM_CNT_DAY + DATE_BLOCK_NUM + ITEM_CATEGORY_ID, data=mostra)
summary(model6.k)
# Error returned: "Only linear models are accepted" (mblm() only supports a single predictor)
newdata <- data.frame(
DATE_BLOCK_NUM = 30
)
predict(model1.k, newdata)
plot(ITEM_PRICE ~ DATE_BLOCK_NUM, data = mostra, pch = 16)
#Quantile regression
install.packages("quantreg")
library(quantreg)
vendes <- read.csv("dades_totals.csv", header=TRUE, sep=",", na.strings="NA", dec=".", strip.white=TRUE)
set.seed(1234)
# sample_frac() from the dplyr package, to draw a sample
require(dplyr)
mostra <- sample_frac(vendes, 0.001, replace = FALSE)
dim(mostra)
model1_q50 <- rq(ITEM_PRICE ~ DATE_BLOCK_NUM, tau = 0.5, data = mostra)
summary(model1_q50)
model2_q50 <- rq(ITEM_PRICE ~ ITEM_CNT_DAY + DATE_BLOCK_NUM, tau = 0.5, data = mostra)
summary(model2_q50)
model3_q50 <- rq(ITEM_PRICE ~ ITEM_CNT_DAY + DATE_BLOCK_NUM + ITEM_CATEGORY_ID, tau = 0.5, data = mostra)
summary(model3_q50)
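# Extension sketch (added): rq() also accepts a vector of quantiles, so several conditional
# quantiles of ITEM_PRICE can be fitted and compared in a single call.
model_q <- rq(ITEM_PRICE ~ ITEM_CNT_DAY + DATE_BLOCK_NUM, tau = c(0.25, 0.5, 0.75), data = mostra)
summary(model_q)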
|
/code/predict_future_sales_cleaning.R
|
no_license
|
mcalzada/predict_future_sales_cleaning
|
R
| false | false | 13,179 |
r
|
library(shiny)
library(tidyverse)
library(lubridate)
library(plotly)
library(data.table)
library(DT)
library(shinythemes)
library(leaflet)
library(geojsonio)
library(sp)
library(scales)
features <- fread("./data/features.csv")
features$date <- ymd(features$date)
points <- fread("./data/points.csv")
mask <- fread("./data/mask.csv")
points = points %>% left_join(mask, by = c("x", "y"))
points = points %>% filter(sugarcane==TRUE,parcel_typ=="Lot Type Parcel") %>% select("x","y","lotplan","segpar")
ui <- navbarPage("Cane Crushers", theme = shinytheme("spacelab"),id= "nav",
tabPanel("Map View",
mainPanel(width=12, column(4,selectInput("Area", h3("Select a Region"),choices = c("Pindi Pindi","Yalboroo"),
selected = "Pindi Pindi"),offset = 0),
column(4,h3("Select a property lot"),offset = 2),
column(leafletOutput("mapPlot",height = "520px"),width = 12))
),
tabPanel("Descriptive View",value = "desc",
sidebarLayout(
sidebarPanel(width = 4,
br(),
selectInput("Address", "Select Address"
,choices = NULL
,selected = NULL),
br(),
sliderInput("daterange", "Date Range",
min = ymd("2016-12-22"),
max = ymd("2019-08-09"),
value = c(ymd("2016-12-22"),ymd("2019-08-09")),
step = 10,timeFormat="%d %b %y",ticks = F),
br()
),
mainPanel(width = 8,
tabsetPanel( type= "tabs",
tabPanel("Description",br(),
column(6,verbatimTextOutput("dollar_made"),align = "center"),
column(6,verbatimTextOutput("harvested_tonnes"),align = "center"),
column(6,verbatimTextOutput("N_value"),align = "center"),
column(6,verbatimTextOutput("water"),align = "center"),
column(12,plotlyOutput("harvestPlot",height = "250px")),
br(),
column(12,plotlyOutput("dollarPlot",height = "250px"))
),
tabPanel("Leaderboard",br(),tags$head(
tags$style(
HTML(
"#DataTables_Table_0_length {
float: right
}
#DataTables_Table_0_filter {
float: left
}
#DataTables_Table_0_paginate {
float: none;
text-align: center;
}
"
))), DTOutput('leaderboard'))
)
)
)
)
)
server <- function(input, output,session) {
# Address Geojson
points_data <- reactive({
points_data <- geojson_read(paste0("./data/lots/Property_address_Queensland_",str_remove(input$Area," "),".geojson"),what= "sp")
points_data <- subset(points_data,LOTPLAN %in% points$lotplan)
points_data
})
# Lot to Address mapping
lot_to_address <- reactive({
lot_to_address <- data.frame(lotplan = points_data()$LOTPLAN, address = paste0("Lot ",points_data()$LOT,", ",points_data()$ADDRESS),stringsAsFactors = F)
lot_to_address
})
# Dataframe containing features per date and lot
timeline_summary <- reactive({
feature_subset <- features %>% filter(date >= input$daterange[1],date <= input$daterange[2])
feature_subset <- feature_subset %>% filter(lotplan %in% lot_to_address()$lotplan)
feature_subset <- feature_subset %>% left_join(lot_to_address(),by ="lotplan")
feature_subset <- feature_subset %>% group_by(address,date) %>% summarise(harvested_tonnes = sum(harvested_tonnes),sugar_made = sum(sugar_made),dollar_made=sum(dollar_made),min_N = mean(min_N,na.rm = T),max_N = mean(max_N,na.rm = T),leaf_water_content = mean(leaf_water_content,na.rm = T))
feature_subset
})
# Subset containing features per lot
lot_summary <- reactive({
lot_summary <- timeline_summary() %>% group_by(address) %>% summarise(harvested_tonnes = sum(harvested_tonnes),sugar_made = sum(sugar_made),dollar_made=sum(dollar_made),min_N = mean(min_N,na.rm = T),max_N = mean(max_N,na.rm = T),leaf_water_content = mean(leaf_water_content,na.rm = T))
lot_summary$timeline <- paste(format(input$daterange[1],"%d %b %y"), "-", format(input$daterange[2],"%d %b %y"))
lot_summary %>% data.frame
})
# Lot boundaries for map
polygon_data <- reactive({
polygon_data <- geojson_read(paste0("./data/lots/Cadastral_data_QLD_CADASTRE_DCDB_",str_remove(input$Area," "),".geojson"),what= "sp")
polygon_data <- subset(polygon_data, LOTPLAN %in% points_data()$LOTPLAN & is.na(FEAT_NAME)==T & LOTPLAN %in% points$lotplan & SEGPAR %in% points$segpar & is.na(LOTPLAN)==F & is.na(SEGPAR)==F)
polygon_data
})
# Update Address list based on area
observe({
choices <- paste0("Lot ",points_data()$LOT,", ",points_data()$ADDRESS)
updateSelectInput(session, "Address",
choices = choices,
selected = NULL)
})
# Create map plot
output$mapPlot <- renderLeaflet({
map <- leaflet(polygon_data()) %>%
addProviderTiles(providers$OpenStreetMap) %>%
addPolygons(layerId = ~LOTPLAN,fillOpacity = 1, weight = 2, smoothFactor = 0.5,opacity = 1.0,fillColor = "green",highlightOptions = highlightOptions(fillColor = "yellow"),label= paste0("Lot ",polygon_data()$LOT,", ",polygon_data()$ADDRESS))
map
})
# Update selected address based on map click
observeEvent(input$mapPlot_shape_click, {
click <- input$mapPlot_shape_click
address_filter <- subset(lot_to_address(),lotplan==click$id)
selection = address_filter$address[1]
updateSelectInput(session, "Address",
selected = selection)
})
# Subset containing features per lot for the selected lot
descriptive_summary <- reactive({
lot_summary() %>% filter(address == input$Address) %>% data.frame
})
# Subset containing features per date and lot for the selected lot
descriptive_timeline <- reactive({
descriptive_timeline <- timeline_summary() %>% filter(address == input$Address) %>% data.frame
descriptive_timeline$date <- as.Date(descriptive_timeline$date,"%d-%m-%Y")
descriptive_timeline
})
# Text outputs for description page
output$dollar_made <- renderPrint({
cat("Estimated Revenue: $",round(descriptive_summary()$dollar_made[1],2))
})
output$harvested_tonnes <- renderPrint({
cat("Amount of Sugarcane Harvested:", round(descriptive_summary()$harvested_tonnes[1],2),"Tonnes")
})
output$N_value <- renderPrint({
if(descriptive_summary()$min_N[1] < 110){usage = "Low fertilizer usage"}
else if(descriptive_summary()$min_N[1] < 120){usage = "Optimal fertilizer usage"}
else{usage = "Extreme fertilizer usage"}
cat("Nitrogen Content:", round(descriptive_summary()$min_N[1],0),"-",round(descriptive_summary()$max_N[1],0),"KgN/Ha,",usage)
})
output$water <- renderPrint({
if(descriptive_summary()$leaf_water_content[1] < 0.62){content = "Water content in the cane lower than expected"}
else{content = "Optimal water content in the cane"}
cat(content)
})
# Plots for description page
output$harvestPlot <- renderPlotly({
plot = ggplot(data = descriptive_timeline(),aes(x=date,y=harvested_tonnes, group=1 )) + geom_point(colour = "blue") + geom_line(colour = "blue") + scale_x_date(date_breaks = "100 days",date_labels = "%d-%b-%Y") + xlab("Date") + ylab("Sugarcane harvested (Tonnes)")
ggplotly(plot)
})
output$dollarPlot <- renderPlotly({
plot = ggplot(data = descriptive_timeline(),aes(x=date,y=dollar_made, group=1)) + geom_point(colour = "green") + geom_line(colour = "green") + scale_x_date(date_breaks = "100 days",date_labels = "%d-%b-%Y") + scale_y_continuous(labels = dollar) + xlab("Date") + ylab("Estimated Revenue")
ggplotly(plot)
})
# Leaderboard table
output$leaderboard <- renderDT({
leaderboard <- lot_summary() %>% select("address","harvested_tonnes","dollar_made") %>% data.frame
leaderboard$rank <- rank(-leaderboard$dollar_made)
leaderboard$harvested_tonnes <- round(leaderboard$harvested_tonnes,2)
leaderboard$dollar_made <- round(leaderboard$dollar_made,2)
colnames(leaderboard) = c('Address', "Sugarcane Harvested (Tonnes)", "Estimated Revenue","Rank")
leaderboard <- leaderboard %>% select(4,1,2,3)
datatable(leaderboard %>% arrange(Rank),rownames= FALSE,
options = list(
autoWidth = TRUE,
columnDefs = list(list(className = 'dt-center', targets = "_all"))
)) %>% formatStyle('Address',`text-align` = 'left') %>% formatStyle(
'Address',
target = 'row',
backgroundColor = styleEqual(input$Address, '#FCF8E3'),fontWeight = styleEqual(input$Address, 'bold')) %>%
formatCurrency('Estimated Revenue',interval = 3,mark=',',currency = "$") %>% formatRound("Sugarcane Harvested (Tonnes)",interval = 3,mark=',')
})
observeEvent(input$mapPlot_shape_click, {
updateNavbarPage(session, "nav", selected = "desc")
})
}
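# Stand-alone sanity check (added sketch, not part of the app): the aggregation logic of
# timeline_summary() applied to a tiny hand-made table. Column names mirror the ones used
# above; the values are invented purely for illustration. Wrapped in if (FALSE) and placed
# before shinyApp() so that app.R still ends by returning the app object.
if (FALSE) {
  toy <- data.frame(address = c("Lot 1, A", "Lot 1, A", "Lot 2, B"),
                    date = ymd(c("2017-01-01", "2017-02-01", "2017-01-01")),
                    harvested_tonnes = c(10, 12, 7),
                    sugar_made = c(1.2, 1.4, 0.9),
                    dollar_made = c(500, 620, 340),
                    min_N = c(100, 110, 95),
                    max_N = c(120, 130, 118),
                    leaf_water_content = c(0.60, 0.65, 0.58))
  toy %>%
    group_by(address, date) %>%
    summarise(harvested_tonnes = sum(harvested_tonnes),
              dollar_made = sum(dollar_made),
              min_N = mean(min_N), max_N = mean(max_N),
              leaf_water_content = mean(leaf_water_content))
}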
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
Canecrushers/shiny-app
|
R
| false | false | 12,256 |
r
|
####**********************************************************************
####**********************************************************************
####
#### RANDOM SURVIVAL FOREST 3.6.4
####
#### Copyright 2013, Cleveland Clinic Foundation
####
#### This program is free software; you can redistribute it and/or
#### modify it under the terms of the GNU General Public License
#### as published by the Free Software Foundation; either version 2
#### of the License, or (at your option) any later version.
####
#### This program is distributed in the hope that it will be useful,
#### but WITHOUT ANY WARRANTY; without even the implied warranty of
#### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#### GNU General Public License for more details.
####
#### You should have received a copy of the GNU General Public
#### License along with this program; if not, write to the Free
#### Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
#### Boston, MA 02110-1301, USA.
####
#### Written by:
#### Hemant Ishwaran, Ph.D.
#### Director of Statistical Methodology
#### Professor, Division of Biostatistics
#### Clinical Research Building, Room 1058
#### 1120 NW 14th Street
#### University of Miami, Miami FL 33136
####
#### email: hemant.ishwaran@gmail.com
#### URL: http://web.ccs.miami.edu/~hishwaran
#### --------------------------------------------------------------
#### Udaya B. Kogalur, Ph.D.
#### Adjunct Staff
#### Dept of Quantitative Health Sciences
#### Cleveland Clinic Foundation
####
#### Kogalur & Company, Inc.
#### 5425 Nestleway Drive, Suite L1
#### Clemmons, NC 27012
####
#### email: commerce@kogalur.com
#### URL: http://www.kogalur.com
#### --------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
print.rsf <- function(x, ...) {
if (sum(inherits(x, c("rsf", "forest"), TRUE) == c(1, 2)) == 2) {
print.default(x)
return()
}
if (!is.null(x$cens)) {
events <- na.omit(x$cens)[na.omit(x$cens) > 0]
n.event <- length(unique(events))
event.freq <- paste(tapply(events, events, length), collapse = ", ")
}
else {
n.event <- 1
}
if (!is.null(x$err.rate)) {
x$err.rate <- rbind(x$err.rate)
err.rate <- round(100*x$err.rate[, ncol(x$err.rate)], 2)
if (!all(is.na(err.rate))) {
err.rate <- paste(err.rate, "%", collapse=", ", sep = "")
}
else {
err.rate <- NULL
}
}
else {
err.rate <- NULL
}
if (is.null(x$nsplit)) x$nsplit <- 0
if (sum(inherits(x, c("rsf", "grow"), TRUE) == c(1, 2)) != 2 &
sum(inherits(x, c("rsf", "predict"), TRUE) == c(1, 2)) != 2)
stop("This function only works for objects of class `(rsf, grow)' or '(rsf, predict)'.")
if (sum(inherits(x, c("rsf", "grow"), TRUE) == c(1, 2)) == 2) {
cat("\nCall:\n", deparse(x$call), "\n\n")
cat(" Sample size: ", x$n, "\n", sep="")
if (n.event == 1) {
cat(" Number of deaths: ", x$ndead, "\n", sep="")
}
else {
cat(" Number of events: ", event.freq, "\n", sep="")
}
if (!is.null(x$imputedIndv)) {
cat(" Was data imputed: ", "yes", "\n", sep="")
cat(" Missingness: ",
round(100*length(x$imputedIndv)/x$n,2), "%\n", sep="")
}
cat(" Number of trees: ", x$ntree, "\n",sep="")
cat(" Minimum terminal node size: ", x$nodesize, "\n", sep="")
cat(" Average no. of terminal nodes: ", mean(x$leaf.count), "\n", sep="")
cat("No. of variables tried at each split: ", x$mtry, "\n", sep="")
cat(" Total no. of variables: ", length(x$predictorNames), "\n", sep="")
if (x$nsplit > 0 & x$splitrule != "random") {
cat(" Splitting rule: ", paste(x$splitrule,"*random*"), "\n", sep="")
cat(" Number of random split points: ", x$nsplit , "\n", sep="")
}
else {
cat(" Splitting rule: ", x$splitrule, "\n", sep="")
}
cat(" Estimate of error rate: ", err.rate, "\n\n", sep="")
}
else {
cat("\nCall:\n", deparse(x$call), "\n\n")
cat(" Sample size of test (predict) data: ", x$n, "\n", sep="")
if (!is.null(x$cens)) {
if (n.event == 1) {
cat(" Number of deaths in test data: ", x$ndead, "\n", sep="")
}
else {
cat(" Number of events in test data: ", event.freq, "\n", sep="")
}
}
if (!is.null(x$imputedData)) {
cat(" Was test data imputed: ", "yes", "\n", sep="")
cat(" Missingness: ",
round(100*length(x$imputedIndv)/x$n,2), "%\n", sep="")
}
cat(" Number of grow trees: ", x$ntree, "\n",sep="")
cat(" Average no. of grow terminal nodes: ", mean(x$leaf.count), "\n", sep="")
cat(" Total no. of grow variables: ", length(x$predictorNames), "\n", sep="")
if (!is.null(err.rate)) {
cat(" Test error rate: ", err.rate, "\n\n", sep="")
}
}
}
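## Illustrative usage (added sketch, commented out: it requires the archived
## randomSurvivalForest package and its bundled veteran data):
## library(randomSurvivalForest)
## data(veteran, package = "randomSurvivalForest")
## v.grow <- rsf(Survrsf(time, status) ~ ., data = veteran, ntree = 100)
## print.rsf(v.grow)   # normally dispatched automatically via print(v.grow)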
|
/R/print.rsf.R
|
no_license
|
cran/randomSurvivalForest
|
R
| false | false | 5,525 |
r
|
library(lmerTest)
library(MuMIn)
library(car)
library(ggplot2)
library(beanplot)
library(effsize)
# split the time variable at CI adoption: time observed before vs. after adopting CI
data$time_before_ci<-ifelse(data$CI_adopted==0,data$time,0)
data$time_after_ci<-ifelse(data$CI_adopted==1,data$time,0)
# trim the top 3% of Ref.Breadth values to limit the influence of extreme outliers
filterData<- subset(data, data$Ref.Breadth<quantile(data$Ref.Breadth, 0.97))
model <- lm(Ref.Breadth~ time_before_ci +time_after_ci
+ log(TotalDev) + log(AgeAtCI)+ log(nbrReleases)+log(TotalComm)
,data = filterData)
summary(model)
anova(model)
vif(model)
ggplot(data, aes(x=time, y= Ref.Breadth,group=time)) +geom_boxplot()#+ coord_cartesian(ylim = c(0,20))
ggplot(filterData, aes(x=time, y= Ref.Breadth,group=time)) +geom_boxplot()#+ coord_cartesian(ylim = c(0,20))
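# Possible mixed-effects variant (added sketch): lmerTest is loaded above but never used; a random
# intercept per project would look like the call below. "project" is only a placeholder for whatever
# grouping column the dataset actually provides, so the call is left commented out.
# model.re <- lmer(Ref.Breadth ~ time_before_ci + time_after_ci
#                  + log(TotalDev) + log(AgeAtCI) + log(nbrReleases) + log(TotalComm)
#                  + (1 | project), data = filterData)
# summary(model.re)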
|
/script R.R
|
no_license
|
ci-ref/replication-package
|
R
| false | false | 727 |
r
|
.Random.seed <-
c(403L, 96L, 1793153639L, 625602485L, 307531749L, -1225712580L,
-1017251838L, 2140837563L, -1487036697L, 1472558359L, -1875002080L,
1878212252L, 1999907631L, 1408892741L, -1928876932L, -1166554983L,
2066061675L, -1214060594L, -1363037676L, 444453990L, 786126460L,
-744308775L, 1678603075L, 1153253180L, 1711531516L, -1786857214L,
-1754340697L, 1796620271L, 1915832912L, -2102026616L, 1672887443L,
-34925476L, 68698202L, 1426041635L, 1708838213L, 1807206507L,
696083127L, 731608966L, 1397594688L, 1758084833L, -1471643323L,
1306818973L, -685851050L, -1807538622L, 2025772753L, 624325663L,
-165785358L, -1007087937L, -1712150903L, 1896324196L, 1514783082L,
-12491508L, -2046594258L, -1201329917L, -262725199L, -250767862L,
363253178L, -1013254840L, -1098289371L, -143944795L, 362637010L,
-683298174L, 1119691073L, 814756778L, -249751736L, 245666425L,
1618490691L, 769292129L, -662413247L, 811794408L, 90449294L,
-1622984809L, -724604981L, -945576309L, -624683548L, 578972008L,
-1750635005L, -292877671L, 1077840592L, 958836597L, 1089350903L,
1508049178L, -1881648728L, -1815311158L, 349783120L, -620221363L,
-545588601L, -373596624L, 397086840L, -2138978098L, 1521302851L,
1523073875L, 351493564L, 872848932L, 796826639L, 991840272L,
1624237150L, 377550135L, 705762753L, 1845407495L, 1602654419L,
1521000562L, 1569902748L, 2079765197L, -1175833447L, -1442945855L,
1939187274L, -1125010306L, 287428725L, 1981085091L, 1337904502L,
40494507L, 605549861L, -1503004368L, -1840450194L, -228894254L,
1523370217L, 956441073L, -719650497L, 295417777L, -162092246L,
1568736273L, 1392588824L, -956145279L, -2012274385L, 1520514846L,
-1348105195L, 1376935811L, 1779278510L, 1576247530L, -1470397864L,
-808041229L, 1943573882L, -1934234050L, -1380408372L, 1556298256L,
-2147177641L, -1736261186L, -528038283L, -71699632L, -1583855510L,
-1945511959L, -366135008L, -1953243680L, 576725277L, -287007643L,
-172890857L, 984056260L, -646752169L, -1615590097L, -778790183L,
505963251L, 1437118608L, -86430553L, -294193842L, 648538071L,
46191953L, 158749672L, -106075477L, 2147077153L, -404794856L,
741637316L, 524264082L, 1466604725L, 1277621072L, 1372502092L,
-1867887066L, 900447586L, 599472141L, 544343356L, -1777022085L,
577318966L, -284839796L, -1107102401L, 1480233104L, -1352380275L,
-1229120756L, -199491719L, 450715897L, 1336009571L, -971204016L,
1575161325L, -1895175327L, -907849726L, -92206415L, -2058170481L,
-853362784L, 1674647860L, -1319527222L, 1443854598L, 1589637615L,
-915851132L, 2140563745L, 259468896L, 1528564956L, 1721674986L,
-2079975477L, -1841947470L, 572079194L, -848886613L, 1581198956L,
-188537280L, 1619163837L, 137670669L, 276047823L, 535022561L,
380123018L, 930761551L, 1856055610L, 1176640943L, -2080602557L,
-1567654675L, -1713817498L, -1671237493L, -2099658689L, -1035139024L,
-4152069L, 800371745L, 192589518L, 336719722L, 1688039612L, 1804766120L,
1786255305L, -2104848618L, 662860719L, 1921322518L, 1719154854L,
-1098180268L, -1057194287L, 1889491024L, -1700053488L, 872474313L,
-1933553130L, 504874930L, 1405607371L, -1595730621L, -1943380927L,
-1106725469L, -221518556L, 1684891081L, 817737568L, 1428585213L,
-306720883L, 1942197807L, 1740913708L, -103002775L, -420761611L,
380321222L, -317029011L, -1353149181L, 251986900L, 1160456200L,
30758934L, -559616630L, -1624200765L, 263618080L, -2076103915L,
-1741590044L, -699721568L, 112160934L, -159032329L, 563837214L,
-237726770L, 1768425487L, 182810968L, -1717530396L, 62333905L,
-1691400911L, -1312666341L, -80757531L, 2055387038L, 1476848331L,
1860338542L, 1429158691L, -1336495785L, -2085744151L, -427221390L,
1143641495L, 1176739907L, -23745772L, -1838236937L, 329752197L,
-1997733198L, 1755384718L, 2126970785L, 1440931646L, 766735230L,
1018973847L, 1980694363L, -595058212L, -24125585L, 826801779L,
-1154563775L, -99520621L, -943129556L, -1747809391L, -212870829L,
-721601593L, 1414016531L, 1941434731L, 1674125122L, 696038971L,
-484038219L, -589480234L, 1737168738L, -176161555L, -834905554L,
-683816070L, -1163684438L, 105142444L, 1301778139L, 742151632L,
1018521014L, 1823899264L, 552789566L, -854271330L, 276127423L,
-140056736L, -1005368040L, 1630276909L, -1834983187L, -1569735766L,
-1402321467L, -27118379L, -969786133L, -106853919L, -286509346L,
907793795L, -2050116807L, -1713690315L, -653584835L, 280472421L,
1563489092L, -974542815L, 1401666635L, 339440744L, -640380544L,
-449217665L, -1460435072L, -909920032L, 1959195904L, 2105152870L,
1022709697L, 2072641862L, 175753160L, -430167702L, -1742612092L,
507012036L, 780217189L, 2024095418L, 1894738922L, -1742088573L,
1495905095L, -393320752L, -1569678157L, -523139337L, 702590381L,
-445286289L, 905278680L, 380403173L, -2145030249L, -1425250349L,
-1583710137L, 528590047L, -393159986L, -184367473L, 2091030313L,
-41630838L, -907817178L, -1483067823L, 165981458L, -1462993010L,
-331382914L, -1229725600L, -1120725257L, -1856119812L, -1388610526L,
-1854799292L, -878099182L, -163504214L, -86601685L, -1208287756L,
-1089992628L, 1368163553L, 280946721L, -1094755578L, -1805605983L,
451688849L, 1005675967L, 1832481829L, -517785006L, 1030218847L,
2122086805L, -1289798855L, -296819879L, -1472770623L, 1923229592L,
-520367331L, -1838517769L, -107535196L, -1709713880L, 1853493485L,
1600022863L, 1969735431L, -14057598L, 208036874L, -2101923136L,
-26796569L, 19764348L, -362626060L, 126503759L, 1695753239L,
-1087645693L, -383492366L, -2006717081L, -590557522L, 1719489647L,
1412785322L, 832019580L, -1017018168L, 995837119L, 254665387L,
-959527527L, -1898934980L, -511448525L, 1348357701L, 2062618188L,
-1621901852L, -414192232L, 1564585759L, -1954834252L, 992764007L,
440382606L, 947188035L, -1622550459L, -606565207L, 1265990444L,
-1855774980L, 1200250630L, -1607022475L, -1791227202L, -561207398L,
179589609L, 1377079513L, 1029621117L, 1627626300L, -1210181631L,
1449780724L, 695077753L, 265354412L, 2007043222L, -754831682L,
8701589L, 97233673L, 522275227L, -929083474L, 1924395753L, -323595561L,
-1530497310L, 1170380602L, 2011416350L, 800993989L, 527903978L,
-1237521719L, -1300380004L, -1827561471L, -502069589L, -1530060685L,
46828918L, -307518170L, -113614116L, 1166688243L, -1978373536L,
-1071380304L, -2063158125L, 807637379L, -40082793L, 973052166L,
-727978429L, 1650011562L, -2034296461L, -2111040634L, -671418112L,
2048163100L, -665928813L, -1230546409L, -1346315171L, -2086301736L,
1607202311L, -1554006815L, -1786925112L, -230784864L, -889679788L,
21178299L, 828195528L, -1716744581L, -507899590L, 1669294463L,
-295086663L, -293587027L, 1983288008L, -1308796016L, 876408618L,
-1776142983L, 1342295546L, 1016793422L, 618164229L, 1999859485L,
1467429241L, -1700319400L, -1808927291L, 979009393L, 996308871L,
991999779L, -104788665L, -757946654L, -521390361L, -800479336L,
-2007274636L, -1972882066L, -155669846L, -27013101L, 41433962L,
236755103L, -1788471853L, 1887530645L, -1785813382L, 1690078858L,
2105093228L, -179084056L, 1763196084L, -679756183L, -798871370L,
-760789827L, 700716905L, -860358419L, 867997325L, 1641942446L,
-942688707L, -630889808L, 1157220236L, 1050769164L, 1562956013L,
315505387L, -280257223L, -1841329971L, -840402555L, 508719332L,
-1467725019L, -508943326L, 774330166L, -2124611316L, -1623382736L,
-1121109215L, -2027884476L, 1858240017L, -44782771L, -1421861969L,
-281173556L, 611669740L, -640849746L, 2029304274L, -96345822L,
1364598355L, -795798644L, -62749425L, 2128495067L, 696178707L,
13230771L, 1039501670L, -1140211436L, 16886940L, 1644573884L,
-836764375L, -1683468669L, -440456424L, -577064702L, 999147207L,
1174749218L, 1955230774L, 856107883L, -1646783775L, 407335258L,
-238915099L, 90133729L, 191401793L, -1936352039L, -1155445551L,
772538601L, -1740641926L, -841596290L, 424856151L, 1952913485L,
492365864L, 1636178709L, -1826040687L, 1481583134L, 1518607736L,
1435182227L, 1970241634L, 2117107426L, -529994152L, 1032996394L,
-2054467086L, 1351250610L, -811915993L, 883692901L, 1394395354L,
1642132348L, -177577159L, -647353352L, 647781664L, 1917650625L,
-1363360045L, -1272024256L, -1388383633L, -1693505517L, -1471229621L,
-1918990617L, 1810820431L, 814590615L, -666361768L, -432503086L,
2033202338L, 586066856L, 1041394715L)
|
/lmeNB/R/nbinomRE-internal.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 8,255 |
r
|
.Random.seed <-
c(403L, 96L, 1793153639L, 625602485L, 307531749L, -1225712580L,
-1017251838L, 2140837563L, -1487036697L, 1472558359L, -1875002080L,
1878212252L, 1999907631L, 1408892741L, -1928876932L, -1166554983L,
2066061675L, -1214060594L, -1363037676L, 444453990L, 786126460L,
-744308775L, 1678603075L, 1153253180L, 1711531516L, -1786857214L,
-1754340697L, 1796620271L, 1915832912L, -2102026616L, 1672887443L,
-34925476L, 68698202L, 1426041635L, 1708838213L, 1807206507L,
696083127L, 731608966L, 1397594688L, 1758084833L, -1471643323L,
1306818973L, -685851050L, -1807538622L, 2025772753L, 624325663L,
-165785358L, -1007087937L, -1712150903L, 1896324196L, 1514783082L,
-12491508L, -2046594258L, -1201329917L, -262725199L, -250767862L,
363253178L, -1013254840L, -1098289371L, -143944795L, 362637010L,
-683298174L, 1119691073L, 814756778L, -249751736L, 245666425L,
1618490691L, 769292129L, -662413247L, 811794408L, 90449294L,
-1622984809L, -724604981L, -945576309L, -624683548L, 578972008L,
-1750635005L, -292877671L, 1077840592L, 958836597L, 1089350903L,
1508049178L, -1881648728L, -1815311158L, 349783120L, -620221363L,
-545588601L, -373596624L, 397086840L, -2138978098L, 1521302851L,
1523073875L, 351493564L, 872848932L, 796826639L, 991840272L,
1624237150L, 377550135L, 705762753L, 1845407495L, 1602654419L,
1521000562L, 1569902748L, 2079765197L, -1175833447L, -1442945855L,
1939187274L, -1125010306L, 287428725L, 1981085091L, 1337904502L,
40494507L, 605549861L, -1503004368L, -1840450194L, -228894254L,
1523370217L, 956441073L, -719650497L, 295417777L, -162092246L,
1568736273L, 1392588824L, -956145279L, -2012274385L, 1520514846L,
-1348105195L, 1376935811L, 1779278510L, 1576247530L, -1470397864L,
-808041229L, 1943573882L, -1934234050L, -1380408372L, 1556298256L,
-2147177641L, -1736261186L, -528038283L, -71699632L, -1583855510L,
-1945511959L, -366135008L, -1953243680L, 576725277L, -287007643L,
-172890857L, 984056260L, -646752169L, -1615590097L, -778790183L,
505963251L, 1437118608L, -86430553L, -294193842L, 648538071L,
46191953L, 158749672L, -106075477L, 2147077153L, -404794856L,
741637316L, 524264082L, 1466604725L, 1277621072L, 1372502092L,
-1867887066L, 900447586L, 599472141L, 544343356L, -1777022085L,
577318966L, -284839796L, -1107102401L, 1480233104L, -1352380275L,
-1229120756L, -199491719L, 450715897L, 1336009571L, -971204016L,
1575161325L, -1895175327L, -907849726L, -92206415L, -2058170481L,
-853362784L, 1674647860L, -1319527222L, 1443854598L, 1589637615L,
-915851132L, 2140563745L, 259468896L, 1528564956L, 1721674986L,
-2079975477L, -1841947470L, 572079194L, -848886613L, 1581198956L,
-188537280L, 1619163837L, 137670669L, 276047823L, 535022561L,
380123018L, 930761551L, 1856055610L, 1176640943L, -2080602557L,
-1567654675L, -1713817498L, -1671237493L, -2099658689L, -1035139024L,
-4152069L, 800371745L, 192589518L, 336719722L, 1688039612L, 1804766120L,
1786255305L, -2104848618L, 662860719L, 1921322518L, 1719154854L,
-1098180268L, -1057194287L, 1889491024L, -1700053488L, 872474313L,
-1933553130L, 504874930L, 1405607371L, -1595730621L, -1943380927L,
-1106725469L, -221518556L, 1684891081L, 817737568L, 1428585213L,
-306720883L, 1942197807L, 1740913708L, -103002775L, -420761611L,
380321222L, -317029011L, -1353149181L, 251986900L, 1160456200L,
30758934L, -559616630L, -1624200765L, 263618080L, -2076103915L,
-1741590044L, -699721568L, 112160934L, -159032329L, 563837214L,
-237726770L, 1768425487L, 182810968L, -1717530396L, 62333905L,
-1691400911L, -1312666341L, -80757531L, 2055387038L, 1476848331L,
1860338542L, 1429158691L, -1336495785L, -2085744151L, -427221390L,
1143641495L, 1176739907L, -23745772L, -1838236937L, 329752197L,
-1997733198L, 1755384718L, 2126970785L, 1440931646L, 766735230L,
1018973847L, 1980694363L, -595058212L, -24125585L, 826801779L,
-1154563775L, -99520621L, -943129556L, -1747809391L, -212870829L,
-721601593L, 1414016531L, 1941434731L, 1674125122L, 696038971L,
-484038219L, -589480234L, 1737168738L, -176161555L, -834905554L,
-683816070L, -1163684438L, 105142444L, 1301778139L, 742151632L,
1018521014L, 1823899264L, 552789566L, -854271330L, 276127423L,
-140056736L, -1005368040L, 1630276909L, -1834983187L, -1569735766L,
-1402321467L, -27118379L, -969786133L, -106853919L, -286509346L,
907793795L, -2050116807L, -1713690315L, -653584835L, 280472421L,
1563489092L, -974542815L, 1401666635L, 339440744L, -640380544L,
-449217665L, -1460435072L, -909920032L, 1959195904L, 2105152870L,
1022709697L, 2072641862L, 175753160L, -430167702L, -1742612092L,
507012036L, 780217189L, 2024095418L, 1894738922L, -1742088573L,
1495905095L, -393320752L, -1569678157L, -523139337L, 702590381L,
-445286289L, 905278680L, 380403173L, -2145030249L, -1425250349L,
-1583710137L, 528590047L, -393159986L, -184367473L, 2091030313L,
-41630838L, -907817178L, -1483067823L, 165981458L, -1462993010L,
-331382914L, -1229725600L, -1120725257L, -1856119812L, -1388610526L,
-1854799292L, -878099182L, -163504214L, -86601685L, -1208287756L,
-1089992628L, 1368163553L, 280946721L, -1094755578L, -1805605983L,
451688849L, 1005675967L, 1832481829L, -517785006L, 1030218847L,
2122086805L, -1289798855L, -296819879L, -1472770623L, 1923229592L,
-520367331L, -1838517769L, -107535196L, -1709713880L, 1853493485L,
1600022863L, 1969735431L, -14057598L, 208036874L, -2101923136L,
-26796569L, 19764348L, -362626060L, 126503759L, 1695753239L,
-1087645693L, -383492366L, -2006717081L, -590557522L, 1719489647L,
1412785322L, 832019580L, -1017018168L, 995837119L, 254665387L,
-959527527L, -1898934980L, -511448525L, 1348357701L, 2062618188L,
-1621901852L, -414192232L, 1564585759L, -1954834252L, 992764007L,
440382606L, 947188035L, -1622550459L, -606565207L, 1265990444L,
-1855774980L, 1200250630L, -1607022475L, -1791227202L, -561207398L,
179589609L, 1377079513L, 1029621117L, 1627626300L, -1210181631L,
1449780724L, 695077753L, 265354412L, 2007043222L, -754831682L,
8701589L, 97233673L, 522275227L, -929083474L, 1924395753L, -323595561L,
-1530497310L, 1170380602L, 2011416350L, 800993989L, 527903978L,
-1237521719L, -1300380004L, -1827561471L, -502069589L, -1530060685L,
46828918L, -307518170L, -113614116L, 1166688243L, -1978373536L,
-1071380304L, -2063158125L, 807637379L, -40082793L, 973052166L,
-727978429L, 1650011562L, -2034296461L, -2111040634L, -671418112L,
2048163100L, -665928813L, -1230546409L, -1346315171L, -2086301736L,
1607202311L, -1554006815L, -1786925112L, -230784864L, -889679788L,
21178299L, 828195528L, -1716744581L, -507899590L, 1669294463L,
-295086663L, -293587027L, 1983288008L, -1308796016L, 876408618L,
-1776142983L, 1342295546L, 1016793422L, 618164229L, 1999859485L,
1467429241L, -1700319400L, -1808927291L, 979009393L, 996308871L,
991999779L, -104788665L, -757946654L, -521390361L, -800479336L,
-2007274636L, -1972882066L, -155669846L, -27013101L, 41433962L,
236755103L, -1788471853L, 1887530645L, -1785813382L, 1690078858L,
2105093228L, -179084056L, 1763196084L, -679756183L, -798871370L,
-760789827L, 700716905L, -860358419L, 867997325L, 1641942446L,
-942688707L, -630889808L, 1157220236L, 1050769164L, 1562956013L,
315505387L, -280257223L, -1841329971L, -840402555L, 508719332L,
-1467725019L, -508943326L, 774330166L, -2124611316L, -1623382736L,
-1121109215L, -2027884476L, 1858240017L, -44782771L, -1421861969L,
-281173556L, 611669740L, -640849746L, 2029304274L, -96345822L,
1364598355L, -795798644L, -62749425L, 2128495067L, 696178707L,
13230771L, 1039501670L, -1140211436L, 16886940L, 1644573884L,
-836764375L, -1683468669L, -440456424L, -577064702L, 999147207L,
1174749218L, 1955230774L, 856107883L, -1646783775L, 407335258L,
-238915099L, 90133729L, 191401793L, -1936352039L, -1155445551L,
772538601L, -1740641926L, -841596290L, 424856151L, 1952913485L,
492365864L, 1636178709L, -1826040687L, 1481583134L, 1518607736L,
1435182227L, 1970241634L, 2117107426L, -529994152L, 1032996394L,
-2054467086L, 1351250610L, -811915993L, 883692901L, 1394395354L,
1642132348L, -177577159L, -647353352L, 647781664L, 1917650625L,
-1363360045L, -1272024256L, -1388383633L, -1693505517L, -1471229621L,
-1918990617L, 1810820431L, 814590615L, -666361768L, -432503086L,
2033202338L, 586066856L, 1041394715L)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-documentation.R
\docType{data}
\name{data_dictionary_sentiws}
\alias{data_dictionary_sentiws}
\title{SentimentWortschatz (SentiWS)}
\format{
An object of class \code{dictionary3} of length 2.
}
\source{
\url{http://wortschatz.uni-leipzig.de/en/download/}
}
\usage{
data_dictionary_sentiws
}
\description{
A \pkg{quanteda} \link[quanteda:dictionary]{dictionary} object containing
SentimentWortschatz (SentiWS), a publicly available German-language resource
for sentiment analysis. The current version of SentiWS contains 1,650
positive and 1,818 negative words, which sum up to 15,649 positive and 15,632
negative word forms including their inflections. It not only contains
adjectives and adverbs explicitly expressing a sentiment, but also nouns and
verbs implicitly containing one. The original dictionary weights lie within the
interval of -1 to 1. Note that the version implemented in
\pkg{quanteda.dictionaries} uses a binary classification into positive
(weight > 0) and negative (weight < 0) features.
}
\references{
Remus, R., Quasthoff U., and Heyer, G. (2010). \href{http://www.lrec-conf.org/proceedings/lrec2010/pdf/490_Paper.pdf}{SentiWS: a Publicly Available German-language Resource for Sentiment Analysis}.
In \emph{Proceedings of the 7th International Language Resources and Evaluation
(LREC'10)}, 1168--1171.
}
\keyword{data}
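% A hedged usage sketch (illustrative only, not from the package authors). It
% assumes quanteda is installed and that the package providing
% data_dictionary_sentiws (here quanteda.sentiment) is available; tokens(),
% tokens_lookup() and dfm() are standard quanteda functions, and the German
% example sentences are made up.
\examples{
\dontrun{
library("quanteda")
library("quanteda.sentiment")
txt <- c(pos = "Das Ergebnis ist wunderbar und gut.",
         neg = "Das Ergebnis ist schlecht und traurig.")
toks <- tokens(txt)
# count matches against the positive and negative keys of the dictionary
dfm(tokens_lookup(toks, dictionary = data_dictionary_sentiws))
}
}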
|
/man/data_dictionary_sentiws.Rd
|
no_license
|
olgasparyan/quanteda.sentiment
|
R
| false | true | 1,423 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-documentation.R
\docType{data}
\name{data_dictionary_sentiws}
\alias{data_dictionary_sentiws}
\title{SentimentWortschatz (SentiWS)}
\format{
An object of class \code{dictionary3} of length 2.
}
\source{
\url{http://wortschatz.uni-leipzig.de/en/download/}
}
\usage{
data_dictionary_sentiws
}
\description{
A \pkg{quanteda} \link[quanteda:dictionary]{dictionary} object containing
SentimentWortschatz (SentiWS), a publicly available German-language resource
for sentiment analysis. The current version of SentiWS contains 1,650
positive and 1,818 negative words, which sum up to 15,649 positive and 15,632
negative word forms including their inflections. It not only contains
adjectives and adverbs explicitly expressing a sentiment, but also nouns and
verbs implicitly containing one. The original dictionary weights lie within the
interval of -1 to 1. Note that the version implemented in
\pkg{quanteda.dictionaries} uses a binary classification into positive
(weight > 0) and negative (weight < 0) features.
}
\references{
Remus, R., Quasthoff U., and Heyer, G. (2010). \href{http://www.lrec-conf.org/proceedings/lrec2010/pdf/490_Paper.pdf}{SentiWS: a Publicly Available German-language Resource for Sentiment Analysis}.
In \emph{Proceedings of the 7th International Language Resources and Evaluation
(LREC'10)}, 1168--1171.
}
\keyword{data}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WFSFeatureType.R
\docType{class}
\name{WFSFeatureType}
\alias{WFSFeatureType}
\title{WFSFeatureType}
\format{
\code{\link{R6Class}} object.
}
\value{
Object of \code{\link{R6Class}} modelling a WFS feature type
}
\description{
WFSFeatureType
WFSFeatureType
}
\note{
Class used internally by \pkg{ows4R} to trigger a WFS DescribeFeatureType request
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{FeatureType}
\keyword{OGC}
\keyword{WFS}
\section{Super class}{
\code{\link[ows4R:OGCAbstractObject]{ows4R::OGCAbstractObject}} -> \code{WFSFeatureType}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{description}}{description}
\item{\code{features}}{features}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-WFSFeatureType-new}{\code{WFSFeatureType$new()}}
\item \href{#method-WFSFeatureType-getName}{\code{WFSFeatureType$getName()}}
\item \href{#method-WFSFeatureType-getTitle}{\code{WFSFeatureType$getTitle()}}
\item \href{#method-WFSFeatureType-getAbstract}{\code{WFSFeatureType$getAbstract()}}
\item \href{#method-WFSFeatureType-getKeywords}{\code{WFSFeatureType$getKeywords()}}
\item \href{#method-WFSFeatureType-getDefaultCRS}{\code{WFSFeatureType$getDefaultCRS()}}
\item \href{#method-WFSFeatureType-getBoundingBox}{\code{WFSFeatureType$getBoundingBox()}}
\item \href{#method-WFSFeatureType-getDescription}{\code{WFSFeatureType$getDescription()}}
\item \href{#method-WFSFeatureType-hasGeometry}{\code{WFSFeatureType$hasGeometry()}}
\item \href{#method-WFSFeatureType-getGeometryType}{\code{WFSFeatureType$getGeometryType()}}
\item \href{#method-WFSFeatureType-getFeaturesCRS}{\code{WFSFeatureType$getFeaturesCRS()}}
\item \href{#method-WFSFeatureType-getFeatures}{\code{WFSFeatureType$getFeatures()}}
\item \href{#method-WFSFeatureType-clone}{\code{WFSFeatureType$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="ERROR"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-ERROR'><code>ows4R::OGCAbstractObject$ERROR()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="INFO"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-INFO'><code>ows4R::OGCAbstractObject$INFO()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="WARN"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-WARN'><code>ows4R::OGCAbstractObject$WARN()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="encode"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-encode'><code>ows4R::OGCAbstractObject$encode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClass"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClass'><code>ows4R::OGCAbstractObject$getClass()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClassName"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClassName'><code>ows4R::OGCAbstractObject$getClassName()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getNamespaceDefinition"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getNamespaceDefinition'><code>ows4R::OGCAbstractObject$getNamespaceDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="isFieldInheritedFrom"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-isFieldInheritedFrom'><code>ows4R::OGCAbstractObject$isFieldInheritedFrom()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="logger"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-logger'><code>ows4R::OGCAbstractObject$logger()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-new"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-new}{}}}
\subsection{Method \code{new()}}{
Initializes an object of class \link{WFSFeatureType}
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$new(xmlObj, capabilities, version, logger = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xmlObj}}{an object of class \link{XMLInternalNode-class} to initialize from XML}
\item{\code{capabilities}}{object of class \link{WFSCapabilities}}
\item{\code{version}}{service version}
\item{\code{logger}}{logger}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getName"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getName}{}}}
\subsection{Method \code{getName()}}{
Get feature type name
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getName()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getTitle"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getTitle}{}}}
\subsection{Method \code{getTitle()}}{
Get feature type title
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getTitle()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getAbstract"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getAbstract}{}}}
\subsection{Method \code{getAbstract()}}{
Get feature type abstract
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getAbstract()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getKeywords"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getKeywords}{}}}
\subsection{Method \code{getKeywords()}}{
Get feature type keywords
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getKeywords()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getDefaultCRS"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getDefaultCRS}{}}}
\subsection{Method \code{getDefaultCRS()}}{
Get feature type default CRS
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getDefaultCRS()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getBoundingBox"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getBoundingBox}{}}}
\subsection{Method \code{getBoundingBox()}}{
Get feature type bounding box
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getBoundingBox()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{matrix}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getDescription"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getDescription}{}}}
\subsection{Method \code{getDescription()}}{
Describes a feature type
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getDescription(pretty = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{pretty}}{whether to return a prettified \code{data.frame}. Default is \code{FALSE}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
a \code{list} of \link{WFSFeatureTypeElement} or \code{data.frame}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-hasGeometry"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-hasGeometry}{}}}
\subsection{Method \code{hasGeometry()}}{
Indicates whether the feature type has a geometry
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$hasGeometry()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
object of class \link{logical}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getGeometryType"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getGeometryType}{}}}
\subsection{Method \code{getGeometryType()}}{
Get geometry type
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getGeometryType()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
object of class \link{character} representing the geometry type
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getFeaturesCRS"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getFeaturesCRS}{}}}
\subsection{Method \code{getFeaturesCRS()}}{
Inherits features CRS
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getFeaturesCRS(obj)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{obj}}{features object}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
object of class \link{integer}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getFeatures"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getFeatures}{}}}
\subsection{Method \code{getFeatures()}}{
Get features
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getFeatures(
...,
outputFormat = NULL,
paging = FALSE,
paging_length = 1000,
parallel = FALSE,
parallel_handler = NULL,
cl = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{...}}{any other parameter to pass to the \link{WFSGetFeature} request}
\item{\code{outputFormat}}{output format}
\item{\code{paging}}{paging. Default is \code{FALSE}}
\item{\code{paging_length}}{number of features to request per page. Default is 1000}
\item{\code{parallel}}{whether to get features using parallel multicore strategy. Default is \code{FALSE}}
\item{\code{parallel_handler}}{Handler function to parallelize the code. eg \link{mclapply}}
\item{\code{cl}}{optional cluster object for parallel cluster approaches using eg. \code{parallel::makeCluster}}
\item{\code{typeName}}{the name of the feature type}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
features as object of class \code{sf}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-clone"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
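% A hedged usage sketch (illustrative only, not from the package authors). The
% call chain below follows the usual ows4R workflow, but the method name
% findFeatureTypeByName(), the service URL and the "topp:states" layer are
% assumptions to verify against the installed ows4R version.
\examples{
\dontrun{
library(ows4R)
wfs <- WFSClient$new("https://some.host/geoserver/wfs", serviceVersion = "2.0.0")
ft <- wfs$getCapabilities()$findFeatureTypeByName("topp:states")
ft$getDescription(pretty = TRUE)
feats <- ft$getFeatures(paging = TRUE, paging_length = 500)
}
}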
|
/man/WFSFeatureType.Rd
|
no_license
|
cran/ows4R
|
R
| false | true | 12,191 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WFSFeatureType.R
\docType{class}
\name{WFSFeatureType}
\alias{WFSFeatureType}
\title{WFSFeatureType}
\format{
\code{\link{R6Class}} object.
}
\value{
Object of \code{\link{R6Class}} modelling a WFS feature type
}
\description{
WFSFeatureType
WFSFeatureType
}
\note{
Class used internally by \pkg{ows4R} to trigger a WFS DescribeFeatureType request
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\keyword{FeatureType}
\keyword{OGC}
\keyword{WFS}
\section{Super class}{
\code{\link[ows4R:OGCAbstractObject]{ows4R::OGCAbstractObject}} -> \code{WFSFeatureType}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{description}}{description}
\item{\code{features}}{features}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-WFSFeatureType-new}{\code{WFSFeatureType$new()}}
\item \href{#method-WFSFeatureType-getName}{\code{WFSFeatureType$getName()}}
\item \href{#method-WFSFeatureType-getTitle}{\code{WFSFeatureType$getTitle()}}
\item \href{#method-WFSFeatureType-getAbstract}{\code{WFSFeatureType$getAbstract()}}
\item \href{#method-WFSFeatureType-getKeywords}{\code{WFSFeatureType$getKeywords()}}
\item \href{#method-WFSFeatureType-getDefaultCRS}{\code{WFSFeatureType$getDefaultCRS()}}
\item \href{#method-WFSFeatureType-getBoundingBox}{\code{WFSFeatureType$getBoundingBox()}}
\item \href{#method-WFSFeatureType-getDescription}{\code{WFSFeatureType$getDescription()}}
\item \href{#method-WFSFeatureType-hasGeometry}{\code{WFSFeatureType$hasGeometry()}}
\item \href{#method-WFSFeatureType-getGeometryType}{\code{WFSFeatureType$getGeometryType()}}
\item \href{#method-WFSFeatureType-getFeaturesCRS}{\code{WFSFeatureType$getFeaturesCRS()}}
\item \href{#method-WFSFeatureType-getFeatures}{\code{WFSFeatureType$getFeatures()}}
\item \href{#method-WFSFeatureType-clone}{\code{WFSFeatureType$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="ERROR"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-ERROR'><code>ows4R::OGCAbstractObject$ERROR()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="INFO"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-INFO'><code>ows4R::OGCAbstractObject$INFO()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="WARN"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-WARN'><code>ows4R::OGCAbstractObject$WARN()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="encode"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-encode'><code>ows4R::OGCAbstractObject$encode()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClass"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClass'><code>ows4R::OGCAbstractObject$getClass()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getClassName"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getClassName'><code>ows4R::OGCAbstractObject$getClassName()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="getNamespaceDefinition"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-getNamespaceDefinition'><code>ows4R::OGCAbstractObject$getNamespaceDefinition()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="isFieldInheritedFrom"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-isFieldInheritedFrom'><code>ows4R::OGCAbstractObject$isFieldInheritedFrom()</code></a></span></li>
<li><span class="pkg-link" data-pkg="ows4R" data-topic="OGCAbstractObject" data-id="logger"><a href='../../ows4R/html/OGCAbstractObject.html#method-OGCAbstractObject-logger'><code>ows4R::OGCAbstractObject$logger()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-new"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-new}{}}}
\subsection{Method \code{new()}}{
Initializes an object of class \link{WFSFeatureType}
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$new(xmlObj, capabilities, version, logger = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xmlObj}}{an object of class \link{XMLInternalNode-class} to initialize from XML}
\item{\code{capabilities}}{object of class \link{WFSCapabilities}}
\item{\code{version}}{service version}
\item{\code{logger}}{logger}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getName"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getName}{}}}
\subsection{Method \code{getName()}}{
Get feature type name
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getName()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getTitle"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getTitle}{}}}
\subsection{Method \code{getTitle()}}{
Get feature type title
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getTitle()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getAbstract"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getAbstract}{}}}
\subsection{Method \code{getAbstract()}}{
Get feature type abstract
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getAbstract()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getKeywords"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getKeywords}{}}}
\subsection{Method \code{getKeywords()}}{
Get feature type keywords
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getKeywords()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getDefaultCRS"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getDefaultCRS}{}}}
\subsection{Method \code{getDefaultCRS()}}{
Get feature type default CRS
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getDefaultCRS()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{character}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getBoundingBox"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getBoundingBox}{}}}
\subsection{Method \code{getBoundingBox()}}{
Get feature type bounding box
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getBoundingBox()}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{object}}{of class \code{matrix}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getDescription"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getDescription}{}}}
\subsection{Method \code{getDescription()}}{
Describes a feature type
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getDescription(pretty = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{pretty}}{whether to return a prettified \code{data.frame}. Default is \code{FALSE}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
a \code{list} of \link{WFSFeatureTypeElement} or \code{data.frame}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-hasGeometry"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-hasGeometry}{}}}
\subsection{Method \code{hasGeometry()}}{
Indicates whether the feature type has a geometry
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$hasGeometry()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
object of class \link{logical}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getGeometryType"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getGeometryType}{}}}
\subsection{Method \code{getGeometryType()}}{
Get geometry type
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getGeometryType()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
object of class \link{character} representing the geometry type
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getFeaturesCRS"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getFeaturesCRS}{}}}
\subsection{Method \code{getFeaturesCRS()}}{
Inherits features CRS
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getFeaturesCRS(obj)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{obj}}{features object}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
object of class \link{integer}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-getFeatures"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-getFeatures}{}}}
\subsection{Method \code{getFeatures()}}{
Get features
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$getFeatures(
...,
outputFormat = NULL,
paging = FALSE,
paging_length = 1000,
parallel = FALSE,
parallel_handler = NULL,
cl = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{...}}{any other parameter to pass to the \link{WFSGetFeature} request}
\item{\code{outputFormat}}{output format}
\item{\code{paging}}{paging. Default is \code{FALSE}}
\item{\code{paging_length}}{number of features to request per page. Default is 1000}
\item{\code{parallel}}{whether to get features using parallel multicore strategy. Default is \code{FALSE}}
\item{\code{parallel_handler}}{Handler function to parallelize the code. eg \link{mclapply}}
\item{\code{cl}}{optional cluster object for parallel cluster approaches using eg. \code{parallel::makeCluster}}
\item{\code{typeName}}{the name of the feature type}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
features as object of class \code{sf}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-WFSFeatureType-clone"></a>}}
\if{latex}{\out{\hypertarget{method-WFSFeatureType-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{WFSFeatureType$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it
## repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv_matrix <- NULL
get <- function() x
set <- function(y) {
x <<- y
inv_matrix <<- NULL
}
setInverse <- function(inv) {
inv_matrix <<- inv  ## <<- so the cached inverse is stored in the enclosing environment
}
getInverse <- function() inv_matrix
list(set=set, get=get, get_inverse = getInverse, set_inverse = setInverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$get_inverse()
if (!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data)
x$set_inverse(inverse)
inverse
}
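## A brief usage sketch (illustrative, not part of the original assignment code).
## It assumes the corrected setInverse() above, where the cached inverse is
## written with <<- so that cacheSolve() finds it on repeated calls.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  ## computes the inverse and stores it in the cache
cacheSolve(cm)  ## prints "getting cached data" and returns the cached inverse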
|
/cachematrix.R
|
no_license
|
darshanmnyk/ProgrammingAssignment2
|
R
| false | false | 1,109 |
r
|
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it
## repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv_matrix <- NULL
get <- function() x
set <- function(y) {
x <<- y
inv_matrix <<- NULL
}
setInverse <- function(inv) {
inv_matrix <<- inv  ## <<- so the cached inverse is stored in the enclosing environment
}
getInverse <- function() inv_matrix
list(set=set, get=get, get_inverse = getInverse, set_inverse = setInverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$get_inverse()
if (!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data)
x$set_inverse(inverse)
inverse
}
|
#this code tries to reproduce the Velmeshev_2019_autism analysis and their tSNE plots
cur_k=17
cur_file="rawMnorm5k"
cor_thres=0.2
file_label=paste0("k",cur_k,"_cor",cor_thres,"_",cur_file)
#Data_PRJNA434002
#install.packages("RSpectra")
library("RSpectra")
library("Rtsne")
#setwd("~/Desktop/fh/1.Testing_scRNAseq/")
#setwd("/Users/mzhang24/Desktop/fh/1.Testing_scRNAseq/")
setwd("/fh/fast/sun_w/mengqi/1.Testing_scRNAseq/")
inputM=as.matrix(readRDS(paste0("../Data_PRJNA434002/",cur_file,".rds")))
meta=read.table("../Data_PRJNA434002/meta.tsv",header = TRUE, sep = "\t")
k = 50
svd50=svds(inputM, k)
#svd50=readRDS("../Data_PRJNA434002/svd50.rds")
pdf(paste0("scree_plot_",file_label,".pdf"),width = 6,height = 6)
plot(svd50$d^2/sum(svd50$d^2), xlim = c(0, k),ylim=c(0,0.01), type = "b", pch = 16, xlab = "principal components",
ylab = "variance explained", main="scree plot")
dev.off()
svd50$d^2/sum(svd50$d^2)
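#A hedged helper (hypothetical; k_suggested is not used downstream). One common
#rule is to keep the smallest number of PCs explaining, say, 80% of the variance
#captured by the 50 components computed above.
k_suggested=which(cumsum(svd50$d^2/sum(svd50$d^2))>=0.8)[1]
k_suggested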
k=cur_k ###here should depend on the plots
svd50v=svd50$v[,1:k]
xc=as.matrix(meta[,"Capbatch"])
xs=as.matrix(meta[,"Seqbatch"])
cor_c=matrix(ncol=1,nrow=k)
cor_s=matrix(ncol=1,nrow=k)
for(ik in 1:k){
y=svd50v[,ik]
##calculate correlation of capbatch
lmc = lm(y ~ as.factor(xc))
cor_c[ik]=summary(lmc)$r.square
##calculate correlation of seqbatch
lms = lm(y ~ as.factor(xs))
cor_s[ik]=sqrt(summary(lms)$r.square)
}
cor_s
cor_c
#cor 0.2 threshold
flag=(abs(cor_c)<cor_thres & abs(cor_s)<cor_thres)
sum(flag)
cur_svd50v=svd50v[,flag==1]
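#A hedged sanity check, kept commented out like the color-test block below:
#after dropping batch-correlated PCs, the retained components should show only
#weak association with the capture and sequencing batches.
#apply(cur_svd50v,2,function(y) summary(lm(y ~ as.factor(xc)))$r.square)
#apply(cur_svd50v,2,function(y) summary(lm(y ~ as.factor(xs)))$r.square)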
#generate plotting color
library(RColorBrewer)
color_type_num=apply(meta,2,function(x){return(length(table(as.factor(x))))})
n=max(color_type_num[color_type_num<50])
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'seq',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
# ####This part is used for detecting which color assignment provides more distinguishable tSNE plots.####
# tsne=Rtsne(cur_svd50v,dims=2, perplexity=15)
#
# pdf(paste0("tSNE_color_test_",cur_file,"_k=",cur_k,".pdf"),height = 8,width = 8)
# for(i in 1:30){
# set.seed(i) #2,8,28
# cust_col=sample(col_vector, n)
# #pie(rep(1,n), col=cust_col)
# plot(tsne$Y, cex=.2,main=paste0("tSNE-cluster",i),col=cust_col[as.numeric(as.factor(meta[,"cluster"]))])
# }
# dev.off()
#Then, we choose seed 28 as the best choice for the color generation.
# set.seed(28) #2,8,28
# cust_col=sample(col_vector, n)
cust_col=c("#023858", "#E31A1C", "#F768A1", "#BD0026", "#D4B9DA", "#7FCDBB", "#CE1256",
"#88419D", "#FDD0A2", "#4D004B", "#E7298A", "#78C679", "#D9F0A3", "#081D58",
"#993404", "#CC4C02", "#FC9272", "#F7FCFD", "#BCBDDC", "#FFEDA0", "#FEE0D2",
"#D0D1E6", "#7F0000", "#FFF7F3", "#9E9AC8", "#FFFFD9", "#CCEBC5", "#FFFFE5",
"#014636", "#DADAEB", "#BFD3E6", "#FE9929", "#C994C7", "#FEE8C8", "#FCC5C0",
"#1D91C0", "#FCFBFD", "#225EA8", "#000000", "#FEC44F", "#41AE76")
#TSNE_plot is a function that writes two PDFs of tSNE plots, designed especially for the current situation.
#example
#tsne_obj=tsne
#meta_info=meta
#file_label="k17_cor0.2_3k10"
TSNE_plot=function(tsne_obj,meta_info,file_label){
ncluster=length(as.character(unique(meta_info$cluster)))
#find label location
medianY=matrix(ncol=2,nrow=ncluster)
for(i in 1:ncluster){
cur_cluster=as.character(unique(meta_info$cluster)[i])
cur_Y=tsne_obj$Y[(meta_info$cluster==cur_cluster),]
medianY[i,]=apply(cur_Y,2,median)
}
#plot compact tsne
pdf(paste0("tsne_plots_",file_label,".pdf"),height = 8,width = 8)
plot(tsne_obj$Y, cex=.2,main="tSNE-cluster",col=cust_col[as.numeric(as.factor(meta_info[,"cluster"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
plot(tsne_obj$Y, cex=.1,main="tSNE-Capbatch",col=cust_col[as.numeric(as.factor(meta_info[,"Capbatch"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
plot(tsne_obj$Y, cex=.1,main="tSNE-Seqbatch",col=cust_col[as.numeric(as.factor(meta_info[,"Seqbatch"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
plot(tsne_obj$Y, cex=.2,main="tSNE-individual",col=cust_col[as.numeric(as.factor(meta_info[,"individual"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
dev.off()
#plot each sub tSNE
pdf(paste0("tSNE_sub_cluster_",file_label,".pdf"),height = 15,width = 30)
op=par(mfrow=c(3,6),mar=c(3, 3, 1, 1), bty="n",cex=0.9)
for(i in 1:ncluster){
cur_cluster=as.character(unique(meta_info$cluster)[i])
metaflag=(meta_info$cluster==cur_cluster)
#pie(rep(1,n), col=cust_col)
plot(tsne_obj$Y, cex=.2,main=paste0("tSNE-cluster",cur_cluster),col=cust_col[as.numeric(as.factor(metaflag))*13+6])
}
par(op)
dev.off()
}
tsne=Rtsne(cur_svd50v,dims=2, perplexity=30)
saveRDS(tsne,paste0("tsne_",cur_file,"_k",cur_k,"_cor",cor_thres,"_30.rds"))
TSNE_plot(tsne,meta,paste0(cur_file,"_k",cur_k,"_cor",cor_thres,"_30"))
tsne=Rtsne(cur_svd50v,dims=2, perplexity=35)
saveRDS(tsne,paste0("tsne_",cur_file,"_k",cur_k,"_cor",cor_thres,"_35.rds"))
TSNE_plot(tsne,meta,paste0(cur_file,"_k",cur_k,"_cor",cor_thres,"_35"))
sessionInfo()
q(save="no")
|
/DE_Autism/3_autism-tSNE_plot.R
|
permissive
|
changwn/scRNAseq_pipelines
|
R
| false | false | 5,308 |
r
|
#this code tries to reproduce the Velmeshev_2019_autism analysis and their tSNE plots
cur_k=17
cur_file="rawMnorm5k"
cor_thres=0.2
file_label=paste0("k",cur_k,"_cor",cor_thres,"_",cur_file)
#Data_PRJNA434002
#install.packages("RSpectra")
library("RSpectra")
library("Rtsne")
#setwd("~/Desktop/fh/1.Testing_scRNAseq/")
#setwd("/Users/mzhang24/Desktop/fh/1.Testing_scRNAseq/")
setwd("/fh/fast/sun_w/mengqi/1.Testing_scRNAseq/")
inputM=as.matrix(readRDS(paste0("../Data_PRJNA434002/",cur_file,".rds")))
meta=read.table("../Data_PRJNA434002/meta.tsv",header = TRUE, sep = "\t")
k = 50
svd50=svds(inputM, k)
#svd50=readRDS("../Data_PRJNA434002/svd50.rds")
pdf(paste0("scree_plot_",file_label,".pdf"),width = 6,height = 6)
plot(svd50$d^2/sum(svd50$d^2), xlim = c(0, k),ylim=c(0,0.01), type = "b", pch = 16, xlab = "principal components",
ylab = "variance explained", main="scree plot")
dev.off()
svd50$d^2/sum(svd50$d^2)
k=cur_k ###here should depend on the plots
svd50v=svd50$v[,1:k]
xc=as.matrix(meta[,"Capbatch"])
xs=as.matrix(meta[,"Seqbatch"])
cor_c=matrix(ncol=1,nrow=k)
cor_s=matrix(ncol=1,nrow=k)
for(ik in 1:k){
y=svd50v[,ik]
##calculate correlation of capbatch
lmc = lm(y ~ as.factor(xc))
cor_c[ik]=summary(lmc)$r.square
##calculate correlation of seqbatch
lms = lm(y ~ as.factor(xs))
cor_s[ik]=sqrt(summary(lms)$r.square)
}
cor_s
cor_c
#cor 0.2 threshold
flag=(abs(cor_c)<cor_thres & abs(cor_s)<cor_thres)
sum(flag)
cur_svd50v=svd50v[,flag==1]
#generate plotting color
library(RColorBrewer)
color_type_num=apply(meta,2,function(x){return(length(table(as.factor(x))))})
n=max(color_type_num[color_type_num<50])
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'seq',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
# ####This part is used for detecting which color assignment provides more distinguishable tSNE plots.####
# tsne=Rtsne(cur_svd50v,dims=2, perplexity=15)
#
# pdf(paste0("tSNE_color_test_",cur_file,"_k=",cur_k,".pdf"),height = 8,width = 8)
# for(i in 1:30){
# set.seed(i) #2,8,28
# cust_col=sample(col_vector, n)
# #pie(rep(1,n), col=cust_col)
# plot(tsne$Y, cex=.2,main=paste0("tSNE-cluster",i),col=cust_col[as.numeric(as.factor(meta[,"cluster"]))])
# }
# dev.off()
#Then, we choose seed 28 as the best choice for the color generation.
# set.seed(28) #2,8,28
# cust_col=sample(col_vector, n)
cust_col=c("#023858", "#E31A1C", "#F768A1", "#BD0026", "#D4B9DA", "#7FCDBB", "#CE1256",
"#88419D", "#FDD0A2", "#4D004B", "#E7298A", "#78C679", "#D9F0A3", "#081D58",
"#993404", "#CC4C02", "#FC9272", "#F7FCFD", "#BCBDDC", "#FFEDA0", "#FEE0D2",
"#D0D1E6", "#7F0000", "#FFF7F3", "#9E9AC8", "#FFFFD9", "#CCEBC5", "#FFFFE5",
"#014636", "#DADAEB", "#BFD3E6", "#FE9929", "#C994C7", "#FEE8C8", "#FCC5C0",
"#1D91C0", "#FCFBFD", "#225EA8", "#000000", "#FEC44F", "#41AE76")
#TSNE_plot is a function that writes two PDFs of tSNE plots, designed especially for the current situation.
#example
#tsne_obj=tsne
#meta_info=meta
#file_label="k17_cor0.2_3k10"
TSNE_plot=function(tsne_obj,meta_info,file_label){
ncluster=length(as.character(unique(meta_info$cluster)))
#find label location
medianY=matrix(ncol=2,nrow=ncluster)
for(i in 1:ncluster){
cur_cluster=as.character(unique(meta_info$cluster)[i])
cur_Y=tsne_obj$Y[(meta_info$cluster==cur_cluster),]
medianY[i,]=apply(cur_Y,2,median)
}
#plot compact tsne
pdf(paste0("tsne_plots_",file_label,".pdf"),height = 8,width = 8)
plot(tsne_obj$Y, cex=.2,main="tSNE-cluster",col=cust_col[as.numeric(as.factor(meta_info[,"cluster"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
plot(tsne_obj$Y, cex=.1,main="tSNE-Capbatch",col=cust_col[as.numeric(as.factor(meta_info[,"Capbatch"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
plot(tsne_obj$Y, cex=.1,main="tSNE-Seqbatch",col=cust_col[as.numeric(as.factor(meta_info[,"Seqbatch"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
plot(tsne_obj$Y, cex=.2,main="tSNE-individual",col=cust_col[as.numeric(as.factor(meta_info[,"individual"]))])
for(i in 1:ncluster){
text(medianY[i,1],medianY[i,2],as.character(unique(meta_info$cluster)[i]))
}
dev.off()
#plot each sub tSNE
pdf(paste0("tSNE_sub_cluster_",file_label,".pdf"),height = 15,width = 30)
op=par(mfrow=c(3,6),mar=c(3, 3, 1, 1), bty="n",cex=0.9)
for(i in 1:ncluster){
cur_cluster=as.character(unique(meta_info$cluster)[i])
metaflag=(meta_info$cluster==cur_cluster)
#pie(rep(1,n), col=cust_col)
plot(tsne_obj$Y, cex=.2,main=paste0("tSNE-cluster",cur_cluster),col=cust_col[as.numeric(as.factor(metaflag))*13+6])
}
par(op)
dev.off()
}
tsne=Rtsne(cur_svd50v,dims=2, perplexity=30)
saveRDS(tsne,paste0("tsne_",cur_file,"_k",cur_k,"_cor",cor_thres,"_30.rds"))
TSNE_plot(tsne,meta,paste0(cur_file,"_k",cur_k,"_cor",cor_thres,"_30"))
tsne=Rtsne(cur_svd50v,dims=2, perplexity=35)
saveRDS(tsne,paste0("tsne_",cur_file,"_k",cur_k,"_cor",cor_thres,"_35.rds"))
TSNE_plot(tsne,meta,paste0(cur_file,"_k",cur_k,"_cor",cor_thres,"_35"))
sessionInfo()
q(save="no")
|
#' @title Non-Parametric Covariate Balancing Propensity Score (npCBPS) Estimation
#'
#' @description
#' \code{npCBPS} is a method to estimate weights interpretable as (stabilized)
#' inverse generalized propensity score weights, w_i = f(T_i)/f(T_i|X), without
#' actually estimating a model for the treatment to arrive at f(T|X) estimates.
#' In brief, this works by maximizing the empirical likelihood of observing the
#' values of treatment and covariates that were observed, while constraining
#' the weights to be those that (a) ensure balance on the covariates, and (b)
#' maintain the original means of the treatment and covariates.
#'
#' In the continuous treatment context, this balance on covariates means zero
#' correlation of each covariate with the treatment. In binary or categorical
#' treatment contexts, balance on covariates implies equal means on the
#' covariates for observations at each level of the treatment. When given a
#' numeric treatment the software handles it continuously. To handle the
#' treatment as binary or categorical is must be given as a factor.
#'
#' Furthermore, we apply a Bayesian variant that allows the correlation of each
#' covariate with the treatment to be slightly non-zero, as might be expected
#' in a given finite sample.
#'
#' Estimates non-parametric covariate balancing propensity score weights.
#'
#' ### @aliases npCBPS npCBPS.fit
#' @param formula An object of class \code{formula} (or one that can be coerced
#' to that class): a symbolic description of the model to be fitted.
#' @param data An optional data frame, list or environment (or object coercible
#' by as.data.frame to a data frame) containing the variables in the model. If
#' not found in data, the variables are taken from \code{environment(formula)},
#' typically the environment from which \code{CBPS} is called.
#' @param na.action A function which indicates what should happen when the data
#' contain NAs. The default is set by the na.action setting of options, and is
#' na.fail if that is unset.
#' @param corprior Prior hyperparameter controlling the expected amount of
#' correlation between each covariate and the treatment. Specifically, the
#' amount of correlation between the k-dimensional covariates, X, and the
#' treatment T after weighting is assumed to have prior distribution
#' MVN(0,sigma^2 I_k). We conceptualize sigma^2 as a tuning parameter to be
#' used pragmatically. Its default of 0.1 ensures that the balance constraints
#' are not too harsh, and that a solution is likely to exist. Once the
#' algorithm works at such a high value of sigma^2, the user may wish to
#' attempt values closer to 0 to get finer balance.
#' @param print.level Controls verbosity of output to the screen while npCBPS
#' runs. At the default of print.level=0, little output is produced. It
#' print.level>0, it outputs diagnostics including the log posterior
#' (log_post), the log empirical likelihood associated with the weights
#' (log_el), and the log prior probability of the (weighted) correlation of
#' treatment with the covariates.
#' @param ... Other parameters to be passed.
#' @return \item{weights}{The optimal weights} \item{y}{The treatment vector
#' used} \item{x}{The covariate matrix} \item{model}{The model frame}
#' \item{call}{The matched call} \item{formula}{The formula supplied}
#' \item{data}{The data argument} \item{log.p.eta}{The log density for the
#' (weighted) correlation of the covariates with the treatment, given the
#' choice of prior (\code{corprior})} \item{log.el}{The log empirical
#' likelihood of the observed data at the chosen set of IPW weights.}
#' \item{eta}{A vector describing the correlation between the treatment and
#' each covariate on the weighted data at the solution.} \item{sumw0}{The sum
#' of weights, provided as a check on convergence. This is always 1 when
#' convergence occurs unproblematically. If it differs from 1 substantially, no
#' solution perfectly satisfying the conditions was found, and the user may
#' consider a larger value of \code{corprior}.}
#' @author Christian Fong, Chad Hazlett, and Kosuke Imai
#' @references Fong, Christian, Chad Hazlett, and Kosuke Imai. ``Parametric
#' and Nonparametric Covariate Balancing Propensity Score for General Treatment
#' Regimes.'' Unpublished Manuscript.
#' \url{http://imai.princeton.edu/research/files/CBGPS.pdf}
#' @examples
#'
#' ##Generate data
#' data(LaLonde)
#'
#' ## Restricted to only two covariates so that it will run quickly.
#' ## Performance will remain good if the full LaLonde specification is used
#' fit <- npCBPS(treat ~ age + educ, data = LaLonde, corprior=.1/nrow(LaLonde))
#' plot(fit)
#'
#' @export npCBPS
#'
npCBPS <- function(formula, data, na.action, corprior=.01, print.level=0, ...) {
if (missing(data))
data <- environment(formula)
call <- match.call()
family <- binomial()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "na.action"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
mt <- attr(mf, "terms")
Y <- model.response(mf, "any")
if (length(dim(Y)) == 1L) {
nm <- rownames(Y)
dim(Y) <- NULL
if (!is.null(nm))
names(Y) <- nm
}
X <- if (!is.empty.model(mt)) model.matrix(mt, mf)#[,-2]
else matrix(, NROW(Y), 0L)
X<-X[,apply(X,2,sd)>0]
fit <- eval(call("npCBPS.fit", X = X, treat = Y, corprior = corprior,
print.level = print.level))
fit$na.action <- attr(mf, "na.action")
xlevels <- .getXlevels(mt, mf)
fit$data<-data
fit$call <- call
fit$formula <- formula
fit$terms<-mt
fit
}
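## A hedged post-fit balance check (illustrative, not part of the package code;
## kept commented out). It relies only on the documented return values
## (weights, x, y): for a continuous treatment, the weighted correlations
## between each covariate and the treatment should be near zero.
# fit <- npCBPS(treat ~ age + educ, data = LaLonde, corprior = .1/nrow(LaLonde))
# w <- fit$weights / sum(fit$weights)
# sapply(seq_len(ncol(fit$x)), function(j) {
#   xj <- scale(fit$x[, j]); tj <- scale(fit$y)
#   sum(w * xj * tj)  # weighted covariance of standardized variables
# })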
#' npCBPS.fit
#'
#' @param treat A vector of treatment assignments. Binary or multi-valued
#' treatments should be factors. Continuous treatments should be numeric.
#' @param X A covariate matrix.
#' @param corprior Prior hyperparameter controlling the expected amount of
#' correlation between each covariate and the treatment. Specifically, the
#' amount of correlation between the k-dimensional covariates, X, and the
#' treatment T after weighting is assumed to have prior distribution
#' MVN(0,sigma^2 I_k). We conceptualize sigma^2 as a tuning parameter to be
#' used pragmatically. Its default of 0.1 ensures that the balance constraints
#' are not too harsh, and that a solution is likely to exist. Once the
#' algorithm works at such a high value of sigma^2, the user may wish to
#' attempt values closer to 0 to get finer balance.
#' @param print.level Controls verbosity of output to the screen while npCBPS
#' runs. At the default of print.level=0, little output is produced. If
#' print.level>0, it outputs diagnostics including the log posterior
#' (log_post), the log empirical likelihood associated with the weights
#' (log_el), and the log prior probability of the (weighted) correlation of
#' treatment with the covariates.
#' @param ... Other parameters to be passed.
#'
npCBPS.fit=function(treat, X, corprior, print.level, ...){
D=treat
rescale.orig=TRUE
orig.X=X
#pre-processing:
X=X%*%solve(chol(var(X)))
X=scale(X,center=TRUE, scale=TRUE)
n=nrow(X)
eps=1/n
#Constraint matrix
if (is.numeric(D)){
print("Estimating npCBPS as a continuous treatment. To estimate for a binary or multi-valued treatment, use a factor.")
#re-orient each X to have positive correlation with T
X=X%*%diag(as.vector(sign(cor(X,D))),nrow=ncol(X))
D=scale(D,center=TRUE, scale=TRUE)
z=X*as.vector(D)
z=cbind(z,X,D)
ncon=ncol(z)
ncon_cor=ncol(X)*ncol(D)
}
if(is.factor(D)){
#For factor treatments
Td=as.matrix(model.matrix(~D-1))
conds=dim(Td)[2]
dimX=dim(X)[2]
#Now divide each column of Td by it's sum
colsums=apply(Td,2,sum)
Td=Td%*%diag(1/colsums)
#Now subtract the last column from each of the others, and remove the last
subtractMat=Td[,conds]%*%t(as.matrix(rep(1, conds)))
Td=Td-subtractMat
Td=Td[,1:(conds-1)]
#Center and rescale Td now
Td=scale(x=Td, center = TRUE, scale=TRUE)
#form matrix z that will be needed to setup contrasts
z=matrix(NA,nrow=n,ncol=dimX*(conds-1))
z=t(sapply(seq(1:n),function(x) t(kronecker(Td[x,],X[x,]))))
#Check that correlation of Td with X is very close to colMeans of z
cor.init=as.vector(t(apply(X = X,MARGIN = 2,function(x) cor(Td,x))))
rescale.factors=cor.init/colMeans(z)
if (print.level>0){print(rescale.factors)}
    #Add additional constraints that E[wX*]=0, if desired
#NB: I think we need another constraint to ensure something like E[wT*]=0
ncon_cor=dim(z)[2] #keep track of number of constraints not including the additional mean constraint
z=cbind(z,X)
ncon=dim(z)[2] #num constraints including mean constraints
#rm(Td)
}
#-----------------------------------------------
# Functions we will need
#-----------------------------------------------
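  # llog() below is the pseudo-logarithm familiar from empirical likelihood
  # computations: it equals log(z) for z >= eps and, below eps, switches to the
  # second-order Taylor expansion of log() around eps, so the objective stays
  # finite and smooth even when an intermediate weight would be non-positive.
  # llogp() is the corresponding first derivative (1/z above eps, linear below).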
llog = function(z, eps){
ans = z
avoidNA = !is.na(z)
lo = (z < eps) & avoidNA
ans[lo] = log(eps) - 1.5 + 2 * z[lo]/eps - 0.5 * (z[lo]/eps)^2
ans[!lo] = log(z[!lo])
ans
}
llogp = function(z, eps){
ans = z
avoidNA = !is.na(z)
lo = (z < eps) & avoidNA
ans[lo] = 2/eps - z[lo]/eps^2
ans[!lo] = 1/z[!lo]
ans
}
log_elgiven_eta=function(par,eta,z,eps,ncon_cor){
ncon=ncol(z)
gamma=par
eta_long=as.matrix(c(eta, rep(0,ncon-ncon_cor)))
#matrix version of eta for vectorization purposes
eta_mat=eta_long%*%c(rep(1,nrow(z)))
arg = (n + t(gamma)%*%(eta_mat-t(z)))
#used to be: arg = (1 + t(gamma)%*%(t(z)-eta_mat))
log_el=-sum(llog(z=arg,eps=eps))
return(log_el)
}
get.w=function(eta,z, sumw.tol=0.05, eps){
gam.init=rep(0, ncon)
opt.gamma.given.eta=optim(par=gam.init, eta=eta, method="BFGS", fn=log_elgiven_eta, z=z, eps=eps, ncon_cor=ncon_cor, control=list(fnscale=1))
gam.opt.given.eta=opt.gamma.given.eta$par
eta_long=as.matrix(c(eta, rep(0,ncon-ncon_cor)))
#matrix version of eta for vectorization purposes
eta_mat=eta_long%*%c(rep(1,nrow(z)))
arg_temp = (n + t(gam.opt.given.eta)%*%(eta_mat-t(z)))
    #just use 1/x instead of the derivative of the pseudo-log
w=as.numeric(1/arg_temp)
sum.w=sum(w)
#scale: should sum to 1 when actually applied:
w_scaled=w/sum.w
if (abs(1-sum.w)<=sumw.tol){log_el=-sum(log(w_scaled))}
if (abs(1-sum.w)>=sumw.tol){log_el=-sum(log(w_scaled))-10^4*(1+abs(1-sum.w))}
R=list()
R$w=w
R$sumw=sum.w
R$log_el=log_el
R$el.gamma=gam.opt.given.eta[1:ncon_cor]
#R$grad.gamma=w*(eta_mat-t(z)) #gradient w.r.t. gamma
return(R)
}
#------------------------
# Some diagnostics:
# (a) is the eta really the cor(X,T) you end up with?
# (b) is balance on X and T (at 0) is maintained
#------------------------
## eta= (.1,.1,...,.1) should produce w's that produce weighted cov = (.1,.1,...)
#test.w=get.w(eta=rep(.1,ncon_cor),z)
##check convergence: is sumw near 1?
#test.w$sumw
##get w
#wtest=test.w$w
##check weighted covariances: are they near 0.10?
#sapply(seq(1,5), function(x) sum(X[,x]*T*wtest))
##means of X and T: are they near 0?
#sapply(seq(1,5), function(x) sum(X[,x]*wtest))
#sum(T*wtest)
log_post = function(par,eta.to.be.scaled,eta_prior_sd,z, eps=eps, sumw.tol=.001){
#get log(p(eta))
eta_now=par*eta.to.be.scaled
log_p_eta=sum(-.5*log(2*pi*eta_prior_sd^2) - (eta_now^2)/(2*eta_prior_sd^2))
#get best log_el for this eta
el.out=get.w(eta=eta_now,z=z, sumw.tol=sumw.tol, eps=eps)
#el.gamma=el.out$el.gamma
#put it together into log(post)
c=1 #in case we want to rescale the log(p(eta)), as sigma/c would.
log_post=el.out$log_el+c*log_p_eta
if(print.level>0){print(c(log_post, el.out$log_el, log_p_eta))}
return(log_post)
}
###-----------------------------------------------------------
### The main event
###-----------------------------------------------------------
#Now the outer optimization over eta
#setup the prior
eta_prior_sd=rep(corprior,ncon_cor)
#get original correlations
if (is.numeric(D)){eta.init=sapply(seq(1:ncon_cor), function(x) cor(X[,x],D))}
  #for factor treatments, there is probably a better analog to the initial correlation,
if (is.factor(D)){
eta.init=cor.init
}
#get vector of 1's long enough to be our dummy that gets rescaled to form eta if we want
#constant etas:
eta.const=rep(1, ncon_cor)
#note that as currently implemented, these are only the non-zero elements of eta that correspond
# to cor(X,T). For additional constraints that hold down the mean of X and T we are assuming
# eta=0 effectively. They get padded in within the optimization.
#Determine if we want to rescale 1's or rescale the original correlations
#rescale.orig=FALSE
if(rescale.orig==TRUE){eta.to.be.scaled=eta.init}else{eta.to.be.scaled=eta.const}
eta.optim.out=optimize(f=log_post, interval=c(-1,1), eta.to.be.scaled=eta.to.be.scaled,
eps=eps, sumw.tol=.001, eta_prior_sd=eta_prior_sd,z=z, maximum=TRUE)
#Some useful values:
par.opt=eta.optim.out$maximum
eta.opt=par.opt*eta.to.be.scaled
log.p.eta.opt=sum(-.5*log(2*pi*eta_prior_sd^2) - (eta.opt^2)/(2*eta_prior_sd^2))
el.out.opt=get.w(eta=eta.opt,z=z, eps=eps)
sumw0=sum(el.out.opt$w)
w=el.out.opt$w/sumw0
log.el.opt=el.out.opt$log_el
R=list()
R$par=par.opt
R$log.p.eta=log.p.eta.opt
R$log.el=log.el.opt
R$eta=eta.opt
R$sumw0=sumw0 #sum of original w prior to any corrective rescaling
R$weights=w
R$y=D
R$x=orig.X
class(R)<-"npCBPS"
return(R)
}
#' Calls the appropriate plot function, based on the number of treatments
#' @param x an object of class \dQuote{CBPS} or \dQuote{npCBPS}, usually, a
#' result of a call to \code{CBPS} or \code{npCBPS}.
#' @param covars Indices of the covariates to be plotted (excluding the intercept). For example,
#' if only the first two covariates from \code{balance} are desired, set \code{covars} to 1:2.
#' The default is \code{NULL}, which plots all covariates.
#' @param silent If set to \code{FALSE}, returns the imbalances used to
#' construct the plot. Default is \code{TRUE}, which returns nothing.
#' @param ... Additional arguments to be passed to balance.
#'
#' @export
#'
plot.npCBPS<-function(x, covars = NULL, silent = TRUE, ...){
bal.x<-balance(x)
if(is.numeric(x$y)) {out<-plot.CBPSContinuous(x, covars, silent, ...)}
else {out<-plot.CBPS(x, covars, silent, ...)}
if(!is.null(out)) return(out)
}
#' Calls the appropriate balance function based on the number of treatments
#'
#' @param object A CBPS, npCBPS, or CBMSM object.
#' @param ... Other parameters to be passed.
#'
#' @export
#'
balance.npCBPS<-function(object, ...){
if(is.numeric(object$y)) {out<-balance.CBPSContinuous(object, ...)}
else {out<-balance.CBPS(object, ...)}
out
}
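# A minimal end-to-end sketch of how the functions above are typically combined
# (our illustration; column names such as re78 are assumed from the LaLonde data
# shipped with CBPS). Wrapped in `if (FALSE)` so nothing is executed when this
# package file is sourced.
if (FALSE) {
  data(LaLonde)
  fit <- npCBPS(treat ~ age + educ, data = LaLonde, corprior = 0.1 / nrow(LaLonde))
  summary(fit$weights)  # stabilized inverse generalized propensity score weights
  fit$sumw0             # should be close to 1 when convergence is unproblematic
  # the weights can then be used downstream, e.g. in a weighted outcome regression
  lm(re78 ~ treat, data = LaLonde, weights = fit$weights)
}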
|
/R/npCBPS.R
|
no_license
|
cran/CBPS
|
R
| false | false | 15,507 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wav_best.R
\name{error_wavmap}
\alias{error_wavmap}
\title{Forecast Error Mapping Wavelet Models}
\usage{
error_wavmap(
x,
df,
lags,
.H,
.K,
ic = BIC,
.var = "all",
.diff = FALSE,
dev = "RMSE",
xreg = NULL,
...
)
}
\arguments{
\item{x}{A tibble returned by the function \code{wavsigmap::\link[wavsigmap]{map_wav_args}}.}
\item{df}{A data frame. The first column of the data frame
must be the base variable for constructing the
dependent variable, the multiperiod ahead
value to be forecasted, \eqn{x_{t + h}}. If \code{wav = TRUE},
\code{ncol} of \code{df} can be equal to one (just the dependent variable),
otherwise, \code{ncol(df)} must be greater than one, even
if \code{.var = "ar"}.}
\item{lags}{An integer vector defining the lags of the
regressors. If \code{wav = FALSE}, the length of the vector \code{lags} has to
be the same as the number of columns in \code{df}. However,
if \code{wav = TRUE}, an additional element in \code{lags}
must be added such that the last element in the vector
\code{lags} is the lag associated with the wavelet regressor.}
\item{.H}{An integer representing the maximum horizon step.}
\item{.K}{An integer, the number of pseudo-out-of-sample forecasts.}
\item{ic}{Information criterion, \code{BIC} or \code{AIC}. When searching
for the best model, the dataset is adjusted so that every model
has the same data length
for appropriate comparison.}
\item{.var}{A string determining how the model will be
specified: \code{"all"} (default), \code{"ar"} or \code{"ar_out"}.}
\item{.diff}{Logical \code{FALSE} or \code{TRUE}. If \code{TRUE}, the dependent
variable is differenced. See \code{\link{wavdrcast-package}} for the
implications for the model specification and the wavelet variable.}
\item{dev}{A string, \code{"RMSE"} or \code{"MAE"}.}
\item{xreg}{Data frame. Exogenous variables that are not
lagged. The number of rows must be the same as in \code{df}.}
\item{...}{Further arguments passed to
\code{wavsigmap::\link[wavsigmap]{signal}}.}
}
\value{
A tibble containing wavelet options and mean of
the forecast error.
}
\description{
\code{error_wavmap} is a functional that computes the
root-mean-squared forecast error (RMSE) or mean absolute
forecast error (MAE) of the direct forecast for
several wavelet models.
}
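\details{
As a rough sketch (our notation, not the package authors'), the quantity averaged
for \code{dev = "RMSE"} at horizon \eqn{h} over the \eqn{K} pseudo-out-of-sample
forecast origins is
\deqn{RMSE_h = \sqrt{\frac{1}{K}\sum_{k=1}^{K}\left(x_{t_k + h} - \hat{x}_{t_k + h}\right)^2},}
while \code{dev = "MAE"} replaces the squared deviations with absolute deviations.
}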
\examples{
wavmap <- wavsigmap::map_wav_args(list(
wavelet = c("haar", "s8"),
thresh.fun = c("universal", "adaptive")
))
error_wavmap(wavmap, df = inf[1],
lags = c(1, 1),
.H = 2, .K = 4)
error_wavmap(wavmap, df = gdp[1],
lags = c(1, 1),
.H = 2, .K = 4, .diff = TRUE) \%>\%
dplyr::filter(dplyr::near(
mean, min(mean, na.rm = TRUE)))
}
\seealso{
\code{wavsigmap::\link[wavsigmap]{map_wav_args}},
\code{wavsigmap::\link[wavsigmap]{signal}}, \code{\link{error}}
}
|
/man/error_wavmap.Rd
|
permissive
|
nelson16silva/wavdrcast
|
R
| false | true | 2,918 |
rd
|
library(reshape)
library(ggplot2)
rm(list = ls())
setwd("~/Dropbox/GitHub/wgs2/")
log <- read.table("./Manu/R/log.txt", header = T)
colSums(log)
log <- melt(data = log, id.vars = "CHR")
log$variable <- factor(log$variable, levels = c("ALL", "PASS", "MAF"))
pdf("./Manu/pdfs/variants_log.pdf", width = 9, height = 5)
ggplot(log, aes(factor(CHR), value, fill = variable)) + geom_bar(stat="identity", position = "dodge") +
scale_fill_brewer(palette = "Set1") + theme_bw() + xlab("") + ylab("Count")
dev.off()
|
/Manu/R/variant.R
|
no_license
|
xulong82/Genetics
|
R
| false | false | 515 |
r
|
## * Class conditional probabilities
##' @useDynLib gpuClassifieR, .registration=TRUE
.get_condprob <- function(feats, weights, normalize=TRUE,
log_domain=FALSE, backend='R') {
switch(backend,
R={
condprob <- feats %*% weights
## Normalizing probabilities
if (normalize) {
condprob <- t(apply(condprob, 1,
function(feats) feats - .logsumexp_R(feats)))
}
if (!log_domain) condprob <- exp(condprob)
},
C={
condprob <- t(.Call('get_condprob_logreg_',
as.matrix(feats),
t(weights),
as.logical(normalize),
as.logical(log_domain)))
},
CUDA={
condprob <- t(.Call('get_condprob_logreg_cuda',
as.matrix(feats),
t(weights),
as.logical(normalize),
as.logical(log_domain)))
},
{
                 stop('unrecognized computation backend')
})
return(condprob)
}
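## The R backend above normalizes in the log domain by subtracting a row-wise
## log-sum-exp (computed by the package-internal .logsumexp_R, defined elsewhere).
## A self-contained sketch of that trick, using an illustrative helper of our own
## rather than the package implementation; wrapped in `if (FALSE)` so nothing is
## executed when this file is sourced.
if (FALSE) {
    logsumexp <- function(v) max(v) + log(sum(exp(v - max(v))))   # stable log(sum(exp(v)))
    scores <- matrix(rnorm(12), nrow = 4, ncol = 3)               # unnormalized class scores
    logprob <- t(apply(scores, 1, function(s) s - logsumexp(s)))  # row-wise log-softmax
    rowSums(exp(logprob))                                         # every row sums to 1
}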
## * Cost
##' @useDynLib gpuClassifieR, .registration=TRUE
.get_cost <- function(feats, weights, targets, decay=0.0, backend='R') {
switch(backend,
R={
log_prob <- .get_condprob(feats, weights, log_domain=TRUE,
backend='R')
cost <- -mean(log_prob * targets) + 0.5 * decay * sum(weights^2)
},
C={
cost <- .Call('get_cost_logreg_', as.matrix(feats),
t(as.matrix(weights)),
t(as.matrix(targets)),
as.double(decay))
},
CUDA={
cost <- .Call('get_cost_logreg_cuda', as.matrix(feats),
t(as.matrix(weights)),
t(as.matrix(targets)),
as.double(decay))
},
{
stop('unrecognized computation backend')
})
return(cost)
}
## * Cost gradient
##' @useDynLib gpuClassifieR, .registration=TRUE
.get_grad <- function(feats, weights, targets, decay=0.0, backend='R') {
switch(backend,
R={
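               ## R backend: gradient of the summed negative log-likelihood,
               ## t(X) %*% (P - T), plus the ridge term decay * weights (note the
               ## likelihood part is not divided by n, unlike the mean() used in
               ## .get_cost above)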
prob <- .get_condprob(feats, weights, log_domain=FALSE,
backend='R')
grad <- (t(feats) %*% (prob - targets)) + decay * weights
},
C={
grad <- t(.Call('get_grad_logreg_',
as.matrix(feats),
t(as.matrix(weights)),
t(as.matrix(targets)),
as.double(decay)))
},
CUDA={
grad <- t(.Call('get_grad_logreg_cuda',
as.matrix(feats),
t(as.matrix(weights)),
t(as.matrix(targets)),
as.double(decay)))
},
{
stop('unrecognized computation backend')
})
return(grad)
}
## * Prediction
.predict_class <- function(condprob, backend='R') {
switch(backend,
R={
predictions <- mat.or.vec(NROW(condprob), NCOL(condprob))
max_idx <- max.col(condprob, ties.method = 'first')
mapply(function(i, j) predictions[i, j] <<- 1.0,
1:NROW(condprob), max_idx)
},
C={
stop(paste(backend,' backend function not implemented'))
},
CUDA={
stop(paste(backend,' backend function not implemented'))
},
{
stop('unrecognized computation backend')
})
return(predictions)
}
## * Misclassification rate
.get_error <- function(predictions, targets, backend='R') {
switch(backend,
R={
mis_rate <- 1 - mean(rowSums(predictions * targets))
},
C={
stop(paste(backend,' not implemented'))
},
CUDA={
stop(paste(backend,' not implemented'))
},
{
stop('unrecognized computation backend')
})
return(mis_rate)
}
|
/R/model_logreg.R
|
no_license
|
IshmaelBelghazi/gpuClassifieR
|
R
| false | false | 4,494 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{centro_2015}
\alias{centro_2015}
\title{Prices of 50 Florianopolis' downtown apartments}
\format{
A tibble with 53 rows (50 samples and 3 apartments to be
appraised) and 7 variables:
\itemize{
\item valor: price, in brazilian Reais
\item area_total: Total Area, in squared meters
\item quartos: Rooms
\item suites: Ensuites
\item garagens: Garages
\item dist_b_mar: Distance to the beach
\item padrao: Building Standard - baixo, medio, alto
(i.e. low, normal, high)
}
}
\source{
\strong{HOCHHEIM, Norberto}. \emph{Engenharia de avaliacoes
imobiliarias: Modulo Basico}. Florianopolis: IBAPE/SC, 2015, p.21-22
}
\usage{
centro_2015
}
\description{
A SpatialPointsDataFrame containing a sample of 50 apartments with
prices and other attributes in Florianopolis' downtown
}
\examples{
data(centro_2015)
centro_2015$padrao <- as.numeric(centro_2015$padrao)
fit <- lm(log(valor) ~ area_total + quartos + suites + garagens +
log(dist_b_mar) + I(1/padrao), data = centro_2015)
# Look for outliers
library(car)
qqPlot(fit)
fit1 <- update(fit, subset = -c(31, 39))
qqPlot(fit1)
summary(fit1)
}
\keyword{datasets}
|
/man/centro_2015.Rd
|
permissive
|
lfpdroubi/appraiseR
|
R
| false | true | 1,243 |
rd
|
#install.packages("data.table")
library(data.table)
#install.packages("shiny")
library(shiny)
#install.packages("xlsx")
#library(xlsx)
#install.packages("rJava")
#library(rJava)
#### Load data ####
from_old_database = T
data_to_analyze = NULL
if (from_old_database) {
csv_files = c("./data/photo.csv")
for (csv in csv_files) {
data_tmp = fread(csv, encoding="UTF-8", na.strings = c("", "NA"))
data_to_analyze = rbind(data_to_analyze, data_tmp)
}
# for data from old database
data_to_analyze = data_to_analyze[, c('project', 'station', 'camera_id', 'filename', 'p_date', 'note', 'sex', 'age', 'identity', 'num')]
names(data_to_analyze) = c('Project', 'Station', 'Camera', 'FileName', 'DateTime', 'Species', 'Sex', 'Age', 'ID', 'IdvCount')
} else {
  csv_files = list.files(path="./data", pattern = "\\.(csv|txt)$", ignore.case = T, recursive = T, full.names = T)
#csv_files = c("./data/aaa.txt")
csv_files = csv_files[1:2]
for (csv in csv_files) {
data_tmp = fread(csv, encoding="UTF-8", na.strings = c("", "NA"))
data_to_analyze = rbind(data_to_analyze, data_tmp)
}
}
data_to_analyze[, unix_datetime := as.numeric(as.POSIXct(DateTime))]
data_to_analyze[is.na(IdvCount), IdvCount:=1]
data_to_analyze$IdvCount = as.numeric(data_to_analyze$IdvCount)
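# Small sanity check (our addition): rows whose DateTime failed to parse end up
# with NA unix_datetime and would silently drop out of time-based calculations.
if (any(is.na(data_to_analyze$unix_datetime))) {
  warning(sum(is.na(data_to_analyze$unix_datetime)),
          " rows have a DateTime that could not be converted to unix_datetime")
}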
source("function_data_filtering.R", encoding = "UTF-8")
source("function_nop.R", encoding = "UTF-8")
source("function_poa.R", encoding = "UTF-8")
source("function_fcs.R", encoding = "UTF-8")
source("ui.R", encoding = "UTF-8")
source("server.R", encoding = "UTF-8")
shinyApp(ui = ui, server = server)
#debug(nop)
|
/from-mai/cameraTrapR/init.R
|
no_license
|
TaiBIF/camera-trap-calculation
|
R
| false | false | 1,610 |
r
|
library(gcForest)
### Name: gcForest-package
### Title: gcForest-package
### Aliases: gcForest-package
### ** Examples
# ========= Model train=======
have_numpy <- reticulate::py_module_available("numpy")
have_sklearn <- reticulate::py_module_available("sklearn")
if(have_numpy && have_sklearn){
library(gcForest)
# req_py()
sk <- NULL
.onLoad <- function(libname, pkgname) {
sk <<- reticulate::import("sklearn", delay_load = TRUE)
}
sk <<- reticulate::import("sklearn", delay_load = TRUE)
train_test_split <- sk$model_selection$train_test_split
data <- sk$datasets$load_iris
iris <- data()
X = iris$data
y = iris$target
data_split = train_test_split(X, y, test_size=0.33)
X_tr <- data_split[[1]]
X_te <- data_split[[2]]
y_tr <- data_split[[3]]
y_te <- data_split[[4]]
gcforest_m <- gcforest(shape_1X=4L, window=2L, tolerance=0.0)
gcforest_m$fit(X_tr, y_tr)
gcf_model <- model_save(gcforest_m,'gcforest_model.model')
gcf <- model_load('gcforest_model.model')
gcf$predict(X_te)
# learn more from gcForest package tutorial
utils::vignette('gcForest-docs')
}else{
print('You should have the Python testing environment!')
}
|
/data/genthat_extracted_code/gcForest/examples/gcForest-package.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,220 |
r
|
\alias{gtkStatusIconGetTooltipText}
\name{gtkStatusIconGetTooltipText}
\title{gtkStatusIconGetTooltipText}
\description{Gets the contents of the tooltip for \code{status.icon}.}
\usage{gtkStatusIconGetTooltipText(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkStatusIcon}}}}
\details{Since 2.16}
\value{[character] the tooltip text, or \code{NULL}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/RGtk2/man/gtkStatusIconGetTooltipText.Rd
|
no_license
|
lawremi/RGtk2
|
R
| false | false | 432 |
rd
|
## This script is to implement Exercise001 from https://github.com/davegoblue/TestCodingShortcuts
## Step A: Create a baseOuctomes vector
## Step B: Fill a series of outcomes based on random numbers between 0-1
## Step C: Calculate the cumulative outcome
## Step D: Calculate where a specified condition occured
## Step E: Sort by Condition(Y/N) then _N_ at Condition then Cumulative Final
## Step A: Create a baseOutcomes vector
## Per the exercise, assume that there is an outcomes table (hard code for now)
if (file.exists("BaseOutcomes.csv")) {
baseOutcomes <- read.csv("BaseOutcomes.csv")
if (ncol(baseOutcomes) != 2) { stop("Error in CSV file, should have exactly 2 columns") }
colnames(baseOutcomes) <- c("probs","outcomes")
} else {
baseOutcomes <- data.frame(probs=c(0.01,0.02,0.05,0.18,0.24,0.50),outcomes=c(10,5,2,1,0,-1))
}
if (sum(baseOutcomes$probs)!=1 | min(baseOutcomes$probs) <= 0 |
sum(is.na(baseOutcomes$probs)) > 0 | sum(is.na(baseOutcomes$outcomes)) > 0) {
stop("Please resolve the issue with inputs for probs and outcomes, aborting")
}
## Null the baseOutcomes$outcomes where outcomes >= X
myHurdle <- ">=119"
myCond <- parse(text=paste0("baseOutcomes$outcomes",myHurdle))
baseOutcomes$outcomes[eval(myCond)] <- 0
baseMean <- sum(baseOutcomes$probs*baseOutcomes$outcomes)
baseVar <- sum(baseOutcomes$probs*(baseOutcomes$outcomes-baseMean)^2)
print(paste0("Probabilities sum to 1. Outcomes has mean ",format(baseMean,digits=3),
" and variance ",format(baseVar,digits=3)))
## Make the CDF vector
myCDF <- numeric(nrow(baseOutcomes)+1)
myCDF[1] <- 0
for ( intCtr in 1:nrow(baseOutcomes) ) {
myCDF[intCtr+1] <- myCDF[intCtr] + baseOutcomes$probs[intCtr]
}
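## Quick self-checks (our addition): the CDF should end at 1 up to floating point,
## and findInterval() maps a uniform draw to the row of baseOutcomes whose
## probability band contains it -- the same lookup used to fill mtxOutcomes below.
stopifnot(abs(myCDF[length(myCDF)] - 1) < 1e-8)
findInterval(0.999, myCDF, rightmost.closed=TRUE) ## row 6 (outcome -1) with the default table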
## Step B: Fill a series of outcomes based on random numbers between 0-1
nTrials <- 8000
nPerTrial <- 4000
mtxRands <- matrix(data=runif(nTrials*nPerTrial,0,1),nrow=nPerTrial,ncol=nTrials)
mtxOutcomes <- matrix(baseOutcomes$outcomes[findInterval(mtxRands,myCDF,rightmost.closed=TRUE)],
nrow=nPerTrial,ncol=nTrials)
print(paste0("Ouctomes across ",nTrials*nPerTrial," draws have mean: ",
format(mean(mtxOutcomes),digits=3)," and variance: ",format(sd(mtxOutcomes)^2,digits=3))
)
## Step C: Calculate the cumulative total for each column in mtxOutcomes
## cumsum() works properly on columns of a data frame, but coerces a matrix to a single vector (no good)
## So, create a data frame for cumulative sums - each row is a trial, each column is an experiment
dfCumOutcomes <- cumsum(as.data.frame(mtxOutcomes))
maxPerTrial <- as.numeric(apply(dfCumOutcomes,2,FUN=max))
minPerTrial <- as.numeric(apply(dfCumOutcomes,2,FUN=min))
lastPerTrial <- as.numeric(dfCumOutcomes[nrow(dfCumOutcomes),])
dfSummary <- data.frame(myTrial = NA, myMax = maxPerTrial, myMin = minPerTrial, myLast = lastPerTrial,
myCond = FALSE, myN_Cond = NA, myVal_Cond = NA)
## Step D: Calculate where a specified condition first occurred
## Can I find a way to do this more efficiently than once each per column?
## While not considered elegant, parse() followed by eval() seems to do the job
myHurdle <- "<=-120"
myCond <- parse(text=paste0("dfCumOutcomes",myHurdle))
dfCondOutcomes <- eval(myCond)
for ( intCtr in 1:nTrials ) {
dfSummary$myTrial[intCtr] = intCtr
dfSummary$myCond[intCtr] <- sum(dfCondOutcomes[,intCtr]) > 0
myBool <- dfCondOutcomes[,intCtr] & !duplicated(dfCondOutcomes[,intCtr]) ## & works, && does not
if (sum(myBool) > 1) {
stop("Error, 2 or more non-duplicated TRUE may not occur, aborting")
} else if (sum(myBool) == 1) {
keyBool <- sum(1, cumsum(myBool) == 0) ## myBool is F F . . . F T F . . . F F F; cumsum(myBool)
## will get all the F prior to that first T
dfSummary$myN_Cond[intCtr] <- keyBool ## The single T is where myCond first happened
dfSummary$myVal_Cond[intCtr] <- dfCumOutcomes[keyBool,intCtr]
}
}
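## One possible vectorized alternative to the per-column loop above (a sketch,
## not used for the results below): which() gives the first TRUE directly, and
## returns NA for trials where the condition never occurs.
firstHit <- apply(dfCondOutcomes, 2, function(myCol) which(myCol)[1])
## dfSummary$myN_Cond computed above should agree with firstHit wherever myCond is TRUE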
## Step E: Sort by Condition(Y then N) then _N_ at Condition then Cumulative Final
dfResults <- dfSummary[order(-dfSummary$myCond, dfSummary$myN_Cond, -dfSummary$myLast),]
print(summary(dfResults))
## Would be good to have the x and y units auto-calculated
minX <- min(dfSummary$myMin) ## Find most negative element
maxX <- max(0, dfSummary$myLast) ## Find most positive element (use 0 if all are negative)
powX <- log10(max(1, abs(minX), abs(maxX))) ## Find rough "power" of data
unitX <- 10^(round(powX,0)-1) ## If thousands, use hundreds; if hundreds, use tens; etc.
minX <- unitX*(floor(minX/unitX)-1) ## Round to similar units as unitX
maxX <- unitX*(ceiling(maxX/unitX)+1) ## Round to similar units as unitX
hist(dfSummary$myMin,
col=rgb(1,0,0,.25),
main=paste0("Results: ",nTrials," Trials (",nPerTrial," draws per trial)"),
xlab="Units", ylab="N Trials",
breaks=seq(minX,maxX,by=unitX),
xlim=c(minX, maxX)
)
hist(dfSummary$myLast,col=rgb(0,0,1,.25),
breaks=seq(minX,maxX,by=unitX),
xlim=c(minX,maxX),
add=TRUE
)
legend("topright",col=c(rgb(1,0,0,.25),rgb(0,0,1,.25),rgb(0.5,0,0.5,0.5)),
legend=c("Minimum","Final","Overlap"),pch=20,pt.cex=2)
|
/Exercise001_v007.R
|
no_license
|
davegoblue/TestCodingShortcuts
|
R
| false | false | 5,319 |
r
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/fitDistributions.R
\name{Rnorm.exp}
\alias{Rnorm.exp}
\title{Rnorm.exp fits a normal+exponential distribution to a specified data
vector using maximum likelihood.}
\usage{
Rnorm.exp(xi, wi = rep(1, NROW(xi)), guess = c(0.5, 0, 1, 1),
tol = sqrt(.Machine$double.eps), maxit = 10000)
}
\arguments{
\item{xi}{A vector of observations, assumed to be real numbers in the
interval (-Inf,+Inf).}
\item{wi}{A vector of weights. Default: a vector of 1s, indicating
all observations are weighted equally. (Are these normalized internally?!
Or do they have to be [0,1]?)}
\item{guess}{Initial guess for parameters. Default: c(0.5, 0, 1, 1).}
\item{tol}{Convergence tolerance. Default: sqrt(.Machine$double.eps).}
\item{maxit}{Maximum number of iterations. Default: 10,000.}
}
\value{
Returns a list of parameters for the best-fit normal+exponential distribution
(alpha, mean, variance, and lambda).
}
\description{
Distribution function defined by: alpha*Normal(mean, variance) + (1-alpha)
*Exponential(lambda).
}
\details{
Fits nicely with data that look normal overall, but have a long
tail extending into positive values.
}
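\examples{
## Illustrative only (our example, not from the package authors): draw from the
## assumed normal + exponential mixture and fit it back.
set.seed(1)
n <- 1000
isNorm <- runif(n) < 0.7
xi <- ifelse(isNorm, rnorm(n, mean = 0, sd = 1), rexp(n, rate = 0.5))
fit <- Rnorm.exp(xi)
fit
}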
\author{
Charles G. Danko
}
|
/man/Rnorm.exp.Rd
|
no_license
|
Kraus-Lab/groHMM
|
R
| false | false | 1,238 |
rd
|
shinyUI(fluidPage(#theme = "bootstrap.css",
tags$h3("Status of Social Determinants of Health Indicators and Disparities for California Counties"),
sidebarPanel(
#conditionalPanel(condition = paste("input.ID == ",c(1,2,3,4,5,6),c(rep("|",5),""), collapse=""),
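    # Note: `cP` appears to be a project-specific wrapper around conditionalPanel
    # defined elsewhere (e.g. in global.R). The paste() calls build a JavaScript
    # condition such as "input.ID ==  1 |input.ID ==  2 |..." so each control is
    # shown only on the matching tabs (tab identity comes from each tabPanel's
    # `value` argument in the tabsetPanel below).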
cP(c(1,2,3,4,5,6),
selectInput("myV1","Indicator 1:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5), selected=1),selectize=FALSE),
conditionalPanel(condition = paste("input.ID == ",c(3),c(rep("|",0),""), collapse=""),checkboxInput("myOne", "One Indicator Only",value=TRUE)),
# conditionalPanel(condition = paste0("input.ID == ",c(3),c(rep("|",0)),collapse=""), selectInput("myV2X","Indicator 2:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5),selected=2)), # "none"=0,
conditionalPanel(condition = paste0("input.myOne == false & (", paste0("input.ID == ",c(3),c(rep("|",0)),collapse=""),")",collapse=""), selectInput("myV2X","Indicator 2:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5),selected=2)), # "none"=0,
conditionalPanel(condition = paste0("input.ID == ",c(1,2),c(rep("|",1),""),collapse=""), selectInput("myV2","Indicator 2:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5),selected=2)),
#conditionalPanel(condition = paste("input.ID == ",c(5,6),c(rep("|",1),""), collapse=""),selectInput("myRace","Race/Ethnic Group",choices = r1name, selected="Total")),
conditionalPanel(condition = paste("input.ID == ",c(1),c(rep("|",0),""), collapse=""),checkboxInput("mylim100", "full axis range",value=TRUE)),
conditionalPanel(condition = paste("input.ID == ",c(1,2),c(rep("|",1),""), collapse=""),radioButtons("myGeo", "Geographic Level:",choices=c("CO","CT"))),
conditionalPanel(condition = paste("input.ID == ",c(1,3),c(rep("|",1),""), collapse=""),selectInput("myGeoname", "Location:",choices = c("California",lhjL) )),
conditionalPanel(condition = paste("input.ID == ",c(2),c(rep("|",0),""), collapse=""),radioButtons("myD", label=NULL,choices=c("Density","Boxplot"))),
hr(),
helpText("Data Source: California Department of Public Health","Office of Health Equity",
tags$a(href="https://www.cdph.ca.gov/programs/Pages/HealthyCommunityIndicators.aspx#DataIndAv",
"Healthy Communities Data and Indicators Project (HCI)")),
hr(),
includeText("Text1.txt"),
hr(),
helpText(h6("for questions or suggestions please email: samfam921@gmail.com")) #,style="color:blue",
),
mainPanel(
# conditionalPanel(condition = "input.ID == 1",
# helpText("Two Indicator Scatterplot Showing Indicator Status by County and by Race/ethnicity. This visualization allows the #exploration of two indicator value estimates simultaneously, by race-ethnicity and in relation to the state averages (cross lines in the plot). #Points in the upper right corner represent counties with estimates above state average that could be in higher need for interventions.")),
hr(),
tabsetPanel(type = "tabs",
tabPanel("Within-County Disparity", plotOutput("bar1",height=500), textOutput("barTxt"), value=3), # ,width=800,height=600
tabPanel("Scatterplot", plotOutput("scatter1",height=500), textOutput("scatterTxt"), value=1),
tabPanel("Distribition", plotOutput("dist1"), value=2),
tabPanel("County Map", plotOutput("map1"), value=4),
tabPanel("Census Tract Map", plotOutput("map2"), value=5),
tabPanel("Census Tract Map - Zoom", leafletOutput("map3",width=600,height=600), value=6),
id="ID")
# could use includeText("Scatter.txt")textOutput("junk") for external file if more efficient
)))
#tabPanel("Scatterplot", plotOutput("scatter1", click = "scatter1_click"), verbatimTextOutput("click_info")),
|
/Archive/Xui.R
|
no_license
|
vargovargo/exploreHCI
|
R
| false | false | 3,991 |
r
|
shinyUI(fluidPage(#theme = "bootstrap.css",
tags$h3("Status of Social Determinants of Health Indicators and Disparities for California Counties"),
sidebarPanel(
#conditionalPanel(condition = paste("input.ID == ",c(1,2,3,4,5,6),c(rep("|",5),""), collapse=""),
cP(c(1,2,3,4,5,6),
selectInput("myV1","Indicator 1:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5), selected=1),selectize=FALSE),
conditionalPanel(condition = paste("input.ID == ",c(3),c(rep("|",0),""), collapse=""),checkboxInput("myOne", "One Indicator Only",value=TRUE)),
# conditionalPanel(condition = paste0("input.ID == ",c(3),c(rep("|",0)),collapse=""), selectInput("myV2X","Indicator 2:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5),selected=2)), # "none"=0,
conditionalPanel(condition = paste0("input.myOne == false & (", paste0("input.ID == ",c(3),c(rep("|",0)),collapse=""),")",collapse=""), selectInput("myV2X","Indicator 2:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5),selected=2)), # "none"=0,
conditionalPanel(condition = paste0("input.ID == ",c(1,2),c(rep("|",1),""),collapse=""), selectInput("myV2","Indicator 2:",choices=list("Poverty"=1,"Parks"=2,"Healthy Food"=3,"Child Neglect"=4,"Alcohol Outlets"=5),selected=2)),
#conditionalPanel(condition = paste("input.ID == ",c(5,6),c(rep("|",1),""), collapse=""),selectInput("myRace","Race/Ethnic Group",choices = r1name, selected="Total")),
conditionalPanel(condition = paste("input.ID == ",c(1),c(rep("|",0),""), collapse=""),checkboxInput("mylim100", "full axis range",value=TRUE)),
conditionalPanel(condition = paste("input.ID == ",c(1,2),c(rep("|",1),""), collapse=""),radioButtons("myGeo", "Geographic Level:",choices=c("CO","CT"))),
conditionalPanel(condition = paste("input.ID == ",c(1,3),c(rep("|",1),""), collapse=""),selectInput("myGeoname", "Location:",choices = c("California",lhjL) )),
conditionalPanel(condition = paste("input.ID == ",c(2),c(rep("|",0),""), collapse=""),radioButtons("myD", label=NULL,choices=c("Density","Boxplot"))),
hr(),
helpText("Data Source: California Department of Public Health","Office of Health Equity",
tags$a(href="https://www.cdph.ca.gov/programs/Pages/HealthyCommunityIndicators.aspx#DataIndAv",
"Healthy Communities Data and Indicators Project (HCI)")),
hr(),
includeText("Text1.txt"),
hr(),
helpText(h6("for questions or suggestions please email: samfam921@gmail.com")) #,style="color:blue",
),
mainPanel(
# conditionalPanel(condition = "input.ID == 1",
# helpText("Two Indicator Scatterplot Showing Indicator Status by County and by Race/ethnicity. This visualization allows the #exploration of two indicator value estimates simultaneously, by race-ethnicity and in relation to the state averages (cross lines in the plot). #Points in the upper right corner represent counties with estimates above state average that could be in higher need for interventions.")),
hr(),
tabsetPanel(type = "tabs",
tabPanel("Within-County Disparity", plotOutput("bar1",height=500), textOutput("barTxt"), value=3), # ,width=800,height=600
tabPanel("Scatterplot", plotOutput("scatter1",height=500), textOutput("scatterTxt"), value=1),
tabPanel("Distribition", plotOutput("dist1"), value=2),
tabPanel("County Map", plotOutput("map1"), value=4),
tabPanel("Census Tract Map", plotOutput("map2"), value=5),
tabPanel("Census Tract Map - Zoom", leafletOutput("map3",width=600,height=600), value=6),
id="ID")
# could use includeText("Scatter.txt")textOutput("junk") for external file if more efficient
)))
#tabPanel("Scatterplot", plotOutput("scatter1", click = "scatter1_click"), verbatimTextOutput("click_info")),
|
#!/usr/bin/Rscript
library(methods) # loaded explicitly for get() below (may not be attached when run via Rscript)
usage <- "Makes many images for a single data frame of pairwise comparisons: one for each thing being compared.
Usage:
Rscript distance-maps.R (geog distance file) (genetic distance file) (sample info directory) (output directory)
"
argvec <- if (interactive()) { scan(what="char") } else { commandArgs(TRUE) }
if (length(argvec)<4) { stop(usage) }
dist1.file <- argvec[1]
dist2.file <- argvec[2]
indir <- argvec[3]
outdir <- argvec[4]
dist1 <- read.csv(dist1.file,header=TRUE,stringsAsFactors=FALSE)
dist2 <- read.csv(dist2.file,header=TRUE,stringsAsFactors=FALSE)
dir.create(outdir,showWarnings=FALSE)
# locations
coord.obj <- load(file.path(indir,"geog_coords.RData"))
coords <- get(coord.obj)
tort.ids <- row.names(coords)
# read in other info
pcs <- read.csv(file.path(indir,"pcs.csv"),header=TRUE,stringsAsFactors=FALSE)
stopifnot( all( tort.ids %in% pcs$etort ) )
pc.cols <- adjustcolor( ifelse( pcs$PC1[match(tort.ids,pcs$etort)] > 0, "blue", "purple" ), .75 )
require(raster)
layer <- raster("../visualization/dem_30")
player <- function (main='') { plot(layer,legend=FALSE,xlab="",ylab="",xaxt="n",yaxt="n",legend.mar=0,box=FALSE,main=main) }
##
# find a good ordering
require(TSP)
xy <- coordinates(coords)
etsp <- ETSP( xy, labels=rownames(xy) )
tour <- solve_TSP( etsp, method="linkern" )
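# Two-letter prefixes ("aa", "ab", "ac", ...) so the per-tortoise output files sort in tour order.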
tour.labels <- t(outer(letters,letters,paste,sep=''))[seq_len(length(tour))]
png( file=file.path(outdir,"plot-order.png"), width=4*144, height=4*144, pointsize=10, res=144 )
player("tour")
segments(x0=xy[tour,1],x1=xy[c(tour[-1],tour[1]),1],
y0=xy[tour,2],y1=xy[c(tour[-1],tour[1]),2])
dev.off()
# First plot self-comparisons
png( file=file.path(outdir,"self-comparisons.png"), width=12*144, height=4*144, pointsize=10, res=144 )
layout(t(1:3))
opar <- par(mar=c(1,1,2,1))
usethese <- ( dist2$etort1 == dist2$etort2 )
thiscolors <- pc.cols[ match(dist2$etort1,tort.ids) ]
x <- dist2[usethese,3]
player("self-similarities")
points(coords,pch=20,col=pc.cols,cex=3/(1+exp((x-min(x))/sd(x))))
player("self-distances")
points(coords,pch=20,col=pc.cols,cex=(x-min(x))/(2*sd(x)))
par(opar)
plot( dist1[,3], dist2[,3], pch=20, cex=.5,
col=adjustcolor("black",.25), xlab=dist1.file, ylab=dist2.file )
dev.off()
relatives <- ( (dist2$etort1==dist2$etort2) | ( dist2[,3] < quantile(subset(dist2,etort1==etort2)[,3],0.75) ) )
mindist <- min(dist2[!relatives,3])
sddist <- sd(dist2[!relatives,3])
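# sfn(): logistic point-size scaling -- distances near the minimum (closest relatives)
# get the largest cex, shrinking towards zero as genetic distance grows.
# dfn(): simple linear scaling of distance in units of two standard deviations.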
sfn <- function (x,max.cex=7) {
max.cex/( 1 + exp( (x-mindist)/sddist ) )
}
dfn <- function (x) {
(x-mindist)/(2*sddist)
}
for (k in seq_along(tort.ids)) {
tid <- tort.ids[tour[k]]
cat(tid,"\n")
png( file=file.path(outdir,paste(tour.labels[k], "_",gsub("[^0-9a-z-]","_",tid),".png",sep='')), width=12*144, height=4*144, pointsize=10, res=144 )
layout(t(1:3))
opar <- par(mar=c(1,1,2,1))
usethese <- ( dist2$etort1 != dist2$etort2 ) & ( ( dist2$etort1 == tid ) | ( dist2$etort2 == tid ) )
otherone <- ifelse( dist2$etort1[usethese] == tid, dist2$etort2[usethese], dist2$etort1[usethese] )
thiscolors <- pc.cols[ match(otherone,tort.ids) ]
player(paste(tid," similarities"))
points(coords[match(otherone,tort.ids)],pch=20,cex=sfn(dist2[,3][usethese]),col=thiscolors)
points(coords[match(tid,tort.ids)],pch="*",cex=2)
player(paste(tid," distances"))
points(coords[match(otherone,tort.ids)],pch=20,cex=dfn(dist2[,3][usethese]),col=thiscolors)
points(coords[match(tid,tort.ids)],pch="*",cex=2)
par(opar)
plot( dist1[,3], dist2[,3], pch=20, cex=.5,
col=adjustcolor("black",.25), xlab=dist1.file, ylab=dist2.file )
points( dist1[,3][usethese], dist2[,3][usethese], pch=20, col=thiscolors, cex=1.5 )
dev.off()
}
|
/visualization/distance-maps.R
|
no_license
|
petrelharp/tortoisescape
|
R
| false | false | 3,839 |
r
|
#!/usr/bin/Rscript
library(methods) # loaded explicitly for get() below (may not be attached when run via Rscript)
usage <- "Makes many images for a single data frame of pairwise comparisons: one for each thing being compared.
Usage:
Rscript distance-maps.R (geog distance file) (genetic distance file) (sample info directory) (output directory)
"
argvec <- if (interactive()) { scan(what="char") } else { commandArgs(TRUE) }
if (length(argvec)<4) { stop(usage) }
dist1.file <- argvec[1]
dist2.file <- argvec[2]
indir <- argvec[3]
outdir <- argvec[4]
dist1 <- read.csv(dist1.file,header=TRUE,stringsAsFactors=FALSE)
dist2 <- read.csv(dist2.file,header=TRUE,stringsAsFactors=FALSE)
dir.create(outdir,showWarnings=FALSE)
# locations
coord.obj <- load(file.path(indir,"geog_coords.RData"))
coords <- get(coord.obj)
tort.ids <- row.names(coords)
# read in other info
pcs <- read.csv(file.path(indir,"pcs.csv"),header=TRUE,stringsAsFactors=FALSE)
stopifnot( all( tort.ids %in% pcs$etort ) )
pc.cols <- adjustcolor( ifelse( pcs$PC1[match(tort.ids,pcs$etort)] > 0, "blue", "purple" ), .75 )
require(raster)
layer <- raster("../visualization/dem_30")
player <- function (main='') { plot(layer,legend=FALSE,xlab="",ylab="",xaxt="n",yaxt="n",legend.mar=0,box=FALSE,main=main) }
##
# find a good ordering
require(TSP)
xy <- coordinates(coords)
etsp <- ETSP( xy, labels=rownames(xy) )
tour <- solve_TSP( etsp, method="linkern" )
tour.labels <- t(outer(letters,letters,paste,sep=''))[seq_len(length(tour))]
png( file=file.path(outdir,"plot-order.png"), width=4*144, height=4*144, pointsize=10, res=144 )
player("tour")
segments(x0=xy[tour,1],x1=xy[c(tour[-1],tour[1]),1],
y0=xy[tour,2],y1=xy[c(tour[-1],tour[1]),2])
dev.off()
# First plot self-comparisons
png( file=file.path(outdir,"self-comparisons.png"), width=12*144, height=4*144, pointsize=10, res=144 )
layout(t(1:3))
opar <- par(mar=c(1,1,2,1))
usethese <- ( dist2$etort1 == dist2$etort2 )
thiscolors <- pc.cols[ match(dist2$etort1,tort.ids) ]
x <- dist2[usethese,3]
player("self-similarities")
points(coords,pch=20,col=pc.cols,cex=3/(1+exp((x-min(x))/sd(x))))
player("self-distances")
points(coords,pch=20,col=pc.cols,cex=(x-min(x))/(2*sd(x)))
par(opar)
plot( dist1[,3], dist2[,3], pch=20, cex=.5,
col=adjustcolor("black",.25), xlab=dist1.file, ylab=dist2.file )
dev.off()
relatives <- ( (dist2$etort1==dist2$etort2) | ( dist2[,3] < quantile(subset(dist2,etort1==etort2)[,3],0.75) ) )
mindist <- min(dist2[!relatives,3])
sddist <- sd(dist2[!relatives,3])
sfn <- function (x,max.cex=7) {
max.cex/( 1 + exp( (x-mindist)/sddist ) )
}
dfn <- function (x) {
(x-mindist)/(2*sddist)
}
for (k in seq_along(tort.ids)) {
tid <- tort.ids[tour[k]]
cat(tid,"\n")
png( file=file.path(outdir,paste(tour.labels[k], "_",gsub("[^0-9a-z-]","_",tid),".png",sep='')), width=12*144, height=4*144, pointsize=10, res=144 )
layout(t(1:3))
opar <- par(mar=c(1,1,2,1))
usethese <- ( dist2$etort1 != dist2$etort2 ) & ( ( dist2$etort1 == tid ) | ( dist2$etort2 == tid ) )
otherone <- ifelse( dist2$etort1[usethese] == tid, dist2$etort2[usethese], dist2$etort1[usethese] )
thiscolors <- pc.cols[ match(otherone,tort.ids) ]
player(paste(tid," similarities"))
points(coords[match(otherone,tort.ids)],pch=20,cex=sfn(dist2[,3][usethese]),col=thiscolors)
points(coords[match(tid,tort.ids)],pch="*",cex=2)
player(paste(tid," distances"))
points(coords[match(otherone,tort.ids)],pch=20,cex=dfn(dist2[,3][usethese]),col=thiscolors)
points(coords[match(tid,tort.ids)],pch="*",cex=2)
par(opar)
plot( dist1[,3], dist2[,3], pch=20, cex=.5,
col=adjustcolor("black",.25), xlab=dist1.file, ylab=dist2.file )
points( dist1[,3][usethese], dist2[,3][usethese], pch=20, col=thiscolors, cex=1.5 )
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalize.R
\name{step_normalize}
\alias{step_normalize}
\title{Center and scale numeric data}
\usage{
step_normalize(
recipe,
...,
role = NA,
trained = FALSE,
means = NULL,
sds = NULL,
na_rm = TRUE,
skip = FALSE,
id = rand_id("normalize")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose variables
for this step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{means}{A named numeric vector of means. This is \code{NULL} until computed
by \code{\link[=prep]{prep()}}.}
\item{sds}{A named numeric vector of standard deviations. This is \code{NULL} until
computed by \code{\link[=prep]{prep()}}.}
\item{na_rm}{A logical value indicating whether \code{NA} values should be removed
when computing the standard deviation and mean.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_normalize()} creates a \emph{specification} of a recipe step that will
normalize numeric data to have a standard deviation of one and a mean of
zero.
}
\details{
Centering data means that the average of a variable is subtracted
from the data. Scaling data means that the standard deviation of a variable
is divided out of the data. \code{step_normalize} estimates the variable standard
deviations and means from the data used in the \code{training} argument of
\code{prep.recipe}. \code{\link{bake.recipe}} then applies the scaling to new data sets using
these estimates.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with columns
\code{terms} (the selectors or variables selected), \code{value} (the standard
deviations and means), and \code{statistic} for the type of value is
returned.
}
\section{Case weights}{
This step performs an unsupervised operation that can utilize case weights.
As a result, case weights are only used with frequency weights. For more
information, see the documentation in \link{case_weights} and the examples on
\code{tidymodels.org}.
}
\examples{
\dontshow{if (rlang::is_installed("modeldata")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
data(biomass, package = "modeldata")
biomass_tr <- biomass[biomass$dataset == "Training", ]
biomass_te <- biomass[biomass$dataset == "Testing", ]
rec <- recipe(
HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr
)
norm_trans <- rec \%>\%
step_normalize(carbon, hydrogen)
norm_obj <- prep(norm_trans, training = biomass_tr)
transformed_te <- bake(norm_obj, biomass_te)
biomass_te[1:10, names(transformed_te)]
transformed_te
tidy(norm_trans, number = 1)
tidy(norm_obj, number = 1)
# To keep the original variables in the output, use `step_mutate_at`:
norm_keep_orig <- rec \%>\%
step_mutate_at(all_numeric_predictors(), fn = list(orig = ~.)) \%>\%
step_normalize(-contains("orig"), -all_outcomes())
keep_orig_obj <- prep(norm_keep_orig, training = biomass_tr)
keep_orig_te <- bake(keep_orig_obj, biomass_te)
keep_orig_te
\dontshow{\}) # examplesIf}
}
\seealso{
Other normalization steps:
\code{\link{step_center}()},
\code{\link{step_range}()},
\code{\link{step_scale}()}
}
\concept{normalization steps}
|
/man/step_normalize.Rd
|
permissive
|
tidymodels/recipes
|
R
| false | true | 4,032 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalize.R
\name{step_normalize}
\alias{step_normalize}
\title{Center and scale numeric data}
\usage{
step_normalize(
recipe,
...,
role = NA,
trained = FALSE,
means = NULL,
sds = NULL,
na_rm = TRUE,
skip = FALSE,
id = rand_id("normalize")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose variables
for this step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{means}{A named numeric vector of means. This is \code{NULL} until computed
by \code{\link[=prep]{prep()}}.}
\item{sds}{A named numeric vector of standard deviations. This is \code{NULL} until
computed by \code{\link[=prep]{prep()}}.}
\item{na_rm}{A logical value indicating whether \code{NA} values should be removed
when computing the standard deviation and mean.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_normalize()} creates a \emph{specification} of a recipe step that will
normalize numeric data to have a standard deviation of one and a mean of
zero.
}
\details{
Centering data means that the average of a variable is subtracted
from the data. Scaling data means that the standard deviation of a variable
is divided out of the data. \code{step_normalize} estimates the variable standard
deviations and means from the data used in the \code{training} argument of
\code{prep.recipe}. \code{\link{bake.recipe}} then applies the scaling to new data sets using
these estimates.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with columns
\code{terms} (the selectors or variables selected), \code{value} (the standard
deviations and means), and \code{statistic} for the type of value is
returned.
}
\section{Case weights}{
This step performs an unsupervised operation that can utilize case weights.
As a result, case weights are only used with frequency weights. For more
information, see the documentation in \link{case_weights} and the examples on
\code{tidymodels.org}.
}
\examples{
\dontshow{if (rlang::is_installed("modeldata")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
data(biomass, package = "modeldata")
biomass_tr <- biomass[biomass$dataset == "Training", ]
biomass_te <- biomass[biomass$dataset == "Testing", ]
rec <- recipe(
HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr
)
norm_trans <- rec \%>\%
step_normalize(carbon, hydrogen)
norm_obj <- prep(norm_trans, training = biomass_tr)
transformed_te <- bake(norm_obj, biomass_te)
biomass_te[1:10, names(transformed_te)]
transformed_te
tidy(norm_trans, number = 1)
tidy(norm_obj, number = 1)
# To keep the original variables in the output, use `step_mutate_at`:
norm_keep_orig <- rec \%>\%
step_mutate_at(all_numeric_predictors(), fn = list(orig = ~.)) \%>\%
step_normalize(-contains("orig"), -all_outcomes())
keep_orig_obj <- prep(norm_keep_orig, training = biomass_tr)
keep_orig_te <- bake(keep_orig_obj, biomass_te)
keep_orig_te
\dontshow{\}) # examplesIf}
}
\seealso{
Other normalization steps:
\code{\link{step_center}()},
\code{\link{step_range}()},
\code{\link{step_scale}()}
}
\concept{normalization steps}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSSpectrum.R
\name{MSSpectrum}
\alias{MSSpectrum}
\title{MSSpectrum Interface}
\description{
MSSpectrum Interface
}
\section{Constructor}{
MSSpectrum$new()
}
\section{Methods}{
\code{$getMSLevel()} Get Spectrum Level.
\code{$setMSLevel()} Set Spectrum Level.
\code{$getRT()} Get the retention time.
\code{$setRT()} Set the retention time.
\code{$get_peaks()} Get the peak values (m/z and intensity) as a list containing two lists, for m/z and intensity.
\code{$set_peaks()} Set the peak values (m/z and intensity) as a list containing two lists, for m/z and intensity.
\code{$size()} Get the count of peaks.
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{MSSpectrum$new()}}
\item \href{#method-getMSLevel}{\code{MSSpectrum$getMSLevel()}}
\item \href{#method-setMSLevel}{\code{MSSpectrum$setMSLevel()}}
\item \href{#method-getRT}{\code{MSSpectrum$getRT()}}
\item \href{#method-setRT}{\code{MSSpectrum$setRT()}}
\item \href{#method-get_peaks}{\code{MSSpectrum$get_peaks()}}
\item \href{#method-set_peaks}{\code{MSSpectrum$set_peaks()}}
\item \href{#method-size}{\code{MSSpectrum$size()}}
\item \href{#method-set_py_obj}{\code{MSSpectrum$set_py_obj()}}
\item \href{#method-get_py_obj}{\code{MSSpectrum$get_py_obj()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$new()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getMSLevel"></a>}}
\if{latex}{\out{\hypertarget{method-getMSLevel}{}}}
\subsection{Method \code{getMSLevel()}}{
Get Spectrum Level
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$getMSLevel()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Spectrum Level.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setMSLevel"></a>}}
\if{latex}{\out{\hypertarget{method-setMSLevel}{}}}
\subsection{Method \code{setMSLevel()}}{
Set Spectrum Level
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$setMSLevel(level)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{level}}{level number.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getRT"></a>}}
\if{latex}{\out{\hypertarget{method-getRT}{}}}
\subsection{Method \code{getRT()}}{
Get Retention Time
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$getRT()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
retention time
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setRT"></a>}}
\if{latex}{\out{\hypertarget{method-setRT}{}}}
\subsection{Method \code{setRT()}}{
Set Retention Time
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$setRT(time)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{time}}{RT for the spectrum}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_peaks"></a>}}
\if{latex}{\out{\hypertarget{method-get_peaks}{}}}
\subsection{Method \code{get_peaks()}}{
Get peak list from the Spectrum
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$get_peaks()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
list of two lists, for m/z and intensity, corresponding to peaks.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_peaks"></a>}}
\if{latex}{\out{\hypertarget{method-set_peaks}{}}}
\subsection{Method \code{set_peaks()}}{
Set peak list for the Spectrum
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$set_peaks(peaks)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{peaks}}{list with two lists, for m/z and intensity, corresponding to peaks.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-size"></a>}}
\if{latex}{\out{\hypertarget{method-size}{}}}
\subsection{Method \code{size()}}{
Get the count of peaks.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$size()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
count of peaks.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_py_obj"></a>}}
\if{latex}{\out{\hypertarget{method-set_py_obj}{}}}
\subsection{Method \code{set_py_obj()}}{
Setter method for wrapped object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$set_py_obj(Py_obj)}\if{html}{\out{</div>}}
}
\subsection{Details}{
Sets the underlying python object of MSSpectrum.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_py_obj"></a>}}
\if{latex}{\out{\hypertarget{method-get_py_obj}{}}}
\subsection{Method \code{get_py_obj()}}{
Getter method for wrapped object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$get_py_obj()}\if{html}{\out{</div>}}
}
\subsection{Details}{
Returns the underlying python object of MSSpectrum.
}
}
}
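% Editor's note: illustrative sketch only (not from the package sources); it assumes
% the wrapped Python backend is installed and reachable, and mirrors the method
% descriptions above.
\examples{
\dontrun{
spec <- MSSpectrum$new()
spec$setMSLevel(1)
spec$setRT(205.2)
# peaks are supplied as a list of two vectors/lists: m/z values and intensities
spec$set_peaks(list(c(100.1, 200.2, 300.3), c(1e4, 5e3, 2e3)))
spec$size()       # should report the number of peaks set above
spec$get_peaks()  # list of m/z values and intensities
}
}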
|
/man/MSSpectrum.Rd
|
no_license
|
24sharkS/theRopenms
|
R
| false | true | 5,157 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSSpectrum.R
\name{MSSpectrum}
\alias{MSSpectrum}
\title{MSSpectrum Interface}
\description{
MSSpectrum Interface
}
\section{Constructor}{
MSSpectrum$new()
}
\section{Methods}{
\code{$getMSLevel()} Get Spectrum Level.
\code{$setMSLevel()} Set Spectrum Level.
\code{$getRT()} Get the retention time.
\code{$setRT()} Set the retention time.
\code{$get_peaks()} Get the peak values (m/z and intensity) as a list containing two lists, for m/z and intensity.
\code{$set_peaks()} Set the peak values (m/z and intensity) as a list containing two lists, for m/z and intensity.
\code{$size()} Get the count of peaks.
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{MSSpectrum$new()}}
\item \href{#method-getMSLevel}{\code{MSSpectrum$getMSLevel()}}
\item \href{#method-setMSLevel}{\code{MSSpectrum$setMSLevel()}}
\item \href{#method-getRT}{\code{MSSpectrum$getRT()}}
\item \href{#method-setRT}{\code{MSSpectrum$setRT()}}
\item \href{#method-get_peaks}{\code{MSSpectrum$get_peaks()}}
\item \href{#method-set_peaks}{\code{MSSpectrum$set_peaks()}}
\item \href{#method-size}{\code{MSSpectrum$size()}}
\item \href{#method-set_py_obj}{\code{MSSpectrum$set_py_obj()}}
\item \href{#method-get_py_obj}{\code{MSSpectrum$get_py_obj()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$new()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getMSLevel"></a>}}
\if{latex}{\out{\hypertarget{method-getMSLevel}{}}}
\subsection{Method \code{getMSLevel()}}{
Get Spectrum Level
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$getMSLevel()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Spectrum Level.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setMSLevel"></a>}}
\if{latex}{\out{\hypertarget{method-setMSLevel}{}}}
\subsection{Method \code{setMSLevel()}}{
Set Spectrum Level
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$setMSLevel(level)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{level}}{level number.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getRT"></a>}}
\if{latex}{\out{\hypertarget{method-getRT}{}}}
\subsection{Method \code{getRT()}}{
Get Retention Time
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$getRT()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
retention time
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-setRT"></a>}}
\if{latex}{\out{\hypertarget{method-setRT}{}}}
\subsection{Method \code{setRT()}}{
Set Retention Time
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$setRT(time)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{time}}{RT for the spectrum}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_peaks"></a>}}
\if{latex}{\out{\hypertarget{method-get_peaks}{}}}
\subsection{Method \code{get_peaks()}}{
Get peak list from the Spectrum
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$get_peaks()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
list of two lists, for m/z and intensity, corresponding to peaks.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_peaks"></a>}}
\if{latex}{\out{\hypertarget{method-set_peaks}{}}}
\subsection{Method \code{set_peaks()}}{
Set peak list for the Spectrum
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$set_peaks(peaks)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{peaks}}{list with two lists, for m/z and intensity, corresponding to peaks.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-size"></a>}}
\if{latex}{\out{\hypertarget{method-size}{}}}
\subsection{Method \code{size()}}{
Get the count of peaks.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$size()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
count of peaks.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_py_obj"></a>}}
\if{latex}{\out{\hypertarget{method-set_py_obj}{}}}
\subsection{Method \code{set_py_obj()}}{
Setter method for wrapped object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$set_py_obj(Py_obj)}\if{html}{\out{</div>}}
}
\subsection{Details}{
Sets the underlying python object of MSSpectrum.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_py_obj"></a>}}
\if{latex}{\out{\hypertarget{method-get_py_obj}{}}}
\subsection{Method \code{get_py_obj()}}{
Getter method for wrapped object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{MSSpectrum$get_py_obj()}\if{html}{\out{</div>}}
}
\subsection{Details}{
Returns the underlying python object of MSSpectrum.
}
}
}
|
library(tidyverse)
#this is a test file
#this is the updated test file
#this is another update as part of the tutorial
|
/gittutorial.R
|
no_license
|
peterwiim/gittutorial
|
R
| false | false | 119 |
r
|
library(tidyverse)
#this is a test file
#this is the updated test file
#this is another update as part of the tutorial
|
library(testthat)
library(conformalVelo)
test_check("conformalVelo")
|
/tests/testthat.R
|
permissive
|
linnykos/conformalVelo
|
R
| false | false | 70 |
r
|
library(testthat)
library(conformalVelo)
test_check("conformalVelo")
|
###########################################################
# Matt Regner
# Franco Lab
# Date: May-December 2020
#
# Sample: All
# Description: This script performs the following tasks
# 1) scATAC-seq processing
# 2) scRNA-seq/scATAC-seq integration
# 3) Peak calling
###########################################################
.libPaths('/home/regnerm/anaconda3/envs/scENDO_scOVAR/lib/R/library')
source("./filterDoublets_modified.R")
###############################################################
library(scater)
library(dplyr)
library(Seurat)
library(patchwork)
library(SingleCellExperiment)
library(scater)
library(ComplexHeatmap)
library(ConsensusClusterPlus)
library(msigdbr)
library(fgsea)
library(dplyr)
library(tibble)
library(Signac)
library(ggplot2)
library(stringr)
library(EnsDb.Hsapiens.v86)
library(Seurat)
library(Signac)
library(ggplot2)
library(ensembldb)
library(EnsDb.Hsapiens.v86)
library(ArchR)
library(SingleR)
library(viridis)
set.seed(123)
addArchRThreads(threads = 24)
addArchRGenome("hg38")
# Set up directories and file variables:
##################################################################################################
SAMPLE.ID <- "All"
output.dir <- "."
####################
setwd(output.dir)
####################
inputFiles <- list.files(pattern = "\\.gz$")
sampleNames <- c("3533EL","3571DL","36186L","36639L","366C5L","37EACL","38FE7L","3BAE2L","3CCF1L","3E4D1L","3E5CFL")
encode.all <- read.delim("./GRCh38-ccREs.bed",header =F)
colnames(encode.all)[1:3] <- c("seqnames","start","end")
# Store patient metadata and colors:
# Make patient sample metadata and color assignments
sampleColors <- RColorBrewer::brewer.pal(11,"Paired")
sampleColors[11] <- "#8c8b8b"
pie(rep(1,11), col=sampleColors)
# Color patient tumors to resemble the cancer ribbon color
sampleColors <- c(sampleColors[5],sampleColors[7],sampleColors[6],sampleColors[8],sampleColors[10],sampleColors[9],sampleColors[4],sampleColors[3],sampleColors[2],sampleColors[11],sampleColors[1])
sampleAnnot <- data.frame(Sample = c("3533EL","3571DL","36186L","36639L",
"366C5L","37EACL","38FE7L","3BAE2L","3CCF1L","3E4D1L","3E5CFL"),
Color = sampleColors,
Cancer = c("endometrial","endometrial","endometrial","endometrial","endometrial","endometrial",
"ovarian","ovarian","ovarian","ovarian","ovarian"),
Histology = c("endometrioid","endometrioid","endometrioid","endometrioid","endometrioid",
"serous","endometrioid","serous","carcinosarcoma","GIST","serous"),
BMI = c(39.89,30.5,38.55,55.29,49.44,29.94,34.8,22.13,23.72,33.96,22.37),
Age = c(70,70,70,49,62,74,76,61,69,59,59),
Race = c("AA","CAU","CAU","CAU","CAU","CAU","CAU","CAU","CAU","CAU","AS"),
Stage = c("IA","IA","IA","IA","IA","IIIA","IA","IIB","IVB","IV","IIIC"),
Site = c("Endometrium","Endometrium","Endometrium","Endometrium","Endometrium","Ovary","Ovary","Ovary","Ovary","Ovary","Ovary"),
Type = c("Endometrial","Endometrial","Endometrial","Endometrial","Endometrial","Endometrial","Ovarian","Ovarian","Ovarian","Gastric","Ovarian"))
#
# # Read in matching scRNA-seq
# ###################################################################################################
rna <- readRDS("./endo_ovar_All_scRNA_processed.rds")
rna$cell.type <- str_replace(rna$cell.type,"/","_")
Idents(rna) <- "cell.type"
# Redo differential expression with new cell type markers
Wilcox.markers <- readRDS("./wilcox_DEGs.rds")
Wilcox.markers$cluster <- str_replace(Wilcox.markers$cluster,"/","_")
# Create Arrow and ArchR project
##########################################################################
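# Each Arrow file stores the per-sample fragments plus a genome-wide tile matrix;
# doublet scores are then estimated per Arrow file before assembling the ArchR project.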
ArrowFiles <- createArrowFiles(
inputFiles = inputFiles,
sampleNames = sampleNames,
  filterTSS = 0, #Don't set this too high because you can always increase later
filterFrags = 0,
addTileMat = T,
addGeneScoreMat = F
)
ArrowFiles <- list.files(pattern=".arrow")
doubScores <- addDoubletScores(
input = ArrowFiles,
k = 10, #Refers to how many cells near a "pseudo-doublet" to count.
knnMethod = "UMAP",useMatrix = "TileMatrix",nTrials=5,LSIMethod = 1,scaleDims = F,
corCutOff = 0.75,UMAPParams = list(n_neighbors =30, min_dist = 0.3, metric = "cosine", verbose =FALSE),
dimsToUse = 1:50
)
proj <- ArchRProject(
ArrowFiles = ArrowFiles,
outputDirectory = "All",
  copyArrows = T #This is recommended so that you maintain an unaltered copy for later usage.
)
# Filter out outlier low quality cells and doublets
###############################################################################
# GMM for fragments per cell
library(mclust)
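# Per-sample QC: fit a 2-component Gaussian mixture (Mclust, G = 2) to log10(fragments)
# and to log10(TSS enrichment + 1); cells confidently assigned (uncertainty <= 0.05)
# to the higher-signal component are kept as true cellular barcodes. For two samples
# (sampleNames[2] and sampleNames[7]) the TSS component is instead thresholded
# manually at log10(TSS enrichment + 1) >= 0.80 in the loops below.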
for (i in sampleNames){
proj.i <- proj[proj$Sample == i]
# GMM for fragments per cell
depth.clust <- Mclust(log10(proj.i$nFrags),G = 2)
proj.i$depth.cluster <- depth.clust$classification
proj.i$depth.cluster.uncertainty <- depth.clust$uncertainty
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = as.character(proj.i$depth.cluster),
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) + ggtitle(paste0("GMM classification:\n",i," log10(fragments)"))+
ggsave(paste0(i,"_depth.pdf"),width = 4,height = 4)
# GMM for TSS per cell
TSS.clust <- Mclust(log10(proj.i$TSSEnrichment+1),G = 2)
proj.i$TSS.cluster <- TSS.clust$classification
proj.i$TSS.cluster.uncertainty <- TSS.clust$uncertainty
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = as.character(proj.i$TSS.cluster),
discrete = T,
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) + ggtitle(paste0("GMM classification:\n",i," TSS Enrichment"))+
ggsave(paste0(i,"_TSS.pdf"),width = 4,height = 4)
df.TSS <- data.frame(proj.i$cellNames,proj.i$TSS.cluster,proj.i$TSS.cluster.uncertainty,proj.i$TSSEnrichment)
df.TSS <- dplyr::filter(df.TSS,proj.i.TSS.cluster == "2")
df.TSS <- dplyr::filter(df.TSS,proj.i.TSS.cluster.uncertainty <= 0.05)
saveRDS(df.TSS,paste0("df_TSS_",i,".rds"))
df.depth <- data.frame(proj.i$cellNames,proj.i$depth.cluster,proj.i$depth.cluster.uncertainty,proj.i$nFrags)
df.depth <- dplyr::filter(df.depth,proj.i.depth.cluster == "2")
df.depth <- dplyr::filter(df.depth,proj.i.depth.cluster.uncertainty <= 0.05)
saveRDS(df.depth,paste0("df_depth_",i,".rds"))
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
colorDensity = T,
continuousSet = "sambaNight",
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) +geom_hline(yintercept = log10(min(df.TSS$proj.i.TSSEnrichment)+1),linetype = "dashed")+
geom_vline(xintercept = min(log10(df.depth$proj.i.nFrags)),linetype = "dashed")+
ggtitle(paste0("QC thresholds:\n",i))+
ggsave(paste0(i,"_QC.pdf"),width = 4,height = 4)
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = proj.i$DoubletEnrichment,
discrete = F,
continuousSet = "sambaNight",
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) +geom_hline(yintercept = min(log10(df.TSS$proj.i.TSSEnrichment+1)),linetype = "dashed")+
geom_vline(xintercept = min(log10(df.depth$proj.i.nFrags)),linetype = "dashed")+
ggtitle(paste0("Doublet Enrichment:\n",i))+
ggsave(paste0(i,"_doublets.pdf"),width = 4,height = 4)
}
for (i in sampleNames[2]){
proj.i <- proj[proj$Sample == i]
# GMM for fragments per cell
depth.clust <- Mclust(log10(proj.i$nFrags),G = 2)
proj.i$depth.cluster <- depth.clust$classification
proj.i$depth.cluster.uncertainty <- depth.clust$uncertainty
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = as.character(proj.i$depth.cluster),
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) + ggtitle(paste0("GMM classification:\n",i," log10(fragments)"))+
ggsave(paste0(i,"_depth.pdf"),width = 4,height = 4)
# Manually set TSS threshold
#TSS.clust <- Mclust(log10(proj.i$TSSEnrichment+1),G = 2)
proj.i$TSS.cluster <- ifelse(log10(proj.i$TSSEnrichment+1) >= 0.80,"2","1")
proj.i$TSS.cluster.uncertainty <- rep(NA,nrow(proj.i@cellColData))
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = as.character(proj.i$TSS.cluster),
discrete = T,
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) + ggtitle(paste0("GMM classification:\n",i," TSS Enrichment"))+
ggsave(paste0(i,"_TSS.pdf"),width = 4,height = 4)
df.TSS <- data.frame(proj.i$cellNames,proj.i$TSS.cluster,proj.i$TSS.cluster.uncertainty,proj.i$TSSEnrichment)
df.TSS <- dplyr::filter(df.TSS,proj.i.TSS.cluster == "2")
#df.TSS <- dplyr::filter(df.TSS,proj.i.TSS.cluster.uncertainty <= 0.05)
saveRDS(df.TSS,paste0("df_TSS_",i,".rds"))
df.depth <- data.frame(proj.i$cellNames,proj.i$depth.cluster,proj.i$depth.cluster.uncertainty,proj.i$nFrags)
df.depth <- dplyr::filter(df.depth,proj.i.depth.cluster == "2")
df.depth <- dplyr::filter(df.depth,proj.i.depth.cluster.uncertainty <= 0.05)
saveRDS(df.depth,paste0("df_depth_",i,".rds"))
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
colorDensity = T,
continuousSet = "sambaNight",
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) +geom_hline(yintercept = log10(min(df.TSS$proj.i.TSSEnrichment)+1),linetype = "dashed")+
geom_vline(xintercept = min(log10(df.depth$proj.i.nFrags)),linetype = "dashed")+
ggtitle(paste0("QC thresholds:\n",i))+
ggsave(paste0(i,"_QC.pdf"),width = 4,height = 4)
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = proj.i$DoubletEnrichment,
discrete = F,
continuousSet = "sambaNight",
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) +geom_hline(yintercept = min(log10(df.TSS$proj.i.TSSEnrichment+1)),linetype = "dashed")+
geom_vline(xintercept = min(log10(df.depth$proj.i.nFrags)),linetype = "dashed")+
ggtitle(paste0("Doublet Enrichment:\n",i))+
ggsave(paste0(i,"_doublets.pdf"),width = 4,height = 4)
}
for (i in sampleNames[7]){
proj.i <- proj[proj$Sample == i]
# GMM for fragments per cell
depth.clust <- Mclust(log10(proj.i$nFrags),G = 2)
proj.i$depth.cluster <- depth.clust$classification
proj.i$depth.cluster.uncertainty <- depth.clust$uncertainty
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = as.character(proj.i$depth.cluster),
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) + ggtitle(paste0("GMM classification:\n",i," log10(fragments)"))+
ggsave(paste0(i,"_depth.pdf"),width = 4,height = 4)
# Manually set TSS threshold
#TSS.clust <- Mclust(log10(proj.i$TSSEnrichment+1),G = 2)
proj.i$TSS.cluster <- ifelse(log10(proj.i$TSSEnrichment+1) >= 0.80,"2","1")
proj.i$TSS.cluster.uncertainty <- rep(NA,nrow(proj.i@cellColData))
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = as.character(proj.i$TSS.cluster),
discrete = T,
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) + ggtitle(paste0("GMM classification:\n",i," TSS Enrichment"))+
ggsave(paste0(i,"_TSS.pdf"),width = 4,height = 4)
df.TSS <- data.frame(proj.i$cellNames,proj.i$TSS.cluster,proj.i$TSS.cluster.uncertainty,proj.i$TSSEnrichment)
df.TSS <- dplyr::filter(df.TSS,proj.i.TSS.cluster == "2")
#df.TSS <- dplyr::filter(df.TSS,proj.i.TSS.cluster.uncertainty <= 0.05)
saveRDS(df.TSS,paste0("df_TSS_",i,".rds"))
df.depth <- data.frame(proj.i$cellNames,proj.i$depth.cluster,proj.i$depth.cluster.uncertainty,proj.i$nFrags)
df.depth <- dplyr::filter(df.depth,proj.i.depth.cluster == "2")
df.depth <- dplyr::filter(df.depth,proj.i.depth.cluster.uncertainty <= 0.05)
saveRDS(df.depth,paste0("df_depth_",i,".rds"))
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
colorDensity = T,
continuousSet = "sambaNight",
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) +geom_hline(yintercept = log10(min(df.TSS$proj.i.TSSEnrichment)+1),linetype = "dashed")+
geom_vline(xintercept = min(log10(df.depth$proj.i.nFrags)),linetype = "dashed")+
ggtitle(paste0("QC thresholds:\n",i))+
ggsave(paste0(i,"_QC.pdf"),width = 4,height = 4)
ggPoint(
x = log10(proj.i$nFrags),
y = log10(proj.i$TSSEnrichment+1),
color = proj.i$DoubletEnrichment,
discrete = F,
continuousSet = "sambaNight",
xlabel = "log10(unique fragments)",
ylabel = "log10(TSS Enrichment+1)"
) +geom_hline(yintercept = min(log10(df.TSS$proj.i.TSSEnrichment+1)),linetype = "dashed")+
geom_vline(xintercept = min(log10(df.depth$proj.i.nFrags)),linetype = "dashed")+
ggtitle(paste0("Doublet Enrichment:\n",i))+
ggsave(paste0(i,"_doublets.pdf"),width = 4,height = 4)
}
###############################################################################
dev.off()
# Filter out low quality cells, and remove doublets
##############################################################################
list.depth <- list.files(pattern = "^df_depth")
df.depth <- data.frame(cellNames=character(),
cluster=character(),
cluster.uncertainty=character(),
nFrags = character())
for (i in list.depth){
df <- readRDS(i)
colnames(df) <- c("cellNames","cluster","cluster.uncertainty","nFrags")
df.depth <- rbind(df.depth,df)
}
list.TSS <- list.files(pattern = "^df_TSS")
df.TSS <- data.frame(cellNames=character(),
cluster=character(),
cluster.uncertainty=character(),
TSSEnrichment = character())
for (i in list.TSS){
df <- readRDS(i)
colnames(df) <- c("cellNames","cluster","cluster.uncertainty","TSSEnrichment")
df.TSS <- rbind(df.TSS,df)
}
colnames(df.TSS) <- c("cellNames","TSS.cluster","TSS.cluster.uncertainty","TSSEnrichment")
colnames(df.depth) <- c("cellNames","depth.cluster","depth.cluster.uncertainty","nFrags")
cellsPass <- intersect(df.TSS$cellNames,df.depth$cellNames)
cellsFail <- proj$cellNames[!(proj$cellNames %in% cellsPass)]
# Screen for high quality barcodes (remove non cellular barcodes)
proj.filter <- proj[proj$cellNames %in% cellsPass]
proj <- filterDoublets(proj.filter,filterRatio = 1,cutEnrich = 1,cutScore = -Inf)
plotFragmentSizes(proj)+ggtitle("Fragment Size Histogram")+ggsave("Frags_hist.pdf",width = 6,height = 4)
plotTSSEnrichment(proj)+ggtitle("TSS Enrichment")+ggsave("TSS.pdf",width = 6,height = 4)
###############################################################################################################
# Perform LSI reduction and clustering with ATAC data only
#######################################################################
# Add LSI dimreduc
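# Iterative LSI: TF-IDF normalization followed by SVD on the binarized tile matrix,
# with intermediate rounds of coarse clustering used to re-select variable features
# at each iteration.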
proj <- addIterativeLSI(
ArchRProj = proj,
useMatrix = "TileMatrix",
name = "IterativeLSI",
iterations = 4,
LSIMethod = 2,
scaleDims = T,
clusterParams = list( #See Seurat::FindClusters
resolution = c(0.2),
sampleCells = 10000,
n.start = 10
),
UMAPParams = list(n_neighbors =30,
min_dist = 0.3,
metric = "cosine",
verbose =FALSE),
varFeatures = 25000,
dimsToUse = 1:50,
binarize = T,
corCutOff = 0.75,
force = T,
seed=6
)
proj <- addClusters(
input = proj,
reducedDims = "IterativeLSI",
method = "Seurat",
name = "ATAC_clusters",
resolution = 0.7,
dimsToUse = 1:50,force = T
)
# Add UMAP based on LSI dims
proj <- addUMAP(proj,nNeighbors = 30,minDist = 0.3,dimsToUse = 1:50,metric = "cosine",force = T,reducedDims="IterativeLSI")
saveRDS(proj,"proj_LSI_AND_UMAP.rds")
###################################################################################################
# Estimate gene activity in ATAC data and perform cell type annotation:
# Add Gene activity matrix using ArchR model
proj <- addGeneScoreMatrix(proj,matrixName = "ArchRGeneScore",force = T)
getAvailableMatrices(proj)
saveRDS(proj,"proj_LSI_AND_GeneScores.rds")
# Constrained Integration to only align cells from the same patient tumor
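# groupList pairs each sample's ATAC cells with the scRNA-seq cells from the same
# patient tumor, so that label transfer (addGeneIntegrationMatrix below) never
# aligns cells across patients.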
groupList <- SimpleList()
for (i in levels(factor(proj$Sample))){
rna.sub <- rna[,rna$Sample == i]
RNA.cells <- colnames(rna.sub)
idxSample <- BiocGenerics::which(proj$Sample == i)
cellsSample <- proj$cellNames[idxSample]
proj.filter <- proj[cellsSample, ]
ATAC.cells <- proj.filter$cellNames
groupList[[i]] <- SimpleList(
ATAC = ATAC.cells,
RNA = RNA.cells
)
}
# ###########################################################################################
proj <- addGeneIntegrationMatrix(
ArchRProj = proj,
useMatrix = "ArchRGeneScore",
matrixName = "GeneIntegrationMatrix_ArchR",
reducedDims = "IterativeLSI",
seRNA = rna,
groupList = groupList,
addToArrow = T,
force= TRUE,
groupRNA = "cell.type",
nameCell = "predictedCell_ArchR",
nameGroup = "predictedGroup_ArchR",
nameScore = "predictedScore_ArchR",
plotUMAP = F,
useImputation = F,
transferParams = list(dims = 1:50)
)
getAvailableMatrices(proj)
saveRDS(proj,"proj_LSI_GeneScores_Annotations_Int.rds")
############################################################################################
# Begin Downstream Analysis
# 1) Plotting RNA/ATAC by sample, by cluster, by predicted label
# 2) Marker Gene (RNA/ATAC) intersection
# 3) Peak2GeneLinks/Coaccessiblity
######################################################
# PART 1: Plotting
######################################################################################
# Make embedding highlighting by 1) Predicted group ArchR 2) Predicted group Signac
# 3) Sample 4) ATAC-only clusters
atac.archr <- plotEmbedding(proj,colorBy = "cellColData",name = "predictedGroup_ArchR")
atac.archr.emb <- as.data.frame(atac.archr$data)
atac.archr.emb$cell.type.archr <- atac.archr.emb$color
atac.archr.emb$cell.type.archr <- sub("-", ":", atac.archr.emb$cell.type.archr)
atac.archr.emb$cell.type.archr <- gsub(".*:", "", atac.archr.emb$cell.type.archr)
head(atac.archr.emb)
atac.archr.emb$cell.type.archr <- factor(atac.archr.emb$cell.type.archr, levels = levels(as.factor(rna$cell.type)))
head(atac.archr.emb)
atac <- plotEmbedding(proj,colorBy = "cellColData",name = "Sample")
atac.emb.sample <- as.data.frame(atac$data)
atac.emb.sample$sample <- atac.emb.sample$color
atac.emb.sample$sample <- sub("-", ":", atac.emb.sample$sample )
atac.emb.sample$sample <- gsub(".*:", "", atac.emb.sample$sample )
head(atac.emb.sample)
head(atac.emb.sample)
atac <- plotEmbedding(proj,colorBy = "cellColData",name = "ATAC_clusters")
atac.emb.cluster <- as.data.frame(atac$data)
atac.emb.cluster$sample <- atac.emb.cluster$color
atac.emb.cluster$sample <- sub("-", ":", atac.emb.cluster$sample )
atac.emb.cluster$sample <- gsub(".*:", "", atac.emb.cluster$sample )
head(atac.emb.cluster)
atac.emb.all <- cbind(atac.archr.emb[,c(1:2,4)],
atac.emb.sample[,4],
atac.emb.cluster[,4])
atac.emb.all$plain <- "Plain"
colnames(atac.emb.all) <- c("UMAP1","UMAP2","Predicted.Group.ArchR",
"Sample","ATAC_clusters","Blank")
head(atac.emb.all)
ggplot(atac.emb.all,aes_string(x = "UMAP1",y="UMAP2",color = "Predicted.Group.ArchR"))+
geom_point(size = .1)+
theme_classic()+
ggtitle("scATAC-seq: Predicted Group ArchR")+
theme(plot.title = element_text(face = "bold"))+
xlab("UMAP_1")+
ylab("UMAP_2")+
theme(legend.key.size = unit(0.2, "cm"))+
guides(colour = guide_legend(override.aes = list(size=3)))+
ggsave(paste0("PredictedGroup_ArchR_ATAC.pdf"),width = 8,height = 6)
ggplot(atac.emb.all,aes_string(x = "UMAP1",y="UMAP2",color = "Sample"))+
geom_point(size = .1)+
theme_classic()+
ggtitle(paste0("scATAC-seq: Sample"))+
theme(plot.title = element_text(face = "bold"))+
xlab("UMAP_1")+
ylab("UMAP_2")+
theme(legend.key.size = unit(0.2, "cm"))+
scale_color_manual(values = sampleColors)
#guides(colour = guide_legend(override.aes = list(size=3)))+
ggsave(paste0("Sample_ATAC.pdf"),width = 8,height = 6)
prediction.scores <- data.frame(ArchR= proj$predictedScore_ArchR)
var.list <- colnames(prediction.scores)
for (i in 1:length(var.list)){
ggplot(prediction.scores,aes_string(x = var.list[i]))+
geom_histogram(binwidth = 0.025,fill="#000000", color="#e9ecef", alpha=0.9)+
theme_classic()+
ggsave(paste0(var.list[i],"_ATAC.pdf"),width = 8,height = 6)
}
# Plot matching scRNA-seq plots:
#####################################################################################
rna.emb <- as.data.frame(rna@reductions$umap@cell.embeddings)
rna.emb$cell.type <- as.factor(rna$cell.type)
rna.emb$sample <- rna$Sample
rna.emb$cell.type <- factor(rna.emb$cell.type,levels = levels(atac.emb.all$Predicted.Group.ArchR))
rna.cell.plot <- ggplot(rna.emb,aes(x = UMAP_1,y=UMAP_2,color = cell.type))+
geom_point(size = .1)+
theme_classic()+
ggtitle("scRNAseq")+
theme(plot.title = element_text(face = "bold"))+
xlab("UMAP_1")+
ylab("UMAP_2")+
theme(legend.key.size = unit(0.2, "cm"))+
guides(colour = guide_legend(override.aes = list(size=3)))
rna.cell.plot +ggsave("RNA_All_labels.pdf",width = 8,height = 6)
rna.emb$sample <- factor(rna.emb$sample,levels = levels(factor(atac.emb.all$Sample)))
rna.sample.plot <-ggplot(rna.emb,aes(x = UMAP_1,y=UMAP_2,color = sample))+
geom_point(size = .1)+
theme_classic()+
ggtitle("scRNAseq")+
theme(plot.title = element_text(face = "bold"))+
xlab("UMAP_1")+
ylab("UMAP_2")+
theme(legend.key.size = unit(0.2, "cm"))+
scale_color_manual(values = sampleColors)+
guides(colour = guide_legend(override.aes = list(size=3)))+ggsave("RNA_All_sample.pdf")
rna.sample.plot +ggsave("RNA_All_samples.pdf",width = 8,height = 6)
#########################################################################################################
# # PART 2: Marker gene overlap RNA/ATAC
#
#
# # Differential pseudo-gene activity analysis:
# ############################
#
######################################################################################
#ArchR
########################################################################################
# DEGs using ATAC labels
markersGS.archr <- getMarkerFeatures(
ArchRProj = proj,
useMatrix = "ArchRGeneScore",
groupBy = "ATAC_clusters",
bias = c("TSSEnrichment", "log10(nFrags)"),
testMethod = "wilcoxon"
)
heatmapGS.archr <- markerHeatmap(
seMarker = markersGS.archr,
cutOff = "FDR <= 0.01 & Log2FC >= 1.25",
labelMarkers =NULL,
transpose = F,
pal = viridis(n=256),
limits = c(-2,2)
)
ComplexHeatmap::draw(heatmapGS.archr, heatmap_legend_side = "bot", annotation_legend_side = "bot")
plotPDF(heatmapGS.archr , name = "GeneScores-Marker-Heatmap_ArchR", width = 8, height = 6, ArchRProj = proj, addDOC = FALSE)
#
#
#
# DEGs using predicted labels (removing small groups)
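# Keep only cells with a confident label transfer (predictedScore_ArchR > 0.5) and
# drop predicted groups with 10 or fewer cells so the group-wise tests are not
# driven by tiny populations.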
idxSample <- BiocGenerics::which(proj$predictedScore_ArchR > 0.5)
cellsSample <- proj$cellNames[idxSample]
proj.filter <- proj[cellsSample, ]
popular.groups <- summary(factor(proj.filter$predictedGroup_ArchR))
popular.groups <- popular.groups[popular.groups > 10]
proj.filter$Mode.Label <- ifelse(proj.filter$predictedGroup_ArchR %in% names(popular.groups),TRUE,FALSE)
idxSample <- BiocGenerics::which(proj.filter$Mode.Label == TRUE)
cellsSample <- proj.filter$cellNames[idxSample]
proj.filter <- proj.filter[cellsSample, ]
# DEGs using predicted labels
markersGS.archr.pred <- getMarkerFeatures(
ArchRProj = proj.filter,
useMatrix = "ArchRGeneScore",
groupBy = "predictedGroup_ArchR",
bias = c("TSSEnrichment", "log10(nFrags)"),
testMethod = "wilcoxon"
)
heatmapGS.archr.pred <- markerHeatmap(
seMarker = markersGS.archr.pred,
cutOff = "FDR <= 0.01 & Log2FC >= 1.25",
labelMarkers =NULL,
transpose = F,
pal = viridis(n=256),
limits = c(-2,2)
)
ComplexHeatmap::draw(heatmapGS.archr.pred, heatmap_legend_side = "bot", annotation_legend_side = "bot")
plotPDF(heatmapGS.archr.pred, name = "GeneScores-Marker-Heatmap_ArchR_pred", width = 8, height = 6, ArchRProj = proj.filter, addDOC = FALSE)
# Differential peak analysis:
############################
# ATAC clusters
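# Peak-calling workflow: build pseudo-bulk replicate coverages per group, call a
# reproducible peak set with MACS2, add the cell-by-peak matrix (plus background
# peaks), then test for marker peaks per cluster with a Wilcoxon test.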
proj <- addGroupCoverages(ArchRProj = proj,groupBy = "ATAC_clusters",force = T)
pathToMacs2 <- findMacs2()
proj <- addReproduciblePeakSet(
ArchRProj = proj,
groupBy = "ATAC_clusters",
pathToMacs2 = pathToMacs2,force = T
)
proj <- addPeakMatrix(proj,force = T)
proj <- addBgdPeaks(proj)
markersPeaks <- getMarkerFeatures(
ArchRProj = proj,
useMatrix = "PeakMatrix",
groupBy = "ATAC_clusters",
bias = c("TSSEnrichment", "log10(nFrags)"),
testMethod = "wilcoxon"
)
heatmapPeaks<- markerHeatmap(
seMarker = markersPeaks,
cutOff = "FDR <= 0.01 & Log2FC >= 1.25",
labelMarkers =NULL,
transpose = F,
pal = viridis(n=256),
limits = c(-2,2)
)
ComplexHeatmap::draw(heatmapPeaks, heatmap_legend_side = "bot", annotation_legend_side = "bot")
plotPDF(heatmapPeaks, name = "Markers_peaks_ATAC_clusters", width = 8, height = 6, ArchRProj = proj, addDOC = FALSE)
# ArchR predicted labels
# DEGs using predicted labels (removing small groups)
idxSample <- BiocGenerics::which(proj$predictedScore_ArchR >= 0.5)
cellsSample <- proj$cellNames[idxSample]
proj.filter <- proj[cellsSample, ]
popular.groups <- summary(factor(proj.filter$predictedGroup_ArchR))
popular.groups <- popular.groups[popular.groups > 10]
proj.filter$Mode.Label <- ifelse(proj.filter$predictedGroup_ArchR %in% names(popular.groups),TRUE,FALSE)
idxSample <- BiocGenerics::which(proj.filter$Mode.Label == TRUE)
cellsSample <- proj.filter$cellNames[idxSample]
proj.filter <- proj.filter[cellsSample, ]
proj.archr <- addGroupCoverages(ArchRProj = proj.filter,groupBy = "predictedGroup_ArchR",force = T)
pathToMacs2 <- findMacs2()
proj.archr <- addReproduciblePeakSet(
ArchRProj = proj.archr,
groupBy = "predictedGroup_ArchR",
pathToMacs2 = pathToMacs2,force = T
)
proj.archr <- addPeakMatrix(proj.archr,force = T)
proj.archr <- addBgdPeaks(proj.archr,force = T)
markersPeaks.archr <- getMarkerFeatures(
ArchRProj = proj.archr,
useMatrix = "PeakMatrix",
groupBy = "predictedGroup_ArchR",
bias = c("TSSEnrichment", "log10(nFrags)"),
testMethod = "wilcoxon"
)
heatmapPeaks.archr<- markerHeatmap(
seMarker = markersPeaks.archr,
cutOff = "FDR <= 0.01 & Log2FC >= 1.25",
labelMarkers =NULL,
transpose = F,
pal = viridis(n=256),
limits = c(-2,2)
)
ComplexHeatmap::draw(heatmapPeaks.archr, heatmap_legend_side = "bot", annotation_legend_side = "bot")
plotPDF(heatmapPeaks.archr , name = "Markers_peaks_Archr_Predicted_labels", width = 8, height = 6, ArchRProj = proj.archr, addDOC = FALSE)
saveRDS(proj.archr,"./final_archr_proj_archrGS.rds")
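# Illustrative: downstream scripts can reload this checkpoint with
# proj.archr <- readRDS("./final_archr_proj_archrGS.rds")
# (assumes the Arrow files referenced by the project are still in their original location)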
# # RNA heatmap
####################################################################################
topN <- Wilcox.markers %>% group_by(cluster) %>% dplyr::filter(p_val_adj <= 0.01) %>% top_n(30, desc(avg_logFC))
# Downsample cells from each cluster
rna.sub <- subset(rna,downsample =300)
rna.sub <- NormalizeData(rna.sub)
rna.sub <- rna.sub[rownames(rna.sub) %in% topN$gene,]
rna.sub <- ScaleData(rna.sub,features = rownames(rna.sub))
mat <- rna.sub@assays$RNA@scale.data
cluster_anno <- rna.sub@meta.data$cell.type
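# Map scaled expression in [-2, 2] onto a three-colour viridis ramp, matching the
# limits used for the ATAC gene-score and peak heatmaps above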
col_fun = circlize::colorRamp2(c(-2, 0, 2),viridis(n = 3))
heatmapRNA <- Heatmap(mat, name = "Expression",
column_split = factor(cluster_anno),
cluster_columns =T,
show_column_dend = F,
cluster_column_slices = T,
column_title_gp = gpar(fontsize = 8),
column_gap = unit(0.1, "mm"),
cluster_rows = T,
show_row_dend = FALSE,
col = col_fun,
column_title_rot = 90,
show_column_names = F)
plotPDF(heatmapRNA, name = "Heatmap_RNA", width = 8, height = 6)
##############
# END OF SCRIPT
##############
|
/scATAC-seq Processing Scripts/Full_Cohort/Patients1-11_scATAC-seq.R
|
no_license
|
RegnerM2015/scENDO_scOVAR_2020
|
R
| false | false | 28,358 |
r
|
context("test-tbl_regression")
library(survival)
library(lme4)
mod_lm <- lm(hp ~ am, data = mtcars)
mod_survreg <- survreg(Surv(time, status) ~ age + ph.ecog, data = lung)
mod_logistic <- glm(response ~ age + stage, trial, family = binomial)
mod_poisson <- glm(count ~ age + trt,
trial %>% dplyr::mutate(count = sample.int(20, size = nrow(trial), replace = TRUE)),
family = poisson
)
mod_lmer <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
mod_glmer <- glmer(am ~ hp + factor(cyl) + (1 | gear), mtcars, family = binomial)
mod_lm_interaction <- lm(age ~ trt * grade * response, data = trial)
lung2 <- lung
Hmisc::label(lung2$sex) <- "Gender"
Hmisc::label(lung2$age) <- "AGE"
cox_hmisclbl <- coxph(Surv(time, status) ~ age + sex, data = lung2)
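# Note: expect_error(expr, NA) / expect_warning(expr, NA) assert that expr raises
# no error / no warning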
test_that("glm: logistic and poisson regression", {
expect_error(tbl_regression(mod_logistic), NA)
expect_warning(tbl_regression(mod_logistic), NA)
expect_error(tbl_regression(mod_poisson, show_single_row = "trt"), NA)
expect_warning(tbl_regression(mod_poisson, show_single_row = "trt"), NA)
expect_error(tbl_regression(mod_logistic, exponentiate = TRUE), NA)
expect_warning(tbl_regression(mod_logistic, exponentiate = TRUE), NA)
expect_error(tbl_regression(mod_poisson, exponentiate = TRUE, show_single_row = "trt"), NA)
expect_warning(tbl_regression(mod_poisson, exponentiate = TRUE, show_single_row = "trt"), NA)
})
test_that("lm: no errors/warnings with standard use", {
expect_error(tbl_regression(mod_lm), NA)
expect_warning(tbl_regression(mod_lm), NA)
})
test_that("lm with tidyfun: no errors/warnings with standard use", {
expect_error(tbl_regression(mod_lm, tidy_fun = broom::tidy), NA)
expect_warning(tbl_regression(mod_lm, tidy_fun = broom::tidy), NA)
})
test_that("survreg: no errors/warnings with standard use", {
expect_error(tbl_regression(mod_survreg), NA)
expect_warning(tbl_regression(mod_survreg), NA)
})
test_that("lmer: no errors/warnings with standard use", {
expect_error(tbl_regression(mod_lmer), NA)
expect_warning(tbl_regression(mod_lmer), NA)
})
test_that("glmer: no errors/warnings with standard use", {
expect_error(tbl_regression(mod_glmer), NA)
expect_warning(tbl_regression(mod_glmer), NA)
})
test_that("lm with interactions: no errors/warnings with standard use", {
expect_error(tbl_regression(mod_lm_interaction), NA)
expect_warning(tbl_regression(mod_lm_interaction), NA)
})
test_that("tbl_regression creates errors when non-function in input", {
expect_error(
tbl_regression(mod_lm_interaction, pvalue_fun = mtcars),
"*"
)
expect_error(
tbl_regression(mod_lm_interaction, estimate_fun = mtcars),
"*"
)
expect_error(
tbl_regression(mod_lm_interaction, tidy_fun = mtcars),
"*"
)
})
test_that("tbl_regression creates errors when inputs are wrong", {
expect_error(
tbl_regression(mod_lm_interaction, label = "Age"),
"*"
)
expect_error(
tbl_regression(mod_lm_interaction, label = list("Age")),
"*"
)
expect_error(
tbl_regression(mod_lm_interaction, label = list("age" ~ c("Age", "Two"))),
"*"
)
expect_error(
tbl_regression(mod_lm_interaction, include = "INCLUDE ME!"),
"*"
)
})
test_that("No errors/warnings when data is labelled using Hmisc", {
expect_error(tbl_regression(cox_hmisclbl), NA)
expect_warning(tbl_regression(cox_hmisclbl), NA)
})
test_that("show_single_row errors print", {
expect_error(
tbl_regression(mod_lm_interaction, show_single_row = "NOT_A_VA"),
"*"
)
expect_error(
tbl_regression(mod_lm_interaction, show_single_row = "grade"),
"*"
)
})
test_that("All labels print with cubic splines", {
spline_fun <- Hmisc::rcspline.eval
rsc_mod <- lm(age ~ spline_fun(marker, inclx = TRUE) + response, trial)
expect_equal(
tbl_regression(rsc_mod) %>% purrr::pluck("table_body", "label") %>% {sum(is.na(.))},
0
)
})
test_that("Testing lme4 results", {
mod_glmer <- glmer(am ~ hp + factor(cyl) + (1 | gear), mtcars, family = binomial)
# tbl_regression runs without error
expect_error(
tbl_lme4 <- tbl_regression(mod_glmer, exponentiate = TRUE,
conf.level = 0.90),
NA
)
# coefs are exponentiated properly
expect_equivalent(
coef(mod_glmer)[[1]] %>% {.[1, 2:ncol(.)]} %>% map_dbl(exp),
tbl_lme4$table_body %>% pull(estimate) %>% discard(is.na)
)
})
test_that("Interaction modifications", {
# no error with interaction
expect_error(
tbl_i <- lm(age ~ factor(response) * marker, trial) %>%
tbl_regression(
show_single_row = `factor(response):marker`,
label = `factor(response):marker` ~ "Interaction"
),
NA
)
# checking modifications to table
expect_equal(
dplyr::filter(tbl_i$table_body, variable == "factor(response):marker") %>%
dplyr::pull(label),
"Interaction"
)
expect_equal(
dplyr::filter(tbl_i$table_body, variable == "factor(response):marker") %>%
nrow(),
1L
)
})
|
/tests/testthat/test-tbl_regression.R
|
permissive
|
hughjonesd/gtsummary
|
R
| false | false | 5,005 |
r
|
library(googleComputeEngineR)
project = "scmerge"
zone = "us-central1-a"
gce_global_project(project)
gce_global_zone(zone)
# gce_get_project()
# gce_list_zones(project)
# View(gce_list_machinetype()$items)
(tag = "gcr.io/scmerge/scmerge_mem_docker:gcbuild")
vm <- gce_vm(template = "rstudio",
name = "biocsing8",
disk_size_gb = 20,
predefined_type = "n1-standard-8",
dynamic_image = tag,
user = "rstudio",
password = "pushu")
vm <- gce_ssh_setup(vm,
username = "rstudio",
key.pub = "~/.ssh/id_rsa.pub",
key.private = "~/.ssh/id_rsa")
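# When the analysis is finished, the instance can be stopped or deleted so it stops
# accruing charges; illustrative calls, assuming the usual googleComputeEngineR helpers:
# gce_vm_stop(vm)
# gce_vm_delete(vm)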
|
/gseR.R
|
no_license
|
kevinwang09/scmerge_mem_docker
|
R
| false | false | 679 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compareXMats.R
\name{startStompingMultiple}
\alias{startStompingMultiple}
\title{Compare different statistical methods and different xMatrices against the same yVector response factor}
\usage{
startStompingMultiple(
file_path,
xMatrices,
yVector,
logV,
transformV,
meth,
prop,
seed,
iter,
plsr_ncomp,
rfr_ntree,
svm_gamma,
svm_epsilon,
svm_cost,
knn_knum,
gbm_ntree,
gbm_shrink,
gbm_dist,
gbm_node,
rlr_mscale,
permission = F
)
}
\arguments{
\item{file_path}{String path to a folder where the output plots are stored.}
\item{xMatrices}{A list of matrices where each column is considered a factor to be modelled. Names of columns will automatically be used if provided, and the names in the list will be used as the name of that dataset.}
\item{yVector}{A vector containing the response factor to be modelled}
\item{logV}{Boolean value; if TRUE, the xMatrix is transformed using log base 10}
\item{transformV}{Transformation and scaling method to be applied to xMatrix, raw data is used by default, the options are: raw, center, minmax, meannorm, zscore, pareto, vast, level}
\item{meth}{Numerical vector of statistical methods to be compared; all of them are used by default if left blank. c(1:11) are the indices of the methods: "OLSR", "SLR-both", "SLR-forward", "SLR-backward", "PCR", "PLSR", "RFR", "SVM", "KNN", "GBM", "GLMR"}
\item{prop}{Proportion of data to be used in the training of the model. Value between 0 and 1. For example, 0.7 is 70\% of the data used for training the model.}
\item{seed}{Initial seed for splitting training and testing datasets. Used for reproducibility of results.}
\item{iter}{How many models are built to assess the overall accuracy of the method}
\item{plsr_ncomp}{The number of components used for the principal components regression method}
\item{rfr_ntree}{The number of trees used in the Random Forest Regression method}
\item{svm_gamma}{The gamma used for the Support Vector Machine method}
\item{svm_epsilon}{The epsilon used for the Support Vector Machine method}
\item{svm_cost}{The cost used for the Support Vector Machine Method}
\item{knn_knum}{The number of K-Centroids used in the K-Nearest Neighbours method}
\item{gbm_ntree}{The number of trees used in the Generalised Boosted regression method}
\item{gbm_shrink}{The shrink used in the Generalised Boosted regression method}
\item{gbm_dist}{The distribution used in the Generalised Boosted regression method}
\item{gbm_node}{The number of nodes used in the Generalised Boosted regression method}
\item{rlr_mscale}{The mscale used for the Robust Linear regression method}
\item{permission}{Logical (TRUE/FALSE). Permission for this package to create files in the file path specified.}
}
\value{
The performance plots and final heatmap plots are in the file path specified. Also returned is a dataframe with the following:
\itemize{
\item heatmap_methods - The heatmap methods order
\item heatmap_matrix - The heatmap matrix/dataset name order
\item heatmap_values_mape - The heatmap MAPE values in corresponding order to heatmap_methods and heatmap_matrix
\item heatmap_values_rmse - The heatmap RMSE values in corresponding order to heatmap_methods and heatmap_matrix
\item best_rmse_models - The models which performed the best for RMSE over each xMatrix
\item best_mape_models - The models which performed the best for MAPE over each xMatrix
}
}
\description{
\code{startStompingMultiple} compares different statistical methods as well as different xMatrices against the same yVector response factor.
}
\details{
A time-saving method of analysing different xMatrix datasets against the same yVector response factor.
It also compares each xMatrix versus a variety of different statistical models; the available models to compare are:
\itemize{
\item Ordinary Least Squares Regression (OLSR)
\item Stepwise Linear Regression - Both directions (SLR)
\item Stepwise Linear Regression - Forwards direction (SLRf)
\item Stepwise Linear Regression - Backwards direction (SLRb)
\item Principal Components Regression (PCR)
\item Partial Least Squares Regression (PLSR)
\item Random Forest Regression (RFR)
\item Support Vector Machine (SVM)
\item K-Nearest Neighbours Regression (KNN)
\item Generalised Boosted Modelling (GBM)
\item Robust Linear Regression (RLR)
}
There are two methods of analysing the performance of each model: the Root Mean Square Error (RMSE) and the Mean Absolute Percentage Error (MAPE);
two heatmaps are produced and stored as files in the file path specified, one for each of the performance analysis methods.
If permission is not specified as true, the function will request a user input to confirm consent to create images on the user's computer; this step can be skipped/automated by setting that parameter to true.
}
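% Added an illustrative usage sketch: the data, method indices, and tuning values below are
% assumptions chosen for demonstration only (they are not package defaults), so the call is
% wrapped in \dontrun{}.
\examples{
\dontrun{
# Two small random predictor matrices and one response vector (illustrative data only)
x1 <- matrix(rnorm(200), ncol = 4, dimnames = list(NULL, paste0("f", 1:4)))
x2 <- matrix(rnorm(200), ncol = 4, dimnames = list(NULL, paste0("g", 1:4)))
y  <- rnorm(50)
res <- startStompingMultiple(
  file_path  = tempdir(),
  xMatrices  = list(setA = x1, setB = x2),
  yVector    = y,
  logV       = FALSE,
  transformV = "zscore",
  meth       = c(1, 7, 9),   # OLSR, RFR, KNN, using the index meanings listed for 'meth'
  prop       = 0.7,
  seed       = 1,
  iter       = 10,
  plsr_ncomp = 2, rfr_ntree = 100,
  svm_gamma = 0.1, svm_epsilon = 0.1, svm_cost = 1,
  knn_knum = 5,
  gbm_ntree = 100, gbm_shrink = 0.01, gbm_dist = "gaussian", gbm_node = 5,
  rlr_mscale = 0.5,
  permission = TRUE
)
}
}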
|
/man/startStompingMultiple.Rd
|
no_license
|
EmmaRSims/StompR
|
R
| false | true | 4,916 |
rd
|
# source: https://github.com/csoneson/ARMOR/blob/master/scripts/run_tximeta.R
suppressMessages({
library(tidyverse)
library(SingleCellExperiment)
})
# snakemake variables
linked_txome <- snakemake@input$linked_txome
samples <- snakemake@input$cts
assembly <- snakemake@wildcards$assembly
out_matrix <- snakemake@output$counts
out_SCE <- snakemake@output$SCE
log_file <- snakemake@log[[1]]
# log all console output
log <- file(log_file, open="wt")
sink(log)
sink(log, type="message")
# log all variables for debugging purposes
cat('# variables used for this analysis:\n')
cat('linked_txome <- "', linked_txome, '"\n', sep = "")
cat('samples <- "', samples, '"\n', sep = "")
cat('assembly <- "', assembly, '"\n', sep = "")
cat('out_matrix <- "', out_matrix, '"\n', sep = "")
cat('out_SCE <- "', out_SCE, '"\n', sep = "")
cat('log_file <- "', log_file, '"\n', sep = "")
cat('\n')
cat('Sessioninfo:\n')
sessionInfo()
cat('\n')
## Load linked_txome.json
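# The linked-txome JSON registers the salmon index together with its source transcriptome
# annotation, so tximeta can attach transcript ranges and metadata for this custom index
# instead of relying only on its built-in lookup of known transcriptomes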
tximeta::loadLinkedTxome(linked_txome)
samplenames <- gsub(paste0(assembly, '-'), '', basename(samples))
coldata <- data.frame(files = file.path(samples, 'quant.sf'), names = samplenames, stringsAsFactors = F, check.names = F)
## import annotated abundances in transcript level
st <- tximeta::tximeta(
coldata=coldata,
txOut=TRUE, # output transcripts (default)
# skipMeta=TRUE, # meta = required for transcript outputs
# skipSeqinfo=TRUE, # lookup sizes
useHub=FALSE, # lookup similar indexes
# markDuplicateTxps=TRUE, # mark and track
cleanDuplicateTxps=TRUE, # fix
)
## Summarize to gene level
sg <- tximeta::summarizeToGene(st)
## This section deviates from the source script
## It outputs non-normalized matrices
## Save TPM matrix
TPM <- data.frame(assay(sg, "abundance"), stringsAsFactors = F, check.names = F) %>% rownames_to_column("gene")
out_TPM_matrix <- sub("-counts.tsv", "-TPM.tsv", out_matrix)
write.table(TPM, file=out_TPM_matrix, quote = F, sep = '\t', row.names = F)
## Save gene counts matrix
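# salmon/tximeta counts are estimated (fractional) counts; coercing the matrix mode to
# integer truncates them toward zero so count-based downstream tools receive whole numbers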
counts <- assay(sg, "counts")
mode(counts) <- "integer"
counts <- data.frame(counts, stringsAsFactors = F, check.names = F) %>% rownames_to_column("gene")
write.table(counts, file=out_matrix, quote = F, sep = '\t', row.names = F)
## Save gene length matrix
lengths <- data.frame(assay(sg, "length"), stringsAsFactors = F, check.names = F) %>% rownames_to_column("gene")
out_lengths_matrix <- sub("-counts.tsv", "-gene_lengths.tsv", out_matrix)
write.table(lengths, file=out_lengths_matrix, quote = F, sep = '\t', row.names = F)
## Returning to source script
## If rowData(st)$gene_id is a CharacterList, convert it to character to allow
## the joining below
if (is(rowData(st)$gene_id, "CharacterList")) {
if (any(vapply(rowData(st)$gene_id, length, 1) > 1)) {
warning("Some elements of rowData(st)$gene_id consisted of more than one",
"object. Only the first one is retained.")
}
rowData(st)$gene_id <- vapply(rowData(st)$gene_id, function(w) w[[1]], "")
}
## If rowData(st)$tx_id is of class integer, replace it with the tx_name
## column
if (is(rowData(st)$tx_id, "integer")) {
rowData(st)$tx_id <- rowData(st)$tx_name
}
## Add gene information, e.g. gene_name, entrezid, ... (if provided) to
## transcript-level SE
rowData(st) <- rowData(st) %>%
data.frame() %>%
dplyr::left_join(data.frame(rowData(sg))) %>%
data.frame()
## Change the row names in sg to have geneID__geneSymbol
rownames(sg) <- paste(rowData(sg)$gene_id, rowData(sg)$gene_name, sep = "__")
## Coerce the object from SummarizedExperiment to SingleCellExperiment
st <- as(st, "SingleCellExperiment")
sg <- as(sg, "SingleCellExperiment")
## Save single cell experiment object
saveRDS(list(st = st, sg = sg), file = out_SCE)
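# Illustrative: downstream scripts can reload both levels with
# sce <- readRDS(out_SCE); st <- sce$st; sg <- sce$sg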
|
/seq2science/scripts/quant_to_counts.R
|
permissive
|
NailouZhang/seq2science
|
R
| false | false | 3,847 |
r
|
#-----------you need to install the following packages. this only needs to be done once.
install.packages(c('sf', 'foreign', 'tidyverse', 'stringi', 'lwgeom'))
#-----------initialize libraries. This needs to be done for each new R session
library(sf)
library(foreign)
library(tidyverse)
library(lwgeom)
library(stringi)
options(stringsAsFactors = FALSE)
#-----------download files
#pick a region and download/unzip the .shp.zip file: http://download.geofabrik.de/
#-----------set the working directory to wherever you unzipped the downloaded files to
setwd("C:/Users/Erin/Documents/DataViz/!Complete/IndivRoads/OSM_Data/London/")
#-----------set some basic info about the city you're mapping
city <- "london"
lat <- 51.508420 #center point latitude
long <- -0.112730 #center point longitude
rad <- 20000 #radius, in meters, around the center point to map
crs <- 102013 #ESRI projection for mapping. I found mine here: https://spatialreference.org/ref/esri/europe-albers-equal-area-conic/ 102013 will give good results for Europe.
#-----------set up the road types you want to plot and what colors they should be
plottypes <- c('Road', 'Street', 'Avenue', 'Lane', 'Close', 'Way', 'Place', 'Embankment')
plotcolors <- c('Road' = '#59c8e5', 'Street' = '#fed032', 'Avenue' ='#4cb580', 'Lane' = '#fe4d64', 'Close' = '#0a7abf',
'Way' = '#2e968c', 'Place' = '#fe9ea5', 'Embankment' = '#fe9ea5', 'Motorway' = "#ff9223", 'Other' = '#cccccc')
#-----------get to plotting
#import road geography
filename <- "gis_osm_roads_free_1"
allroads <- read_sf(".", filename)
#subset the roads into a circle.
pt <- data.frame(lat = lat, long = long)
pt <- pt %>% st_as_sf(coords = c("long", "lat"), crs = 4326) %>% st_transform(crs)
circle <- st_buffer(pt, dist = rad)
circle <- circle %>% st_transform(st_crs(allroads))
allroads <- st_intersection(circle, allroads)
#remove unnamed footpaths
allroads <- allroads[!(allroads$fclass == "footway" & is.na(allroads$name)),]
#add in length
allroads$len <- st_length(allroads)
#-----derive road suffixes-----
#run this line if your suffixes are at the END of the name (e.g. Canal Street)
allroads$TYPE <- substr(allroads$name, stri_locate_last(allroads$name, regex = " ")[, 1] + 1, nchar(allroads$name)) %>% stri_trans_general(id = "Title")
#run this line if your "suffixes" are at the BEGINNING of the name (e.g. Calle de los Gatos)
allroads$TYPE <- substr(allroads$name, 1, str_locate(allroads$name, " ")[, 1] -1) %>% stri_trans_general(id = "Title") #for road prefixes
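#for illustration (made-up example strings), the suffix rule above maps "Canal Street" -> "Street",
#while the prefix rule maps "Calle de los Gatos" -> "Calle":
#substr("Canal Street", stri_locate_last("Canal Street", regex = " ")[, 1] + 1, nchar("Canal Street"))  # "Street"
#substr("Calle de los Gatos", 1, str_locate("Calle de los Gatos", " ")[, 1] - 1)                        # "Calle"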
#--------uncomment and run this code to get the top roads by length.
#--------i usually run this to decide what road types to plot
#plottype <- allroads %>% select(TYPE,len)
#plottype$geometry <- NULL
#plottype <- subset(plottype, !is.na(TYPE))
#plottype <- plottype %>% group_by(TYPE) %>% summarise(Length = sum(len)) %>% arrange(-Length)
#rename motorways that don't have some other designation
allroads$TYPE[allroads$fclass == 'motorway' & !(allroads$TYPE %in% plottypes)] <- "Motorway"
#put other roads into their own dataframe
allroads$TYPE[!(allroads$TYPE %in% plottypes) & allroads$TYPE != 'Motorway'] <- "Other"
otherroads <- allroads[(allroads$TYPE == "Other"),]
allroads <- allroads[(allroads$TYPE != "Other"),]
#plot it
blankbg <-theme(axis.line=element_blank(),axis.text.x=element_blank(),
axis.text.y=element_blank(),axis.ticks=element_blank(),
axis.title.x=element_blank(), axis.title.y=element_blank(),
panel.background=element_blank(),panel.border=element_blank(),panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),plot.background=element_blank())
ggplot() + blankbg + theme(panel.grid.major = element_line(colour = "transparent")) +
geom_sf(data=otherroads, size = .8, aes(color=TYPE)) +
geom_sf(data=allroads, size =1, aes(color=TYPE)) +
scale_color_manual(values = plotcolors, guide = "legend")
ggsave(paste0(".", city, ".png"), plot = last_plot(),
scale = 1, width = 24, height = 36, units = "in",
dpi = 500)
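#-----------optional: a minimal sketch (not part of the original workflow) wrapping the
#-----------plotting step in a helper, assuming the same TYPE column and palette as above
plot_roads <- function(main_roads, other_roads, colors) {
  ggplot() + blankbg + theme(panel.grid.major = element_line(colour = "transparent")) +
    geom_sf(data = other_roads, size = 0.8, aes(color = TYPE)) +   # background roads
    geom_sf(data = main_roads, size = 1, aes(color = TYPE)) +      # highlighted road types
    scale_color_manual(values = colors, guide = "legend")
}
#plot_roads(allroads, otherroads, plotcolors)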
|
/Worldwide.R
|
no_license
|
thendrie/RoadColors
|
R
| false | false | 4,182 |
r
|
/* ***********************************************************
* *
* Copyright, (C) Honeywell Information Systems Inc., 1983 *
* *
*********************************************************** */
/* DESCRIPTION:
dm_translate_system_config_ - program to extract DMS configuration
data from an ascii segment and return a pointer to a structure which
summarizes the configuration parameters.
*/
/* HISTORY:
Written by M. Pandolf, December 1982.
Modified:
03/15/83 by M. Pandolf: for default before journal keywords and for
logging process terminations.
05/05/83 by L. A. Newcomb: added recovery_check_mode and fixed the old
subsystem_inhibit to be subsystem_disposition. NOTE: work
needs to be done on error reporting for duplicate settings,
such as specifying "^recover, recovery_check_mode".
11/04/83 by M. Pandolf: to check to see if default before journal ends in ".bj"
05/29/84 by Lindsey Spratt: Changed to use version 2 dm_system_config.
Removed several elements of the config, to whit; maximum
number_of_before_journals, inhibit subsystem, and daemon error
trace. None of these were in use.
06/12/84 by Lindsey Spratt: Added code for shutdown_delay. Changed to check
for before_journal_size > 2 instead of 0.
*/
/* format: style4,indattr,ifthenstmt,ifthen,^indcomtxt,idind33 */
%page;
/*
The DM configuration file contains information used at data management initialization
time to configure the system to site dependent parameters. These
parameters override the values built in to the CDS named
"dm_system_data_".
The form of the DM configuration file is as follows:
<DM config file> ::= [<spec>]...<end statement>
<spec> ::= <default before journal size> |
<maximum number of processes> |
<maximum number of transactions> |
<default before journal> |
<previous bootload status> |
<current bootload enable> |
<daemon idle timeout> |
<shutdown delay> |
<daemon log proc terms>
<default before journal size> ::=
system_before_journal_size: <decimal integer>;
<maximum number of processes> ::=
max_processes: <decimal integer>;
<maximum number of transactions> ::=
max_transactions: <decimal integer>;
<default before journal> ::=
default_before_journal: <path spec>;
<previous bootload status> ::=
prev_bootload_status: <status option>[,<status option>]...;
<current bootload enable> ::=
current_bootload_enable: force | ^force;
<daemon idle timeout local> ::=
idle_timeout: <decimal integer>;
<shutdown delay> ::=
shutdown_delay: <decimal integer>;
<daemon log proc terms> ::=
log_proc_terms: on | off;
<end statement> ::= end;
<path spec> ::= dir=<path> | entry=<segment name> |
dir=<path>,entry=<segment name> |
entry=<segment name>,dir=<path> | <null string>
<status option> ::= hold | adopt | recover | recovery_check_mode | ^hold | ^adopt | ^recover | ^recovery_check_mode
<path> ::= <absolute pathname> | aim_dir | bootload_dir.
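   As a purely illustrative sketch (the values below are arbitrary examples,
   not recommended site settings), a configuration file conforming to this
   grammar might read:
        system_before_journal_size:    4000;
        max_processes:                 64;
        max_transactions:              128;
        default_before_journal:        entry=system_default.bj;
        prev_bootload_status:          adopt, recover;
        current_bootload_enable:       ^force;
        idle_timeout:                  300;
        log_proc_terms:                on;
        end;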
/*++
BEGIN
/ system_before_journal_size : <decimal-integer> ; /
LEX(2) [ if token.Nvalue > 2
then dm_system_config.default_bj_size = token.Nvalue;
else call ERROR(4)]
LEX(2) / BEGIN \
/ system_before_journal_size <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ system_before_journal_size <no-token> /
ERROR(8) / BEGIN \
/ max_processes : <decimal-integer> ; /
LEX(2) [ if token.Nvalue > 0
then dm_system_config.max_n_proc = token.Nvalue;
else call ERROR(4)]
LEX(2) / BEGIN \
/ max_processes <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ max_processes <no-token> /
ERROR(8) / BEGIN \
/ max_transactions : <decimal-integer> ; /
LEX(2) [ if token.Nvalue > 0
then dm_system_config.max_n_txn = token.Nvalue;
else call ERROR(4)]
LEX(2) / BEGIN \
/ max_transactions <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ max_transactions <no-token> /
ERROR(8) / BEGIN \
/ idle_timeout : <decimal-integer> ; /
LEX(2) [ if token.Nvalue > 0
then dm_system_config.idle_timeout = token.Nvalue;
else call ERROR(4)]
LEX(2) / BEGIN \
/ idle_timeout <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ idle_timeout <no-token> /
ERROR(8) / BEGIN \
/ shutdown_delay : <date_time_offset> ; /
LEX(2) [ dm_system_config.shutdown_delay = convert_date_time_offset();
]
LEX(2) / BEGIN \
/ shutdown_delay <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ shutdown_delay <no-token> /
ERROR(8) / BEGIN \
/ default_before_journal : /
LEX(2) / path_spec \
/ prev_bootload_status : /
[save_prev_dm_disp = dm_system_config.prev_dm_disp]
LEX(2) / status_spec \
/ current_bootload_enable : force ; /
[dm_system_config.curr_dm_enable = DM_FORCE_ENABLE_NEW_BOOTLOAD] LEX(4) / BEGIN \
/ current_bootload_enable : ^force ; /
[dm_system_config.curr_dm_enable = DM_DO_NOT_FORCE_ENABLE_NEW_BOOTLOAD] LEX(5) / BEGIN \
/ current_bootload_enable <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ current_bootload_enable <no-token> /
ERROR(8) / BEGIN \
/ log_proc_terms : on ; /
[dm_system_config.log_proc_terms = "1"b] LEX(4) / BEGIN \
/ log_proc_terms : off ; /
[dm_system_config.log_proc_terms = "0"b] LEX(4) / BEGIN \
/ log_proc_terms <any-token> /
LEX(2) ERROR(4) NEXT_STMT / BEGIN \
/ log_proc_terms <no-token> /
ERROR(8) / BEGIN \
/ end ; <no-token> /
return_table / RETURN \
/ end ; <any-token> /
ERROR(2) return_table / RETURN \
/ <any-token> /
ERROR(1) NEXT_STMT / BEGIN \
/ <no-token> /
ERROR(3) return_table / RETURN \
path_spec
/ ; /
LEX(1) / BEGIN \
/ dir = <absolute_path> ; /
LEX(2) [dm_system_config.default_bj.dir = token_value]
LEX(2) / BEGIN \
/ dir = <absolute_path> , /
LEX(2) [dm_system_config.default_bj.dir = token_value]
LEX(2) / path_spec \
/ dir = <any-token> /
LEX(2) ERROR(9) NEXT_STMT / BEGIN \
/ entry = <before_journal_name> ; /
LEX(2) [dm_system_config.default_bj.entry = token_value]
LEX(2) / BEGIN \
/ entry = <before_journal_name> , /
LEX(2) [dm_system_config.default_bj.entry = token_value]
LEX(2) / path_spec \
/ entry = <any-token> /
LEX(2) ERROR(10) NEXT_STMT / BEGIN \
/ <any-token> /
ERROR(4) NEXT_STMT / BEGIN \
/ <no-token> /
ERROR(8) / BEGIN \
status_spec
/ hold ; /
[dm_system_config.prev_dm_disp.hold = DM_HOLD_OLD_BOOTLOAD_DIRECTORY]
LEX(2) / BEGIN \
/ hold , /
[dm_system_config.prev_dm_disp.hold = DM_HOLD_OLD_BOOTLOAD_DIRECTORY]
LEX(2) / status_spec \
/ hold <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ ^hold ; /
[dm_system_config.prev_dm_disp.hold = DM_DO_NOT_HOLD_OLD_BOOTLOAD_DIRECTORY]
LEX(3) / BEGIN \
/ ^hold , /
[dm_system_config.prev_dm_disp.hold = DM_DO_NOT_HOLD_OLD_BOOTLOAD_DIRECTORY]
LEX(3) / status_spec \
/ ^hold <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ adopt ; /
[dm_system_config.prev_dm_disp.adopt = DM_ADOPT_OLD_BOOTLOAD]
LEX(2) / BEGIN \
/ adopt , /
[dm_system_config.prev_dm_disp.adopt = DM_ADOPT_OLD_BOOTLOAD]
LEX(2) / status_spec \
/ adopt <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ ^adopt ; /
[dm_system_config.prev_dm_disp.adopt = DM_DO_NOT_ADOPT_OLD_BOOTLOAD]
LEX(3) / BEGIN \
/ ^adopt , /
[dm_system_config.prev_dm_disp.adopt = DM_DO_NOT_ADOPT_OLD_BOOTLOAD]
LEX(3) / status_spec \
/ ^adopt <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ recover ; /
[dm_system_config.prev_dm_disp.recover = DM_RECOVER_OLD_BOOTLOAD]
LEX(2) / BEGIN \
/ recover , /
[dm_system_config.prev_dm_disp.recover = DM_RECOVER_OLD_BOOTLOAD]
LEX(2) / status_spec \
/ recover <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ ^recover ; /
[dm_system_config.prev_dm_disp.recover = DM_DO_NOT_RECOVER_OLD_BOOTLOAD]
LEX(3) / BEGIN \
/ ^recover , /
[dm_system_config.prev_dm_disp.recover = DM_DO_NOT_RECOVER_OLD_BOOTLOAD]
LEX(3) / status_spec \
/ ^recover <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ recovery_check_mode ; /
[dm_system_config.prev_dm_disp.recovery_check_mode = DM_RECOVERY_CHECK_MODE_ON]
LEX(2) / BEGIN \
/ recovery_check_mode , /
[dm_system_config.prev_dm_disp.recovery_check_mode = DM_RECOVERY_CHECK_MODE_ON]
LEX(2) / status_spec \
/ recovery_check_mode <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ ^recovery_check_mode ; /
[dm_system_config.prev_dm_disp.recovery_check_mode = DM_RECOVERY_CHECK_MODE_OFF]
LEX(3) / BEGIN \
/ ^recovery_check_mode , /
[dm_system_config.prev_dm_disp.recovery_check_mode = DM_RECOVERY_CHECK_MODE_OFF]
LEX(3) / status_spec \
/ ^recovery_check_mode <any-token> /
[dm_system_config.prev_dm_disp = save_prev_dm_disp]
LEX(1) ERROR(6) NEXT_STMT / BEGIN \
/ <any-token> /
ERROR(4) NEXT_STMT / BEGIN \
/ <no-token> /
ERROR(8) / BEGIN \
++*/
/* format: style2,ind3 */
dm_translate_system_config_:
procedure (p_system_config_file_ptr, p_system_config_file_len, p_long_sw, p_area_ptr, p_system_config_ptr, p_code);
%page;
/* DECLARATIONS */
/* Parameter */
dcl p_system_config_file_ptr
pointer parameter; /* INPUT - pointer to ascii data */
dcl p_system_config_file_len
fixed bin (21) parameter;
/* INPUT - length of ascii data in bytes */
dcl p_long_sw bit (1) aligned parameter;
/* INPUT - true if errors to be printed out */
dcl p_area_ptr pointer parameter; /* INPUT - pointer to area where site data to be allocated */
dcl p_system_config_ptr pointer parameter; /* OUTPUT - pointer to structure */
dcl p_code fixed bin (35) parameter;
/* OUTPUT - system status code */
/* Automatic */
dcl code fixed bin (35);
dcl breaks char (128) varying aligned;
dcl ignored_breaks char (128) varying aligned;
dcl lex_delims char (128) varying aligned;
dcl lex_control_chars char (128) varying aligned;
dcl byte_count fixed bin (21);
dcl source_ptr pointer;
dcl temp_storage_ptr pointer;
dcl Pfirst_stmt_descriptor pointer;
dcl Pfirst_token_descriptor
pointer;
dcl abs_path char (168);
dcl 1 my_dm_system_config aligned like dm_system_config;
dcl 1 save_prev_dm_disp aligned like dm_system_config_info.prev_dm_disp;
/* Static, Internal */
dcl 1 error_control_table dimension (10) internal static,
2 severity fixed bin (17) unaligned init (2, 1, 2, 2, 4, 2, 2, 3, 2, 2),
2 Soutput_stmt bit (1) unaligned init ((10) (1)"1"b),
2 message char (96) varying
init ("An unknown statement has been encountered: statement ignored.",
"Text follows the end statement and is being ignored.",
"The end statement is missing: one has been supplied.",
"""^a"" is not a valid keyword option: statement ignored.",
"The end statement has been prematurely encountered.",
"An option delimiter is missing: statement ignored.", "The option list is invalid.",
"The statement has prematurely ended.",
"""^a"" is not a valid directory name: statement ignored.",
"""^a"" is not a valid before journal name: statement ignored."),
2 brief_message char (48) varying
init ("Unknown statement.", "Text follows the end statement.",
"Missing end statement.", "Invalid option.", "Premature end statement.",
"Missing option delimiter.", "Invalid option list.", "Premature statement end.",
"Invalid directory name.", "Invalid before journal name.");
/* Static, External */
dcl error_table_$no_stmt_delim
fixed bin (35) ext static;
dcl error_table_$translation_failed
fixed bin (35) ext static;
dcl error_table_$improper_data_format
fixed bin (35) ext static;
dcl error_table_$notalloc fixed bin (35) ext static;
/* Based */
dcl caller_area area based (p_area_ptr);
/* Constant */
dcl ME char (32) aligned internal static options (constant)
init ("dm_translate_system_config_");
/* Entry */
dcl absolute_pathname_ entry (char (*), char (*), fixed bin (35));
dcl convert_date_to_binary_
entry (char (*), fixed bin (71), fixed bin (35));
dcl dm_gen_checksum_ entry (ptr, fixed bin (18)) returns (fixed bin (35));
dcl get_temp_segment_ entry (char (*) aligned, ptr, fixed bin (35));
dcl lex_string_$init_lex_delims
entry (char (*), char (*), char (*), char (*), char (*), bit (*), char (*) var,
char (*) var, char (*) var, char (*) var);
dcl lex_string_$lex entry (ptr, fixed bin (21), fixed bin (21), ptr, bit (*), char (*), char (*),
char (*), char (*), char (*), char (*) var, char (*) var, char (*) var, char (*) var,
ptr, ptr, fixed bin (35));
dcl sub_err_ entry () options (variable);
dcl translator_temp_$get_segment
entry (char (*) aligned, ptr, fixed bin (35));
dcl translator_temp_$release_all_segments
entry (ptr, fixed bin (35));
dcl release_temp_segment_ entry (char (*) aligned, ptr, fixed bin (35));
/* Builtin */
dcl null builtin;
dcl substr builtin;
dcl collate builtin;
dcl addr builtin;
dcl size builtin;
dcl rtrim builtin;
/* Condition */
dcl cleanup condition;
dcl area condition;
/* END DECLARATIONS */
%page;
/* BEGIN CODE */
/* initialize some values */
source_ptr = p_system_config_file_ptr;
byte_count = p_system_config_file_len;
temp_storage_ptr = null ();
p_code = error_table_$translation_failed;
error_control_table.Soutput_stmt (*) = p_long_sw;
/* set up cleanup handler */
on cleanup
begin;
call translator_temp_$release_all_segments (temp_storage_ptr, code);
call release_temp_segment_ (ME, dm_system_config_ptr, code);
goto dm_translate_system_config_exit;
end;
/* get temp space for translation as required by rd */
call translator_temp_$get_segment (ME, temp_storage_ptr, code);
if code ^= 0
then call dm_translate_system_config_abort ();
/* set up the lex function */
breaks = substr (collate (), 1, 33) || ",:=^" || substr (collate (), 128, 1);
ignored_breaks = substr (collate (), 1, 8) || substr (collate (), 10, 24) || substr (collate (), 128, 1);
call
lex_string_$init_lex_delims ("""", """", "/*", "*/", ";", "10"b, breaks, ignored_breaks, lex_delims,
lex_control_chars);
/* initiate the defaults in the config structure */
dm_system_config_ptr = addr (my_dm_system_config);
dm_system_config.version = DM_CONFIG_VERSION_2;
dm_system_config.idle_timeout = 0;
dm_system_config.shutdown_delay = 0;
dm_system_config.log_proc_terms = "1"b;
dm_system_config.lock = ""b;
dm_system_config.max_n_txn = 0;
dm_system_config.max_n_proc = 0;
dm_system_config.default_bj_size = 0;
dm_system_config.default_bj.dir = "";
dm_system_config.default_bj.entry = "";
dm_system_config.prev_dm_disp.adopt = DM_ADOPT_OLD_BOOTLOAD;
dm_system_config.prev_dm_disp.hold = DM_DO_NOT_HOLD_OLD_BOOTLOAD_DIRECTORY;
dm_system_config.prev_dm_disp.recover = DM_RECOVER_OLD_BOOTLOAD;
dm_system_config.prev_dm_disp.recovery_check_mode = DM_RECOVERY_CHECK_MODE_OFF;
dm_system_config.curr_dm_enable = DM_DO_NOT_FORCE_ENABLE_NEW_BOOTLOAD;
/* call subroutine to lex the dsdt file */
call
lex_string_$lex (source_ptr, byte_count, 0, temp_storage_ptr, "1000"b, """", """", "/*", "*/", ";", breaks,
ignored_breaks, lex_delims, lex_control_chars, Pfirst_stmt_descriptor, Pfirst_token_descriptor, code);
if ^(code = 0 | code = error_table_$no_stmt_delim)
then
do;
call dm_translate_system_config_abort ();
end;
Pthis_token = Pfirst_token_descriptor;
call SEMANTIC_ANALYSIS ();
call translator_temp_$release_all_segments (temp_storage_ptr, code);
call release_temp_segment_ (ME, dm_system_config_ptr, code);
dm_translate_system_config_exit:
return;
/* BEGIN RELATIVE FUNCTIONS */
absolute_path:
procedure () returns (bit (1) aligned);
/*
this function returns true if the current token is either an absolute
or one of the reserved pathname keywords "aim_dir" or "bootload_dir"
*/
if token_value = "aim_dir" | token_value = "bootload_dir"
then return ("1"b);
if substr (token_value, 1, 1) ^= ">"
then return ("0"b);
call absolute_pathname_ (token_value, abs_path, code);
if code ^= 0
then return ("0"b);
else return ("1"b);
end absolute_path;
date_time_offset:
procedure () returns (bit (1) aligned);
dcl dto_convert bit (1) aligned init ("0"b);
dcl dto_code fixed bin (35) init (0);
dcl dto_constant_time_str char (32) varying init ("January 1, 1980 9am");
dcl dto_new_time_str char (64) varying init ("");
dcl dto_constant_time fixed bin (71) init (0);
dcl dto_new_time fixed bin (71) init (0);
goto DTO_JOIN;
convert_date_time_offset:
entry () returns (fixed bin (71));
dto_convert = "1"b;
DTO_JOIN:
dto_new_time_str = dto_constant_time_str || " + " || token_value;
call convert_date_to_binary_ ((dto_new_time_str), dto_new_time, dto_code);
if ^dto_convert
then if dto_code ^= 0
then return ("0"b);
else return ("1"b);
else if dto_code ^= 0
then
do;
call
sub_err_ (dto_code, ME, ACTION_DEFAULT_RESTART, null, 0, "^/Unable to convert ^a to its binary form.",
token_value);
return (0);
end;
call convert_date_to_binary_ ((dto_constant_time_str), dto_constant_time, dto_code);
if dto_code ^= 0
then
do;
call
sub_err_ (dto_code, ME, ACTION_DEFAULT_RESTART, null, 0, "^/Unable to convert ^a to its binary form.",
dto_new_time_str);
return (0);
end;
return (dto_new_time - dto_constant_time);
end date_time_offset;
before_journal_name:
procedure () returns (bit (1) aligned);
/*
this function returns true if the token is comprised of letters, digits,
the underscore, and the period. additionally, the token must end in
".bj".
*/
if (token.Lvalue < 1) | (token.Lvalue > 32)
then return ("0"b);
if verify (token_value, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_.") ^= 0
then return ("0"b);
if substr (reverse (rtrim (token_value)), 1, 3) ^= "jb."
then return ("0"b);
return ("1"b);
end before_journal_name;
/* BEGIN ACTION PROCEDURES */
return_table:
procedure ();
if p_area_ptr ^= null ()
then
do;
dm_system_config.checksum = dm_gen_checksum_ (dm_system_config_ptr, size (dm_system_config_info) - 1);
on area
begin;
code = error_table_$notalloc;
call dm_translate_system_config_abort ();
end;
allocate dm_system_config in (caller_area) set (p_system_config_ptr);
revert area;
p_system_config_ptr -> dm_system_config = dm_system_config_ptr -> dm_system_config;
end;
if MERROR_SEVERITY < 3
then p_code = 0;
return;
end return_table;
dm_translate_system_config_abort:
procedure ();
p_code = code;
call translator_temp_$release_all_segments (temp_storage_ptr, code);
goto dm_translate_system_config_exit;
end dm_translate_system_config_abort;
%page;
%include dm_system_config;
%page;
%include sub_err_flags;
|
/library_dir_dir/system_library_standard/source/bound_dm_.3.s.archive/dm_translate_system_config_.rd
|
no_license
|
dancrossnyc/multics
|
R
| false | false | 20,833 |
rd
|
library(diagram)
### Name: textplain
### Title: adds lines of text to a plot
### Aliases: textplain
### Keywords: aplot
### ** Examples
openplotmat(main = "textplain")
textplain(mid = c(0.5, 0.5),
lab = c("this text is", "centered", "4 strings", "on 4 lines"))
textplain(mid = c(0.5, 0.2), adj = c(0, 0.5), font = 2, height = 0.05,
lab = c("this text is","left alligned"))
textplain(mid = c(0.5, 0.8), adj = c(1, 0.5), font = 3, height = 0.05,
lab = c("this text is","right alligned"))
|
/data/genthat_extracted_code/diagram/examples/textplain.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 539 |
r
|
/final/code.R
|
no_license
|
Magellen/-gene-project
|
R
| false | false | 15,827 |
r
|