column         type           stats
content        large_string   lengths 0 to 6.46M
path           large_string   lengths 3 to 331
license_type   large_string   2 values
repo_name      large_string   lengths 5 to 125
language       large_string   1 value
is_vendor      bool           2 classes
is_generated   bool           2 classes
length_bytes   int64          4 to 6.46M
extension      large_string   75 values
text           string         lengths 0 to 6.46M
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------

# JUnit test class: dml.test.integration.descriptivestats.UnivariateStatsTest.java
# command line invocation assuming $C_HOME is set to the home of the R script
# Rscript $C_HOME/Categorical.R $C_HOME/in/ $C_HOME/expected/

args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")

# read the categorical input vector in Matrix Market format
V = readMM(paste(args[1], "vector.mtx", sep=""))
tab = table(V[,1])
cat = t(as.numeric(names(tab)))   # distinct category values (note: shadows base::cat)
Nc = t(as.vector(tab))            # per-category counts

# the number of categories of the categorical variable
R = length(Nc)

# total count
s = sum(Nc)

# percentage of each category relative to the total case number
Pc = Nc / s

# all categorical values of the categorical variable
C = (Nc > 0)

# mode
mx = max(Nc)
Mode = (Nc == mx)

writeMM(as(t(Nc),"CsparseMatrix"), paste(args[2], "Nc", sep=""), format="text");
write(R, paste(args[2], "R", sep=""));
writeMM(as(t(Pc),"CsparseMatrix"), paste(args[2], "Pc", sep=""), format="text");
writeMM(as(t(C),"CsparseMatrix"), paste(args[2], "C", sep=""), format="text");
writeMM(as(t(Mode),"CsparseMatrix"), paste(args[2], "Mode", sep=""), format="text");
/src/test/scripts/applications/descriptivestats/Categorical.R
permissive
apache/systemds
R
false
false
1,982
r
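As a side note on the Categorical.R script above: the same statistics can be sanity-checked interactively in plain R, without the Matrix Market I/O. The vector below is made up for illustration; it is not part of the SystemDS test data.

# Hypothetical in-memory check of the same categorical statistics
v <- c(1, 2, 2, 3, 3, 3)              # made-up category codes
tab <- table(v)
Nc  <- as.vector(tab)                 # per-category counts
R   <- length(Nc)                     # number of categories
Pc  <- Nc / sum(Nc)                   # category proportions
Mode <- as.numeric(Nc == max(Nc))     # indicator of the modal category (or categories)
list(Nc = Nc, R = R, Pc = Pc, Mode = Mode)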
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ECT_item_bank}
\alias{ECT_item_bank}
\title{ECT item bank}
\description{
The ECT's item bank
}
/man/ECT_item_bank.Rd
permissive
klausfrieler/ECT
R
false
true
201
rd
# skewness = E[(N - EN)^3 / stdvN^3]
# r = 3, p = 0.4
skew <- function(nreps, r, p) {
  Result_V <- vector(length = nreps)
  SK_V <- vector(length = nreps)
  Var_V <- vector(length = nreps)

  # simulate the game: count the trials needed to collect r successes
  # (success probability p), and put the results in Result_V
  for (rep in 1:nreps) {
    total <- 0
    number <- 0
    while (total < r) {
      get = sample(0:1, size = 1, prob = c((1 - p), p), replace = FALSE)
      total = total + get
      number = number + 1
    }
    Result_V[rep] = number
  }

  # sample mean of the trial counts
  EN <- mean(Result_V)
  #print(EN)

  # sample variance and standard deviation
  for (rep_1 in 1:nreps) {
    Var_V[rep_1] = (Result_V[rep_1] - EN)^2
  }
  Var <- mean(Var_V)
  stdV <- sqrt(Var)
  #print(Var)

  # standardized third moment (skewness)
  for (rep_2 in 1:nreps) {
    SK_V[rep_2] = ((Result_V[rep_2] - EN)^3 / (stdV^3))
  }
  result <- mean(SK_V)
  print(result)
}

skew(40000, 3, 0.4)
/hwk3/ProblemB.R
no_license
JCBreath/ECS132
R
false
false
928
r
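A cross-check on the simulation above: the number of trials needed for the r-th success follows a negative binomial distribution, whose skewness has the closed form (2 - p) / sqrt(r * (1 - p)), about 1.19 for r = 3 and p = 0.4. A short base-R sketch comparing the two; rnbinom() counts failures before the r-th success, and adding r does not change the skewness.

r <- 3; p <- 0.4
theoretical <- (2 - p) / sqrt(r * (1 - p))      # closed-form skewness, ~1.193

set.seed(1)
n <- rnbinom(40000, size = r, prob = p) + r     # simulated trial counts
m <- mean(n)
empirical <- mean((n - m)^3) / mean((n - m)^2)^1.5

c(theoretical = theoretical, empirical = empirical)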
setwd("/home/joelerll/espol-2017/analisis_datos/proyecto") library(ggmap) boxes<-data.frame(maxlat = 40.6910569858,minlat = 40.680396,maxlong = -73.9176609858,minlong = -73.907, id="1") boxes<-transform(boxes, laby=(maxlat +minlat )/2, labx=(maxlong+minlong )/2) datos <- read.csv('data/enero3.csv') datos = datos[datos$pickups_place == '7,6',] map <- get_map(location = 'Manhattan', zoom = 12) mapPoints <- ggmap(map) + geom_point(data = datos, aes(x = pickup_longitude, y = pickup_latitude), color = "black", size = 0.001) #+ geom_polygon( data=datos, aes(x=pickup_longitude, y=pickup_latitude,group=group),colour="black", fill="white" ) #+ geom_rect(data=boxes, aes(xmin=minlong , xmax=maxlong, ymin=minlat, ymax=maxlat ), color="red", fill="transparent") #geom_point(data = datos, aes(x = dropoff_longitude, y = dropoff_latitude), color = "red", size = 0.001) + mapPoints ,p <- ggplot(mtcars, aes(wt, mpg)) p + geom_point()
/mapas.R
no_license
jorgermurillo/AED2
R
false
false
933
r
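One caveat about the ggmap script above: recent ggmap releases typically require registering a Google Maps API key (register_google()) before get_map() will return a basemap. A fallback sketch that plots the same pickup coordinates with plain ggplot2 and no basemap; the column names are taken from the script above and the CSV path is assumed unchanged.

library(ggplot2)

datos <- read.csv("data/enero3.csv")                    # same file as above
datos <- datos[datos$pickups_place == "7,6", ]

ggplot(datos, aes(x = pickup_longitude, y = pickup_latitude)) +
  geom_point(size = 0.001, color = "black") +
  coord_quickmap() +                                    # keep an approximate lon/lat aspect ratio
  labs(x = "Longitude", y = "Latitude", title = "Pickup locations (no basemap)")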
data(DengueSimR02)

r.max <- seq(20, 1000, 20)
r.min <- seq(0, 980, 20)
r.mid <- (r.max + r.min)/2

# Let's see if there's a difference in spatial dependence between those
# that occurred late versus early in the outbreak
type <- 2 - (DengueSimR02[, "time"] < 120)
tmp <- cbind(DengueSimR02, type = type)

typed.tau <- get.tau.typed(tmp, typeA = 1, typeB = 2, r = r.max, r.low = r.min,
                           comparison.type = "independent")

plot(r.mid, typed.tau, log = "y", cex.axis = 1.25,
     xlab = "Distance (m)", ylab = "Tau", cex.main = 0.9, lwd = 2, type = "l")
abline(h = 1, lty = 2)
/example/get_tau_typed.R
no_license
ffinger/IDSpatialStats
R
false
false
498
r
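A rough, illustrative way to gauge uncertainty around the tau curve above is to resample cases with replacement and recompute the statistic. This naive bootstrap assumes get.tau.typed() accepts the resampled matrix exactly as in the example; the package's own bootstrap/CI helpers, if available, would normally be preferred.

set.seed(42)
nboot <- 50
boot.tau <- replicate(nboot, {
  idx <- sample(nrow(tmp), replace = TRUE)              # resample cases with replacement
  get.tau.typed(tmp[idx, ], typeA = 1, typeB = 2,
                r = r.max, r.low = r.min, comparison.type = "independent")
})
ci <- apply(boot.tau, 1, quantile, probs = c(0.025, 0.975))  # pointwise 95% envelope
lines(r.mid, ci[1, ], lty = 3)
lines(r.mid, ci[2, ], lty = 3)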
\name{drayleigh}
\alias{drayleigh}
\alias{prayleigh}
\alias{qrayleigh}
\alias{rrayleigh}
\alias{erayleigh}
\alias{vrayleigh}
\title{The Rayleigh distribution.}
\description{
  Rayleigh density, distribution, quantile function and random number
  generation.
}
\usage{
  drayleigh(x, scale=1, log=FALSE)
  prayleigh(q, scale=1)
  qrayleigh(p, scale=1)
  rrayleigh(n, scale=1)
  erayleigh(scale=1)
  vrayleigh(scale=1)
}
\arguments{
  \item{x, q}{quantile.}
  \item{p}{probability.}
  \item{n}{number of observations.}
  \item{scale}{scale parameter (\eqn{>0}).}
  \item{log}{logical; if \code{TRUE}, logarithmic density will be returned.}
}
\details{
  The Rayleigh distribution arises as the distribution of the square root
  of an exponentially distributed (or \eqn{\chi^2_2}-distributed) random
  variable. If \eqn{X} follows an exponential distribution with rate
  \eqn{\lambda} and expectation \eqn{1/\lambda}, then
  \eqn{Y=\sqrt{X}}{Y=sqrt(X)} follows a Rayleigh distribution with scale
  \eqn{\sigma=1/\sqrt{2\lambda}}{sigma=1/sqrt(2*lambda)} and expectation
  \eqn{\sqrt{\pi/(4\lambda)}}{sqrt(pi/(4*lambda))}.

  Note that the exponential distribution is the \emph{maximum entropy
  distribution} among distributions supported on the positive real numbers
  and with a pre-specified expectation; so the Rayleigh distribution gives
  the corresponding distribution of its square root.
}
\value{
  \sQuote{\code{drayleigh()}} gives the density function,
  \sQuote{\code{prayleigh()}} gives the cumulative distribution function (CDF),
  \sQuote{\code{qrayleigh()}} gives the quantile function (inverse CDF),
  and \sQuote{\code{rrayleigh()}} generates random deviates.
  The \sQuote{\code{erayleigh()}} and \sQuote{\code{vrayleigh()}} functions
  return the corresponding Rayleigh distribution's expectation and variance,
  respectively.
}
\references{
  C. Roever, R. Bender, S. Dias, C.H. Schmid, H. Schmidli, S. Sturtz,
  S. Weber, T. Friede.
  On weakly informative prior distributions for the heterogeneity parameter
  in Bayesian random-effects meta-analysis.
  \emph{\href{https://arxiv.org/abs/2007.08352}{arXiv preprint 2007.08352}}
  (submitted for publication), 2020.

  N.L. Johnson, S. Kotz, N. Balakrishnan. \emph{Continuous univariate
  distributions}, volume 1. Wiley, New York, 2nd edition, 1994.
}
\author{
  Christian Roever \email{christian.roever@med.uni-goettingen.de}
}
\seealso{
  \code{\link{dexp}}, \code{\link{dlomax}}, \code{\link{dhalfnormal}},
  \code{\link{dhalft}}, \code{\link{dhalfcauchy}},
  \code{\link{TurnerEtAlPrior}}, \code{\link{RhodesEtAlPrior}},
  \code{\link{bayesmeta}}.
}
\examples{
########################
# illustrate densities:
x <- seq(0,6,le=200)
plot(x, drayleigh(x, scale=0.5), type="l", col="green",
     xlab=expression(tau), ylab=expression("probability density "*f(tau)))
lines(x, drayleigh(x, scale=1/sqrt(2)), col="red")
lines(x, drayleigh(x, scale=1), col="blue")
abline(h=0, v=0, col="grey")

###############################################
# illustrate exponential / Rayleigh connection
# via a quantile-quantile plot (Q-Q-plot):
N <- 10000
exprate <- 5
plot(sort(sqrt(rexp(N, rate=exprate))),
     qrayleigh(ppoints(N), scale=1/sqrt(2*exprate)))
abline(0, 1, col="red")

###############################################
# illustrate Maximum Entropy distributions
# under similar but different constraints:
mu <- 0.5
tau <- seq(0, 4*mu, le=100)
plot(tau, dexp(tau, rate=1/mu), type="l", col="red", ylim=c(0,1/mu),
     xlab=expression(tau), ylab="probability density")
lines(tau, drayleigh(tau, scale=1/sqrt(2*1/mu^2)), col="blue")
abline(h=0, v=0, col="grey")
abline(v=mu, col="darkgrey"); axis(3, at=mu, label=expression(mu))
# explicate constraints:
legend("topright", pch=15, col=c("red","blue"),
       c(expression("Exponential: E["*tau*"]"==mu),
         expression("Rayleigh: E["*tau^2*"]"==mu^2)))
}
\keyword{ distribution }
/man/drayleigh.Rd
no_license
gunhanb/bayesmeta
R
false
false
3,910
rd
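The \details section above states that if X is exponential with rate lambda, then sqrt(X) is Rayleigh with scale 1/sqrt(2*lambda) and expectation sqrt(pi/(4*lambda)). A base-R sketch that checks the stated expectation numerically; the rate value is arbitrary and no bayesmeta functions are needed.

lambda <- 2.5                            # arbitrary exponential rate
set.seed(123)
y <- sqrt(rexp(1e6, rate = lambda))      # square root of exponential draws

c(simulated_mean = mean(y),
  stated_mean    = sqrt(pi / (4 * lambda)),   # expectation from the \details section
  implied_scale  = 1 / sqrt(2 * lambda))      # Rayleigh scale parameter sigma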
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{asROCRPrediction}
\alias{asROCRPrediction}
\title{Converts predictions to a format package ROCR can handle.}
\usage{
asROCRPrediction(pred)
}
\arguments{
\item{pred}{[\code{\link{Prediction}}]\cr
Prediction object.}
}
\description{
Converts predictions to a format package ROCR can handle.
}
/man/asROCRPrediction.Rd
no_license
Daron-Wan/mlr
R
false
false
352
rd
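A hedged usage sketch for asROCRPrediction(): train an mlr classifier with probability predictions, convert the prediction, and hand it to ROCR. The learner, example task, and performance measures are illustrative choices, not taken from this help page.

library(mlr)
library(ROCR)

lrn  <- makeLearner("classif.rpart", predict.type = "prob")  # probability-predicting learner
mod  <- train(lrn, sonar.task)                               # sonar.task ships with mlr
pred <- predict(mod, sonar.task)

rocr.pred <- asROCRPrediction(pred)           # the conversion documented above
perf <- performance(rocr.pred, "tpr", "fpr")  # ROC curve via ROCR
plot(perf)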
\name{NumericTrack-class} \Rdversion{1.1} \docType{class} \alias{NumericTrack-class} \alias{NumericTrack} \alias{drawAxis,NumericTrack-method} \alias{drawGrid,NumericTrack-method} \title{NumericTrack class and methods} \description{ The virtual parent class for all track items in the Gviz package designed to contain numeric data. This class merely exists for dispatching purpose. } \section{Objects from the class}{ A virtual class: No objects may be created from it. } \section{Slots}{ \describe{ \item{\code{range}:}{Object of class \code{\linkS4class{GRanges}}, inherited from class \code{\linkS4class{RangeTrack}}} \item{\code{chromosome}:}{Object of class \code{"character"}, inherited from class \code{\linkS4class{RangeTrack}}} \item{\code{genome}:}{Object of class \code{"character"}, inherited from class \code{\linkS4class{RangeTrack}}} \item{\code{dp}:}{Object of class \code{\linkS4class{DisplayPars}}, inherited from class \code{\linkS4class{GdObject}}} \item{\code{name}:}{Object of class \code{"character"}, inherited from class \code{\linkS4class{GdObject}}} \item{\code{imageMap}:}{Object of class \code{\linkS4class{ImageMap}}, inherited from class \code{\linkS4class{GdObject}}} } } \section{Extends}{ Class \code{"\linkS4class{RangeTrack}"}, directly. Class \code{"\linkS4class{GdObject}"}, by class "RangeTrack", distance 2. } \section{Methods}{ \bold{\emph{Internal methods:}} \describe{ \item{drawAxis}{\code{signature(GdObject="NumericTrack")}: add a y-axis to the title panel of a track. \emph{Usage:} \code{drawAxis(x, from, to, ...)} \emph{Additional Arguments:} \describe{ \item{}{\code{from}, \code{to}: integer scalars, restrict to coordinate range before computing the axis ranges.} \item{}{\code{\dots}: additional arguments are ignored.} } \emph{Examples:} \describe{ \item{}{\code{Gviz:::drawAxis(obj)}} } } \item{drawGrid}{\code{signature(GdObject="NumericTrack")}: superpose a grid on top of a track. \emph{Usage:} \code{drawGrid(GdObject, from, to, ...)} \emph{Additional Arguments:} \describe{ \item{}{\code{from}, \code{to}: integer scalars, restrict to coordinate range before computing the grid lines.} } \emph{Examples:} \describe{ \item{}{\code{Gviz:::drawGrid(obj)}} } } \item{initialize}{\code{signature(.Object="NumericTrack")}: initialize the object.} } \bold{\emph{Inherited methods:}} \describe{ \item{[}{\code{signature(x="NumericTrack", i="ANY", j="ANY", drop="ANY")}: subset the items in the \code{NumericTrack} object. This is essentially similar to subsetting of the \code{\linkS4class{GRanges}} object in the \code{range} slot. For most applications, the \code{subset} method may be more appropriate. \emph{Additional Arguments:} \describe{ \item{}{\code{i}, \code{j}: subsetting indices, \code{j} is ignored.} \item{}{\code{drop}: argument is ignored.} } \emph{Examples:} \describe{ \item{}{\code{obj[1:5]}} } } \item{chromosome}{\code{signature(GdObject="NumericTrack")}: return the chromosome for which the track is defined. \emph{Usage:} \code{chromosome(GdObject)} \emph{Examples:} \describe{ \item{}{\code{chromosome(obj)}} } } \item{chromosome<-}{\code{signature(GdObject="NumericTrack")}: replace the value of the track's chromosome. This has to be a valid UCSC chromosome identifier or an integer or character scalar that can be reasonably coerced into one. 
\emph{Usage:} \code{chromosome<-(GdObject, value)} \emph{Additional Arguments:} \describe{ \item{}{\code{value}: replacement value.} } \emph{Examples:} \describe{ \item{}{\code{chromosome(obj) <- "chr12"}} } } \item{start, end, width}{\code{signature(x="NumericTrack")}: the start or end coordinates of the track items, or their width in genomic coordinates. \emph{Usage:} \code{start(x)} \code{end(x)} \code{width(x)} \emph{Examples:} \describe{ \item{}{\code{start(obj)}} \item{}{\code{end(obj)}} \item{}{\code{width(obj)}} } } \item{start<-, end<-, width<-}{\code{signature(x="NumericTrack")}: replace the start or end coordinates of the track items, or their width. \emph{Usage:} \code{start<-(x, value)} \code{end<-(x, value)} \code{width<-(x, value)} \emph{Additional Arguments:} \describe{ \item{}{\code{value}: replacement value.} } \emph{Examples:} \describe{ \item{}{\code{start(obj) <- 1:10}} \item{}{\code{end(obj) <- 20:30}} \item{}{\code{width(obj) <- 1}} } } \item{position}{\code{signature(GdObject="NumericTrack")}: the arithmetic mean of the track item's coordionates, i.e., \code{(end(obj)-start(obj))/2}. \emph{Usage:} \code{position(GdObject)} \emph{Examples:} \describe{ \item{}{\code{position(obj)}} } } \item{feature}{\code{signature(GdObject="NumericTrack")}: return the grouping information for track items. For certain sub-classes, groups may be indicated by different color schemes when plotting. See \code{\link{grouping}} or \code{\linkS4class{AnnotationTrack}} and \code{\linkS4class{GeneRegionTrack}} for details. \emph{Usage:} \code{feature(GdObject)} \emph{Examples:} \describe{ \item{}{\code{feature(obj)}} } } \item{feature<-}{\code{signature(gdObject="NumericTrack", value="character")}: set the grouping information for track items. This has to be a factor vector (or another type of vector that can be coerced into one) of the same length as the number of items in the \code{NumericTrack}. See \code{\link{grouping}} or \code{\linkS4class{AnnotationTrack}} and \code{\linkS4class{GeneRegionTrack}} for details. \emph{Usage:} \code{feature<-(GdObject, value)} \emph{Additional Arguments:} \describe{ \item{}{\code{value}: replacement value.} } \emph{Examples:} \describe{ \item{}{\code{feature(obj) <- c("a", "a", "b", "c", "a")}} } } \item{genome}{\code{signature(x="NumericTrack")}: return the track's genome. \emph{Usage:} \code{genome(x)} \emph{Examples:} \describe{ \item{}{\code{genome(obj)}} } } \item{genome<-}{\code{signature(x="NumericTrack")}: set the track's genome. Usually this has to be a valid UCSC identifier, however this is not formally enforced here. \emph{Usage:} \code{genome<-(x, value)} \emph{Additional Arguments:} \describe{ \item{}{\code{value}: replacement value.} } \emph{Examples:} \describe{ \item{}{\code{genome(obj) <- "mm9"}} } } \item{length}{\code{signature(x="NumericTrack")}: return the number of items in the track. \emph{Usage:} \code{length(x)} \emph{Examples:} \describe{ \item{}{\code{length(obj)}} } } \item{range}{\code{signature(x="NumericTrack")}: return the genomic coordinates for the track as an object of class \code{\linkS4class{IRanges}}. \emph{Usage:} \code{range(x)} \emph{Examples:} \describe{ \item{}{\code{range(obj)}} } } \item{ranges}{\code{signature(x="NumericTrack")}: return the genomic coordinates for the track along with all additional annotation information as an object of class \code{\linkS4class{GRanges}}. 
\emph{Usage:} \code{ranges(x)} \emph{Examples:} \describe{ \item{}{\code{ranges(obj)}} } } \item{split}{\code{signature(x="NumericTrack")}: split a \code{NumericTrack} object by an appropriate factor vector (or another vector that can be coerced into one). The output of this operation is a list of objects of the same class as the input object, all inheriting from class \code{NumericTrack}. \emph{Usage:} \code{split(x, f, ...)} \emph{Additional Arguments:} \describe{ \item{}{\code{f}: the splitting factor.} \item{}{\code{\dots}: all further arguments are ignored.} } \emph{Examples:} \describe{ \item{}{\code{split(obj, c("a", "a", "b", "c", "a"))}} } } \item{strand}{\code{signature(x="NumericTrack")}: return a vector of strand specifiers for all track items, in the form '+' for the Watson strand, '-' for the Crick strand or '*' for either of the two. \emph{Usage:} \code{strand(x)} \emph{Examples:} \describe{ \item{}{\code{strand(obj)}} } } \item{strand<-}{\code{signature(x="NumericTrack")}: replace the strand information for the track items. The replacement value needs to be an appropriate scalar or vector of strand values. \emph{Usage:} \code{strand<-(x, value)} \emph{Additional Arguments:} \describe{ \item{}{\code{value}: replacement value.} } \emph{Examples:} \describe{ \item{}{\code{strand(obj) <- "+"}} } } \item{values}{\code{signature(x="NumericTrack")}: return all additional annotation information except for the genomic coordinates for the track items as a data.frame. \emph{Usage:} \code{values(x)} \emph{Examples:} \describe{ \item{}{\code{values(obj)}} } } \item{coerce}{\code{signature(from="NumericTrack", to="data.frame")}: coerce the \code{\linkS4class{GRanges}} object in the \code{range} slot into a regular data.frame. \emph{Examples:} \describe{ \item{}{\code{as(obj, "data.frame")}} } } \item{subset}{\code{signature(x="NumericTrack")}: subset a \code{NumericTrack} by coordinates and sort if necessary. \emph{Usage:} \code{subset(x, from, to, sort=FALSE, ...)} \emph{Additional Arguments:} \describe{ \item{}{\code{from}, \code{to}: the coordinates range to subset to.} \item{}{\code{sort}: sort the object after subsetting. Usually not necessary.} \item{}{\code{\dots}: additional arguments are ignored.} } \emph{Examples:} \describe{ \item{}{\code{subset(obj, from=10, to=20, sort=TRUE)}} } } \item{displayPars}{\code{signature(x="NumericTrack", name="character")}: list the value of the display parameter \code{name}. See \code{\link{settings}} for details on display parameters and customization. \emph{Usage:} \code{displayPars(x, name)} \emph{Examples:} \describe{ \item{}{\code{displayPars(obj, "col")}} } } \item{displayPars}{\code{signature(x="NumericTrack", name="missing")}: list the value of all available display parameters. See \code{\link{settings}} for details on display parameters and customization. \emph{Examples:} \describe{ \item{}{\code{displayPars(obj)}} } } \item{getPar}{\code{signature(x="NumericTrack", name="character")}: alias for the \code{displayPars} method. See \code{\link{settings}} for details on display parameters and customization. \emph{Usage:} \code{getPar(x, name)} \emph{Examples:} \describe{ \item{}{\code{getPar(obj, "col")}} } } \item{getPar}{\code{signature(x="NumericTrack", name="missing")}: alias for the \code{displayPars} method. See \code{\link{settings}} for details on display parameters and customization. 
\emph{Examples:} \describe{ \item{}{\code{getPar(obj)}} } } \item{displayPars<-}{\code{signature(x="NumericTrack", value="list")}: set display parameters using the values of the named list in \code{value}. See \code{\link{settings}} for details on display parameters and customization. \emph{Usage:} \code{displayPars<-(x, value)} \emph{Examples:} \describe{ \item{}{\code{displayPars(obj) <- list(col="red", lwd=2)}} } } \item{setPar}{\code{signature(x="NumericTrack", value="character")}: set the single display parameter \code{name} to \code{value}. Note that display parameters in the \code{NumericTrack} class are pass-by-reference, so no re-assignmnet to the symbol \code{obj} is necessary. See \code{\link{settings}} for details on display parameters and customization. \emph{Usage:} \code{setPar(x, name, value)} \emph{Additional Arguments:} \describe{ \item{}{\code{name}: the name of the display parameter to set.} } \emph{Examples:} \describe{ \item{}{\code{setPar(obj, "col", "red")}} } } \item{setPar}{\code{signature(x="NumericTrack", value="list")}: set display parameters by the values of the named list in \code{value}. Note that display parameters in the \code{NumericTrack} class are pass-by-reference, so no re-assignmnet to the symbol \code{obj} is necessary. See \code{\link{settings}} for details on display parameters and customization. \emph{Examples:} \describe{ \item{}{\code{setPar(obj, list(col="red", lwd=2))}} } } \item{group}{\code{signature(GdObject="NumericTrack")}: return grouping information for the individual items in the track. Unless overwritten in one of the sub-classes, this usualy returns \code{NULL}. \emph{Usage:} \code{group(GdObject)} \emph{Examples:} \describe{ \item{}{\code{group(obj)}} } } \item{names}{\code{signature(x="NumericTrack")}: return the value of the \code{name} slot. \emph{Usage:} \code{names(x)} \emph{Examples:} \describe{ \item{}{\code{names(obj)}} } } \item{names<-}{\code{signature(x="NumericTrack", value="character")}: set the value of the \code{name} slot. \emph{Usage:} \code{names<-(x, value)} \emph{Examples:} \describe{ \item{}{\code{names(obj) <- "foo"}} } } \item{coords}{\code{signature(ImageMap="NumericTrack")}: return the coordinates from the internal image map. \emph{Usage:} \code{coords(ImageMap)} \emph{Examples:} \describe{ \item{}{\code{coords(obj)}} } } \item{tags}{\code{signature(x="NumericTrack")}: return the tags from the internal image map. \emph{Usage:} \code{tags(x)} \emph{Examples:} \describe{ \item{}{\code{tags(obj)}} } } } } \author{Florian Hahne} \section{Display Parameters}{ No formal display parameters are defined for objects of class \code{NumericTrack}. Additional display parameters are being inherited from the respective parent classes. Note that not all of them may have an effect on the plotting of \code{NumericTrack} objects. \describe{ \item{}{\code{\linkS4class{GdObject}}: \describe{ \item{}{\code{alpha=1}: Numeric scalar. The transparency for all track items.} \item{}{\code{alpha.title=NULL}: Numeric scalar. The transparency for the title panel.} \item{}{\code{background.legend="transparent"}: Integer or character scalar. The background color for the legend.} \item{}{\code{background.panel="transparent"}: Integer or character scalar. The background color of the content panel.} \item{}{\code{background.title="lightgray"}: Integer or character scalar. The background color for the title panel.} \item{}{\code{cex=1}: Numeric scalar. 
The overall font expansion factor for all text and glyphs, unless a more specific definition exists.} \item{}{\code{cex.axis=NULL}: Numeric scalar. The expansion factor for the axis annotation. Defaults to \code{NULL}, in which case it is automatically determined based on the available space.} \item{}{\code{cex.title=NULL}: Numeric scalar. The expansion factor for the title panel. This effects the fontsize of both the title and the axis, if any. Defaults to \code{NULL}, which means that the text size is automatically adjusted to the available space.} \item{}{\code{col="#0080FF"}: Integer or character scalar. Default line color setting for all plotting elements, unless there is a more specific control defined elsewhere.} \item{}{\code{col.axis="white"}: Integer or character scalar. The font and line color for the y axis, if any.} \item{}{\code{col.border.title="white"}: Integer or character scalar. The border color for the title panels.} \item{}{\code{col.frame="lightgray"}: Integer or character scalar. The line color used for the panel frame, if \code{frame==TRUE}} \item{}{\code{col.grid="#808080"}: Integer or character scalar. Default line color for grid lines, both when \code{type=="g"} in \code{\link{DataTrack}}s and when display parameter \code{grid==TRUE}.} \item{}{\code{col.line=NULL}: Integer or character scalar. Default colors for plot lines. Usually the same as the global \code{col} parameter.} \item{}{\code{col.symbol=NULL}: Integer or character scalar. Default colors for plot symbols. Usually the same as the global \code{col} parameter.} \item{}{\code{col.title="white"} \code{(Aliases: fontcolor.title)}: Integer or character scalar. The border color for the title panels} \item{}{\code{collapse=TRUE}: Boolean controlling whether to collapse the content of the track to accomodate the minimum current device resolution. See \code{\link{collapsing}} for details.} \item{}{\code{fill="lightgray"}: Integer or character scalar. Default fill color setting for all plotting elements, unless there is a more specific control defined elsewhere.} \item{}{\code{fontcolor="black"}: Integer or character scalar. The font color for all text, unless a more specific definition exists.} \item{}{\code{fontface=1}: Integer or character scalar. The font face for all text, unless a more specific definition exists.} \item{}{\code{fontface.title=2}: Integer or character scalar. The font face for the title panels.} \item{}{\code{fontfamily="sans"}: Integer or character scalar. The font family for all text, unless a more specific definition exists.} \item{}{\code{fontfamily.title="sans"}: Integer or character scalar. The font family for the title panels.} \item{}{\code{fontsize=12}: Numeric scalar. The font size for all text, unless a more specific definition exists.} \item{}{\code{frame=FALSE}: Boolean. Draw a frame around the track when plotting.} \item{}{\code{grid=FALSE}: Boolean, switching on/off the plotting of a grid.} \item{}{\code{h=-1}: Integer scalar. Parameter controlling the number of horizontal grid lines, see \code{\link{panel.grid}} for details.} \item{}{\code{lineheight=1}: Numeric scalar. The font line height for all text, unless a more specific definition exists.} \item{}{\code{lty="solid"}: Numeric scalar. Default line type setting for all plotting elements, unless there is a more specific control defined elsewhere.} \item{}{\code{lty.grid="solid"}: Integer or character scalar. 
Default line type for grid lines, both when \code{type=="g"} in \code{\link{DataTrack}}s and when display parameter \code{grid==TRUE}.} \item{}{\code{lwd=1}: Numeric scalar. Default line width setting for all plotting elements, unless there is a more specific control defined elsewhere.} \item{}{\code{lwd.border.title=1}: Integer scalar. The border width for the title panels.} \item{}{\code{lwd.grid=1}: Numeric scalar. Default line width for grid lines, both when \code{type=="g"} in \code{\link{DataTrack}}s and when display parameter \code{grid==TRUE}.} \item{}{\code{lwd.title=1}: Integer scalar. The border width for the title panels} \item{}{\code{min.distance=1}: Numeric scalar. The minimum pixel distance before collapsing range items, only if \code{collapse==TRUE}. See \code{\link{collapsing}} for details.} \item{}{\code{min.height=3}: Numeric scalar. The minimum range height in pixels to display. All ranges are expanded to this size in order to avoid rendering issues. See \code{\link{collapsing}} for details.} \item{}{\code{min.width=1}: Numeric scalar. The minimum range width in pixels to display. All ranges are expanded to this size in order to avoid rendering issues. See \code{\link{collapsing}} for details.} \item{}{\code{reverseStrand=FALSE}: Logical scalar. Set up the plotting coordinates in 3' -> 5' direction if \code{TRUE}. This will effectively mirror the plot on the vertical axis.} \item{}{\code{rotation=0}: The rotation angle for all text unless a more specific definiton exists.} \item{}{\code{rotation.title=90} \code{(Aliases: rotation.title)}: The rotation angle for the text in the title panel. Even though this can be adjusted, the automatic resizing of the title panel will currently not work, so use at own risk.} \item{}{\code{showAxis=TRUE}: Boolean controlling whether to plot a y axis (only applies to track types where axes are implemented).} \item{}{\code{showTitle=TRUE}: Boolean controlling whether to plot a title panel. Although this can be set individually for each track, in multi-track plots as created by \code{\link{plotTracks}} there will still be an empty placeholder in case any of the other tracks include a title. The same holds true for axes. Note that the the title panel background color could be set to transparent in order to completely hide it.} \item{}{\code{size=1}: Numeric scalar. The relative size of the track. Can be overridden in the \code{\link{plotTracks}} function.} \item{}{\code{v=-1}: Integer scalar. Parameter controlling the number of vertical grid lines, see \code{\link{panel.grid}} for details.} } } } } \seealso{ \code{\linkS4class{AnnotationTrack}} \code{\linkS4class{DisplayPars}} \code{\linkS4class{GdObject}} \code{\linkS4class{GeneRegionTrack}} \code{\linkS4class{GRanges}} \code{\linkS4class{ImageMap}} \code{\linkS4class{IRanges}} \code{\linkS4class{RangeTrack}} \code{\link{collapsing}} \code{\link{DataTrack}} \code{\link{grouping}} \code{\link{panel.grid}} \code{\link{plotTracks}} \code{\link{settings}} } \keyword{classes}
/man/NumericTrack-class.Rd
permissive
shanwai1234/Gviz
R
false
false
25,165
rd
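Since NumericTrack is virtual, the accessors and display-parameter methods documented above are exercised through concrete subclasses such as DataTrack. A small illustrative sketch with made-up coordinates and values; the genome and chromosome identifiers are arbitrary.

library(Gviz)

# Made-up numeric signal over ten adjacent 200-bp windows on chr1
dt <- DataTrack(start = seq(1, 2000, by = 200), width = 200,
                chromosome = "chr1", genome = "hg19",
                data = rnorm(10), name = "signal")

chromosome(dt)                                    # RangeTrack accessor, as documented
displayPars(dt) <- list(col = "red", type = "h")  # display parameters, as documented
plotTracks(dt, from = 1, to = 2200)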
loadall()

mod = list()
column.labels = c('OLS', 'Probit', 'Logit', 'Probit (No FE)', 'Probit (No Weights)', 'Probit (Horse-race)')
column.separate = c(2, 2, 2, 2, 2, 2)
dep.var.labels = rep(c('Trump G.E.', ' Trump Prim.'), 6)
omit = c('.data_')
v.y = c('cc.TrumpGEVote', 'cc.TrumpPVote')

# Feature Scaling
isdum = sapply(ccesplus, function(x) {
  all(x %in% c(0:1, NA))
})
isnumeric = sapply(ccesplus, is.numeric)
norescale = isdum | !isnumeric
t = as.data.frame(ccesplus)
ccesplus.fscaled = as.data.frame(ccesplus)
ccesplus.fscaled[!norescale] = scale(t[!norescale])

for (var in v.y) {
  # var = 'cc.TrumpGEVote'
  ## With State FEs
  # OLS
  mod$probit.fscaled$m[[var]] = lm(formula = formula, data = ccesplus.fscaled,
                                   weights = ccesplus$cc.commonweight)
}

stargazer(c(mod), type = 'text',
          title = 'Regressions',
          report = 'vc*sp',
          covariate.labels = xlabels,
          column.labels = column.labels,
          column.separate = column.separate,
          # dep.var.labels = dep.var.labels,
          model.names = F,
          model.numbers = F,
          omit = c('stateabr'))
/sandbox reg.R
no_license
eastnile/proj_010_trump
R
false
false
1,140
r
dietmodel <- function(solver=NULL, modelDirectory=NULL) {
  library(rAMPL)
  # Create an AMPL instance
  ampl <- new(AMPL)

  ## If the AMPL installation directory is not in the system search path:
  # env <- new(Environment, "full path to the AMPL installation directory")
  # ampl <- new(AMPL, env)

  if (!is.null(solver)) {
    ampl$setOption("solver", solver)
  }

  # Read the model file.
  if (is.null(modelDirectory)) {
    modelDirectory <- "./models"
  }
  ampl$read(paste(modelDirectory, "/diet/diet.mod", sep=""))

  # Set the values for the set FOOD, and for the parameters cost, f_min, and f_max
  foods <- c("BEEF", "CHK", "FISH", "HAM", "MCH", "MTL", "SPG", "TUR")
  costs <- c(3.59, 2.59, 2.29, 2.89, 1.89, 1.99, 1.99, 2.49)
  fmin <- c(2, 2, 2, 2, 2, 2, 2, 2)
  fmax <- c(10, 10, 10, 10, 10, 10, 10, 10)
  ampl$setData(data.frame(FOODS=foods, cost=costs, f_min=fmin, f_max=fmax), 1, "FOOD")

  # Set the values for the set NUTR, and for the parameters n_min and n_max
  nutrients <- c("A", "C", "B1", "B2", "NA", "CAL")
  nmin <- c(700, 700, 700, 700, 0, 16000)
  nmax <- c(20000, 20000, 20000, 20000, 50000, 24000)
  ampl$setData(data.frame(NUTR=nutrients, n_min=nmin, n_max=nmax), 1, "NUTR")

  amounts = rbind(
    c( 60,    8,   8,  40,   15,  70,   25,   60),
    c( 20,    0,  10,  40,   35,  30,   50,   20),
    c( 10,   20,  15,  35,   15,  15,   25,   15),
    c( 15,   20,  10,  10,   15,  15,   15,   10),
    c(928, 2180, 945, 278, 1182, 896, 1329, 1397),
    c(295,  770, 440, 430,  315, 400,  379,  450)
  )
  dimnames(amounts) <- list(nutrients, foods)

  # Convert matrix into data.frame
  df <- data.frame(as.table(amounts))
  colnames(df) <- c("NUTR", "FOOD", "amt")
  # Set the values for the parameter "amt"
  ampl$setData(df, 2, "")

  # Solve the model
  ampl$solve()

  # Print out the result
  cat(sprintf("Objective: %f\n", ampl$getObjective("Total_Cost")$value()))

  # Get the values of the variable Buy in a data.frame
  df <- ampl$getVariable("Buy")$getValues()
  print(df)
}
/examples/dietmodel.R
permissive
ampl/rAMPL
R
false
false
2,024
r
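The function above only defines the model run; a short, hedged usage sketch follows (the "cbc" solver name and the ./models directory are assumptions about the local AMPL setup, not part of the original file).

# Hypothetical invocation: assumes a working AMPL installation on the search path,
# the rAMPL package, an installed "cbc" solver, and the model at ./models/diet/diet.mod.
source("dietmodel.R")

dietmodel(solver = "cbc", modelDirectory = "./models")

# Or rely on the defaults (AMPL's default solver, models read from ./models):
dietmodel()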
setwd("~/Dropbox/MIT Analytics/Week5") # Install new packages install.packages("tm") install.packages("SnowballC") install.packages("caTools") install.packages("rpart") install.packages("rpart.plot") install.packages("randomForest") library(randomForest) library(caTools) library(rpart) library(rpart.plot) library(randomForest) library(tm) library(SnowballC) Sys.setlocale("LC_ALL", "C") wiki = read.csv("wiki.csv", stringsAsFactors=FALSE) wiki$Vandal = as.factor(wiki$Vandal) str(wiki) table(wiki$Vandal) # Create corpus Added corpusAdded = Corpus(VectorSource(wiki$Added)) # Look at corpus corpusAdded corpusAdded = tm_map(corpusAdded, PlainTextDocument) # Remove stopwords corpusAdded = tm_map(corpusAdded, removeWords, c( stopwords("english"))) corpusAdded[[1]] corpusAdded = tm_map(corpusAdded, stemDocument) findFreqTerms(corpusAdded) corpusAdded[[1]] dtmAdded = DocumentTermMatrix(corpusAdded) dtmAdded sparseAdded = removeSparseTerms(dtmAdded, 0.997) sparseAdded # Convert to a data frame wordsAdded = as.data.frame(as.matrix(sparseAdded)) # Make all variable names R-friendly colnames(wordsAdded) = make.names(colnames(wordsAdded)) colnames(wordsAdded) = paste("A", colnames(wordsAdded)) #end Added # Create corpus Removed corpusRemoved = Corpus(VectorSource(wiki$Removed)) # Look at corpus corpusRemoved corpusRemoved = tm_map(corpusRemoved, PlainTextDocument) # Remove stopwords corpusRemoved = tm_map(corpusRemoved, removeWords, c( stopwords("english"))) corpusRemoved[[1]] corpusRemoved = tm_map(corpusRemoved, stemDocument) findFreqTerms(corpusRemoved) corpusRemoved[[1]] dtmRemoved = DocumentTermMatrix(corpusRemoved) dtmRemoved sparseRemoved = removeSparseTerms(dtmRemoved, 0.997) sparseRemoved # Convert to a data frame wordsRemoved = as.data.frame(as.matrix(sparseRemoved)) # Make all variable names R-friendly colnames(wordsRemoved) = make.names(colnames(wordsRemoved)) colnames(wordsRemoved) = paste("R", colnames(wordsRemoved)) ncol(wordsRemoved) #end Removed wikiWords = cbind(wordsAdded, wordsRemoved) str(wikiWords) # Create dependent variable wikiWords$Vandal = as.factor(wiki$Vandal == 1) table(wikiWords$Vandal) 2061/(1815+2061) set.seed(123) split = sample.split(wikiWords$Vandal , SplitRatio = 0.7) trainSparse = subset(wikiWords , split==TRUE) testSparse = subset(wikiWords, split==FALSE) # Build a CART model wikiCART = rpart(Vandal ~ ., data=trainSparse, method="class") prp(wikiCART) # Evaluate the performance of the model predictCART = predict(wikiCART, newdata=testSparse, type="class") table(testSparse$Vandal, predictCART) (618+12)/(618+533+12) grepl("cat","dogs and cats",fixed=TRUE) # TRUE grepl("cat","dogs and rats",fixed=TRUE) # FALSE wikiWords2 = wikiWords wikiWords2$HTTP = ifelse(grepl("http",wiki$Added,fixed=TRUE), 1, 0) table(wikiWords2$HTTP) #new subset with http wikiTrain2 = subset(wikiWords2, split==TRUE) wikiTest2 = subset(wikiWords2, split==FALSE) #cart Model 2 wikiCART2 = rpart(Vandal ~ ., data=wikiTrain2, method="class") prp(wikiCART2) # Evaluate the performance of the model predictCART2 = predict(wikiCART2, newdata=wikiTest2, type="class") table(wikiTest2$Vandal, predictCART2) (609 + 57)/(609 + 57 + 488 + 9) #Sum rows dataframe wikiWords2$NumWordsAdded = rowSums(as.matrix(dtmAdded)) wikiWords2$NumWordsRemoved = rowSums(as.matrix(dtmRemoved)) mean(wikiWords2$NumWordsAdded) #Model with numWords wikiTrain3 = subset(wikiWords2, split==TRUE) wikiTest3 = subset(wikiWords2, split==FALSE) wikiCART3 = rpart(Vandal ~ ., data=wikiTrain3, method="class") prp(wikiCART3) predictCART3 = 
predict(wikiCART3, newdata=wikiTest3, type="class") table(wikiTest3$Vandal, predictCART3) (514 + 248)/(514+248+104+297) #Taking into account more variables wikiWords3 = wikiWords2 wikiWords3$Minor = wiki$Minor wikiWords3$Loggedin = wiki$Loggedin wikiTrain4 = subset(wikiWords3, split==TRUE) wikiTest4 = subset(wikiWords3, split==FALSE) wikiCART4 = rpart(Vandal ~ ., data=wikiTrain4, method="class") prp(wikiCART4) predictCART4 = predict(wikiCART4, newdata=wikiTest4, type="class") table(wikiTest4$Vandal, predictCART4) (595 + 241) / (595+241+23+304)
/Week5/Wiki.R
no_license
wiflore/Analytics-on-Edge
R
false
false
4,203
r
setwd("~/Dropbox/MIT Analytics/Week5") # Install new packages install.packages("tm") install.packages("SnowballC") install.packages("caTools") install.packages("rpart") install.packages("rpart.plot") install.packages("randomForest") library(randomForest) library(caTools) library(rpart) library(rpart.plot) library(randomForest) library(tm) library(SnowballC) Sys.setlocale("LC_ALL", "C") wiki = read.csv("wiki.csv", stringsAsFactors=FALSE) wiki$Vandal = as.factor(wiki$Vandal) str(wiki) table(wiki$Vandal) # Create corpus Added corpusAdded = Corpus(VectorSource(wiki$Added)) # Look at corpus corpusAdded corpusAdded = tm_map(corpusAdded, PlainTextDocument) # Remove stopwords corpusAdded = tm_map(corpusAdded, removeWords, c( stopwords("english"))) corpusAdded[[1]] corpusAdded = tm_map(corpusAdded, stemDocument) findFreqTerms(corpusAdded) corpusAdded[[1]] dtmAdded = DocumentTermMatrix(corpusAdded) dtmAdded sparseAdded = removeSparseTerms(dtmAdded, 0.997) sparseAdded # Convert to a data frame wordsAdded = as.data.frame(as.matrix(sparseAdded)) # Make all variable names R-friendly colnames(wordsAdded) = make.names(colnames(wordsAdded)) colnames(wordsAdded) = paste("A", colnames(wordsAdded)) #end Added # Create corpus Removed corpusRemoved = Corpus(VectorSource(wiki$Removed)) # Look at corpus corpusRemoved corpusRemoved = tm_map(corpusRemoved, PlainTextDocument) # Remove stopwords corpusRemoved = tm_map(corpusRemoved, removeWords, c( stopwords("english"))) corpusRemoved[[1]] corpusRemoved = tm_map(corpusRemoved, stemDocument) findFreqTerms(corpusRemoved) corpusRemoved[[1]] dtmRemoved = DocumentTermMatrix(corpusRemoved) dtmRemoved sparseRemoved = removeSparseTerms(dtmRemoved, 0.997) sparseRemoved # Convert to a data frame wordsRemoved = as.data.frame(as.matrix(sparseRemoved)) # Make all variable names R-friendly colnames(wordsRemoved) = make.names(colnames(wordsRemoved)) colnames(wordsRemoved) = paste("R", colnames(wordsRemoved)) ncol(wordsRemoved) #end Removed wikiWords = cbind(wordsAdded, wordsRemoved) str(wikiWords) # Create dependent variable wikiWords$Vandal = as.factor(wiki$Vandal == 1) table(wikiWords$Vandal) 2061/(1815+2061) set.seed(123) split = sample.split(wikiWords$Vandal , SplitRatio = 0.7) trainSparse = subset(wikiWords , split==TRUE) testSparse = subset(wikiWords, split==FALSE) # Build a CART model wikiCART = rpart(Vandal ~ ., data=trainSparse, method="class") prp(wikiCART) # Evaluate the performance of the model predictCART = predict(wikiCART, newdata=testSparse, type="class") table(testSparse$Vandal, predictCART) (618+12)/(618+533+12) grepl("cat","dogs and cats",fixed=TRUE) # TRUE grepl("cat","dogs and rats",fixed=TRUE) # FALSE wikiWords2 = wikiWords wikiWords2$HTTP = ifelse(grepl("http",wiki$Added,fixed=TRUE), 1, 0) table(wikiWords2$HTTP) #new subset with http wikiTrain2 = subset(wikiWords2, split==TRUE) wikiTest2 = subset(wikiWords2, split==FALSE) #cart Model 2 wikiCART2 = rpart(Vandal ~ ., data=wikiTrain2, method="class") prp(wikiCART2) # Evaluate the performance of the model predictCART2 = predict(wikiCART2, newdata=wikiTest2, type="class") table(wikiTest2$Vandal, predictCART2) (609 + 57)/(609 + 57 + 488 + 9) #Sum rows dataframe wikiWords2$NumWordsAdded = rowSums(as.matrix(dtmAdded)) wikiWords2$NumWordsRemoved = rowSums(as.matrix(dtmRemoved)) mean(wikiWords2$NumWordsAdded) #Model with numWords wikiTrain3 = subset(wikiWords2, split==TRUE) wikiTest3 = subset(wikiWords2, split==FALSE) wikiCART3 = rpart(Vandal ~ ., data=wikiTrain3, method="class") prp(wikiCART3) predictCART3 = 
predict(wikiCART3, newdata=wikiTest3, type="class") table(wikiTest3$Vandal, predictCART3) (514 + 248)/(514+248+104+297) #Taking into account more variables wikiWords3 = wikiWords2 wikiWords3$Minor = wiki$Minor wikiWords3$Loggedin = wiki$Loggedin wikiTrain4 = subset(wikiWords3, split==TRUE) wikiTest4 = subset(wikiWords3, split==FALSE) wikiCART4 = rpart(Vandal ~ ., data=wikiTrain4, method="class") prp(wikiCART4) predictCART4 = predict(wikiCART4, newdata=wikiTest4, type="class") table(wikiTest4$Vandal, predictCART4) (595 + 241) / (595+241+23+304)
% Auto-generated: do not edit by hand
\name{htmlMark}

\alias{htmlMark}

\title{Mark component}

\description{
Mark is a wrapper for the <mark> HTML5 element.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/mark
}

\usage{
htmlMark(children=NULL, id=NULL, n_clicks=NULL, n_clicks_timestamp=NULL,
key=NULL, role=NULL, accessKey=NULL, className=NULL, contentEditable=NULL,
contextMenu=NULL, dir=NULL, draggable=NULL, hidden=NULL, lang=NULL,
spellCheck=NULL, style=NULL, tabIndex=NULL, title=NULL, loading_state=NULL, ...)
}

\arguments{
\item{children}{A list of or a singular dash component, string or number. The children of this component}

\item{id}{Character. The ID of this component, used to identify dash components in callbacks. The ID needs to be unique across all of the components in an app.}

\item{n_clicks}{Numeric. An integer that represents the number of times that this element has been clicked on.}

\item{n_clicks_timestamp}{Numeric. An integer that represents the time (in ms since 1970) at which n_clicks changed. This can be used to tell which button was changed most recently.}

\item{key}{Character. A unique identifier for the component, used to improve performance by React.js while rendering components. See https://reactjs.org/docs/lists-and-keys.html for more info}

\item{role}{Character. The ARIA role attribute}

\item{accessKey}{Character. Keyboard shortcut to activate or add focus to the element.}

\item{className}{Character. Often used with CSS to style elements with common properties.}

\item{contentEditable}{Character. Indicates whether the element's content is editable.}

\item{contextMenu}{Character. Defines the ID of a <menu> element which will serve as the element's context menu.}

\item{dir}{Character. Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)}

\item{draggable}{Character. Defines whether the element can be dragged.}

\item{hidden}{A value equal to: 'hidden', 'hidden' | logical. Prevents rendering of given element, while keeping child elements, e.g. script elements, active.}

\item{lang}{Character. Defines the language used in the element.}

\item{spellCheck}{Character. Indicates whether spell checking is allowed for the element.}

\item{style}{Named list. Defines CSS styles which will override styles previously set.}

\item{tabIndex}{Character. Overrides the browser's default tab order and follows the one specified instead.}

\item{title}{Character. Text to be displayed in a tooltip when hovering over the element.}

\item{loading_state}{Lists containing elements 'is_loading', 'prop_name', 'component_name'.
Those elements have the following types:
- is_loading (logical; optional): determines if the component is loading or not
- prop_name (character; optional): holds which property is loading
- component_name (character; optional): holds the name of the component that is loading.
Object that holds the loading state object coming from dash-renderer}

\item{...}{wildcards allowed have the form: `'data-*', 'aria-*'`}
}

\value{named list of JSON elements corresponding to React.js properties and their values}

\examples{
if (interactive() && require(dash)) {
    library(dash)
    library(dashHtmlComponents)
    app <- Dash$new()
    app$layout(
        htmlDiv(list(
            htmlP(list(
                htmlMark("Plotly"),
                " develops online data analytics and visualization tools."
            ))
        ))
    )
    app$run_server()
}
}
/man/htmlMark.Rd
permissive
noisycomputation/dash-html-components
R
false
false
3,484
rd
#install.packages('shiny')

#setwd("C:/Users/gabel/Documents/")
#setwd("C:/Users/Guy/Documents/Github")
setwd("E:/VID/project/")

library(shiny)
runApp("./wcde", launch.browser = TRUE)
#runApp("./shiny/wcde")

# install.packages("wppExplorer")
# library(wppExplorer)
# wpp.explore(2012)

#shiny::runGitHub('wicpyr', 'gjabel')

#setwd("./shiny/wcde")
#setwd("C:/Users/gabel/Documents/")
#runApp("./shiny/shiny-partials-master")

#shiny::runGitHub('wicpyr', 'gjabel')

# runGitHub("shiny-examples", "rstudio", subdir = "050-kmeans-example")
# runGitHub("shiny-examples", "rstudio", subdir = "052-navbar-example")
# runGitHub("shiny-examples", "rstudio", subdir = "048-including-html-text-and-markdown-files")
#
# runGitHub("shiny_example", "rstudio")
#
# runExample("02_text")
/do_wcde.R
no_license
annegoujon/wcde
R
false
false
774
r
#' Install a local development package.
#'
#' Uses \code{R CMD INSTALL} to install the package. Will also try to install
#' dependencies of the package from CRAN, if they're not already installed.
#'
#' By default, installation takes place using the current package directory.
#' If you have compiled code, this means that artefacts of compilation will be
#' created in the \code{src/} directory. If you want to avoid this, you can
#' use \code{local = FALSE} to first build a package bundle and then install
#' it from a temporary directory. This is slower, but keeps the source
#' directory pristine.
#'
#' If the package is loaded, it will be reloaded after installation. This is
#' not always completely possible, see \code{\link{reload}} for caveats.
#'
#' @param pkg package description, can be path or package name. See
#'   \code{\link{as.package}} for more information
#' @param reload if \code{TRUE} (the default), will automatically reload the
#'   package after installing.
#' @param quick if \code{TRUE} skips docs, multiple-architectures,
#'   demos, and vignettes, to make installation as fast as possible.
#' @param local if \code{FALSE} \code{\link{build}}s the package first:
#'   this ensures that the installation is completely clean, and prevents any
#'   binary artefacts (like \file{.o}, \code{.so}) from appearing in your local
#'   package directory, but is considerably slower, because every compile has
#'   to start from scratch.
#' @param args An optional character vector of additional command line
#'   arguments to be passed to \code{R CMD install}. This defaults to the
#'   value of the option \code{"devtools.install.args"}.
#' @param quiet if \code{TRUE} suppresses output from this function.
#' @param dependencies \code{logical} indicating to also install uninstalled
#'   packages which this \code{pkg} depends on/links to/suggests. See
#'   argument \code{dependencies} of \code{\link{install.packages}}.
#' @param build_vignettes if \code{TRUE}, will build vignettes. Normally it is
#'   \code{build} that's responsible for creating vignettes; this argument makes
#'   sure vignettes are built even if a build never happens (i.e. because
#'   \code{local = TRUE}).
#' @param keep_source If \code{TRUE} will keep the srcrefs from an installed
#'   package. This is useful for debugging (especially inside of RStudio).
#'   It defaults to the option \code{"keep.source.pkgs"}.
#' @export
#' @family package installation
#' @seealso \code{\link{with_debug}} to install packages with debugging flags
#'   set.
#' @importFrom utils install.packages
install <- function(pkg = ".", reload = TRUE, quick = FALSE, local = TRUE,
                    args = getOption("devtools.install.args"), quiet = FALSE,
                    dependencies = NA, build_vignettes = !quick,
                    keep_source = getOption("keep.source.pkgs")) {

  pkg <- as.package(pkg)

  if (!quiet) message("Installing ", pkg$package)
  install_deps(pkg, dependencies = dependencies)

  # Build the package. Only build locally if it doesn't have vignettes
  has_vignettes <- length(pkgVignettes(dir = pkg$path)$doc) > 0
  if (local && !(has_vignettes && build_vignettes)) {
    built_path <- pkg$path
  } else {
    built_path <- build(pkg, tempdir(), vignettes = build_vignettes, quiet = quiet)
    on.exit(unlink(built_path))
  }

  opts <- c(
    paste("--library=", shQuote(.libPaths()[1]), sep = ""),
    if (keep_source) "--with-keep.source",
    "--install-tests"
  )
  if (quick) {
    opts <- c(opts, "--no-docs", "--no-multiarch", "--no-demo")
  }
  opts <- paste(paste(opts, collapse = " "), paste(args, collapse = " "))

  R(paste("CMD INSTALL ", shQuote(built_path), " ", opts, sep = ""), quiet = quiet)

  if (reload) reload(pkg, quiet = quiet)
  invisible(TRUE)
}

#' Install package dependencies
#'
#' @inheritParams install
#' @export
#' @examples
#' \dontrun{install_deps(".")}
install_deps <- function(pkg = ".", dependencies = NA) {
  pkg <- as.package(pkg)
  info <- pkg_deps(pkg, dependencies)

  # Packages that are not already installed or without required versions
  needs_install <- function(pkg, compare, version) {
    if (length(find.package(pkg, quiet = TRUE)) == 0) return(TRUE)

    if (is.na(compare)) return(FALSE)

    compare <- match.fun(compare)
    !compare(packageVersion(pkg), version)
  }

  needed <- Map(needs_install, info$name, info$compare, info$version)
  deps <- info$name[as.logical(needed)]
  if (length(deps) == 0) return(invisible())

  message("Installing dependencies for ", pkg$package, ":\n",
    paste(deps, collapse = ", "))
  install.packages(deps, dependencies = NA)
  invisible(deps)
}

pkg_deps <- function(pkg = ".", dependencies = NA) {
  pkg <- as.package(pkg)

  deps <- if (identical(dependencies, NA)) {
    c("Depends", "Imports", "LinkingTo")
  } else if (isTRUE(dependencies)) {
    c("Depends", "Imports", "LinkingTo", "Suggests", "VignetteBuilder")
  } else if (identical(dependencies, FALSE)) {
    character(0)
  } else dependencies

  deps <- unlist(pkg[tolower(deps)], use.names = FALSE)
  parse_deps(paste(deps, collapse = ','))
}
/R/install.r
no_license
kingo55/devtools
R
false
false
5,125
r
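A short, hedged usage sketch of the interface documented above; the package path "~/projects/mypkg" is made up for illustration.

# Assumes devtools is installed and "~/projects/mypkg" is a package source directory.
library(devtools)

install("~/projects/mypkg")                            # install and reload
install("~/projects/mypkg", quick = TRUE)              # skip docs, demos, vignettes
install("~/projects/mypkg", local = FALSE)             # build a bundle first, keep src/ clean
install_deps("~/projects/mypkg", dependencies = TRUE)  # only install missing dependencies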
library(dplyr)
library(ggplot2)
library(reshape2)

load("C:/Users/Lakshmi/Desktop/datasciencecoursera/Regression-using-R/movies.Rdata")

# Remove actors and links and dates
movies = select(movies, -c(25:32, 8:12, 6))
attach(movies)

# Combine all Oscar data together for easy plotting - since they share the same categories "yes" and "no" and we are plotting only the counts for yes and no
oscar.data = apply(movies[c("best_pic_nom", "best_pic_win", "best_actor_win", "best_actress_win", "best_dir_win", "top200_box")], 2, table)
oscar.data = melt(oscar.data)
names(oscar.data) = c("Decision", "Category", "Counts")
oscar.data$Decision = tools::toTitleCase(as.character(oscar.data$Decision))
oscar.data$Category = tools::toTitleCase(as.character(gsub("_", " ", oscar.data$Category, fixed = TRUE)))

# Exploratory Data Analysis
# Basic plotting to observe linearity and trends ------------------------------------------------
ggplot(movies, aes(x=imdb_rating, y = seq_along(imdb_rating))) + geom_point(color = "mediumorchid4")

ggplot(movies, aes(x=critics_score, y=imdb_rating)) + geom_point(color="steelblue", size=2)

ggplot(movies, aes(x=runtime, y=imdb_rating)) + geom_point(color="steelblue", size=2)

with(movies, cor(critics_score, imdb_rating))

ggplot(movies, aes(x=genre)) + geom_bar(fill = "mediumorchid4") + coord_flip() + geom_text(stat = "count", aes(label=..count..))

ggplot(movies, aes(x=title_type)) + geom_bar(fill = "mediumorchid4") + geom_text(stat = "count", aes(label=..count..), vjust=-1) + ylim(0,700)

ggplot(oscar.data, aes(x = Category, y = Counts, fill = factor(Decision), width=0.25)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(name = "Legend", values = c("No" = "mediumorchid4", "Yes" = "cornflowerblue")) +
  labs(x = "\n\nCategory", y = "Counts\n\n")

ggplot(movies, aes(x=top200_box)) + geom_bar(fill = "mediumorchid4") + geom_text(stat = "count", aes(label=..count..), vjust=-1) + ylim(0,700)

ggplot(movies, aes(x=mpaa_rating)) + geom_bar(fill = "mediumorchid4") + geom_text(stat = "count", aes(label=..count..), vjust=-1) + ylim(0,700)

# Exploring interaction effects -------------------------------
ggplot(movies, aes(x=critics_score, y=imdb_rating, color = best_actor_win)) + geom_point() + stat_smooth(method = "lm", se = FALSE, fullrange = TRUE)

ggplot(movies, aes(x=critics_score, y=imdb_rating, color = best_actress_win)) + geom_point() + stat_smooth(method = "lm", se = FALSE, fullrange = TRUE)

ggplot(movies, aes(x=critics_score, y=imdb_rating, color = genre)) + geom_point() + stat_smooth(method = "lm", se = FALSE, fullrange = TRUE)

ggplot(movies, aes(x=critics_score, y=imdb_rating, color = mpaa_rating)) + geom_point() + stat_smooth(method = "lm", se = FALSE, fullrange = TRUE)

# Interaction b/w 2 categorical variables
new = movies %>% group_by(title_type, critics_rating) %>% summarise(mir = mean(imdb_rating))
ggplot(new, aes(title_type, mir)) +
  geom_line(aes(group = critics_rating, color = critics_rating)) +
  geom_point(aes(color = critics_rating)) +
  xlab("\n\nTitle Type") + ylab("Mean IMDB Rating\n\n") + labs(color = "Critics Rating\n")

# Building the model using backward selection
summary(lm(imdb_rating ~ critics_score + critics_rating + genre + best_actor_win + best_actress_win + title_type + mpaa_rating))$adj.r.squared
summary(lm(imdb_rating ~ critics_score + critics_rating + genre + best_actor_win + best_actress_win + title_type))$adj.r.squared
summary(lm(imdb_rating ~ critics_score + critics_rating + genre + best_actor_win + best_actress_win))$adj.r.squared
summary(lm(imdb_rating ~ critics_score + critics_rating + genre + best_actor_win + title_type))$adj.r.squared
summary(lm(imdb_rating ~ critics_score + critics_rating + genre + title_type))$adj.r.squared
summary(lm(imdb_rating ~ critics_score + critics_rating + title_type))$adj.r.squared
summary(lm(imdb_rating ~ critics_score + genre + title_type))$adj.r.squared
summary(lm(imdb_rating ~ critics_rating + genre + title_type))$adj.r.squared

model = lm(imdb_rating ~ critics_score + critics_rating + genre + title_type)
summary(model)

# Validating assumptions of linear regression
plot(model, which=1, pch = 20, col = "steelblue")
plot(model, which=2, pch = 20, col = "steelblue")
hist(resid(model), col = "olivedrab3", xlab = "Residuals")
plot(model, which=3, pch = 20, col = "steelblue")

# Adding Interaction Effects - for explanation and step wise addition of terms look at the RMd/html documentation
model = lm(imdb_rating ~ critics_score + critics_rating + genre + title_type + critics_score*genre + critics_rating*title_type)

# Diagnostic plots for model with interaction effects
par(mfrow=c(1,2))
plot(model, which=1, pch = 20, col = "mediumorchid4")
plot(model, which=2, pch = 20, col = "steelblue")

par(mfrow=c(1,2))
plot(model, which=3, pch = 20, col = "olivedrab3")
plot(resid(model), col = "mediumorchid4", xlab="Index", ylab = "Residuals", pch=20)

# Prediction
input = list(critics_score=85, genre = "Drama", critics_rating = "Certified Fresh", title_type = "Feature Film")
predict(model, input)
predict(model, input, interval = "prediction", level = 0.95)
/reg_model_project.R
no_license
pranavi-shekhar/Multiple-Linear-Regression-using-R
R
false
false
5,173
r
# Run and analyze the program.

# Set the working directory to the folder that
# stores the file: MICRODADOS_ENEM_ESCOLA.csv
#setwd()

dados <- read.csv2("MICRODADOS_ENEM_ESCOLA.csv", header = T, dec=".")

dados <- dados[ , c("NU_ANO", "SG_UF_ESCOLA", "CO_ESCOLA_EDUCACENSO",
                    "NO_ESCOLA_EDUCACENSO", "TP_DEPENDENCIA_ADM_ESCOLA",
                    "NU_MEDIA_CN", "NU_MEDIA_CH", "NU_MEDIA_LP",
                    "NU_MEDIA_MT", "NU_MEDIA_RED") ]

# TP_DEPENDENCIA_ADM_ESCOLAR
# 1 - FEDERAL
# 2 - ESTADUAL (state)
# 3 - MUNICIPAL
# 4 - PRIVADA (private)

# SELECT THE YEARS 2009 TO 2015
dados <- dados[ dados$NU_ANO >= "2009", ]

# COMPUTE BASIC SUMMARY STATISTICS FOR ALL
# OF THE SCORES.
summary(dados[ , c("NU_MEDIA_CN", "NU_MEDIA_CH", "NU_MEDIA_LP",
                   "NU_MEDIA_MT", "NU_MEDIA_RED") ])

# COMPUTE THE MEAN SCORES PER SCHOOL.
# NE - SCORES PER SCHOOL
NE <- aggregate(dados[ , c(6:10)], dados[ , c(2:5)], mean, na.rm=T)

# HISTOGRAM OF THE MEAN MATH SCORES.
hist(NE$NU_MEDIA_MT, col = "tomato4", main = "Média de matemática",
     ylab = "N", xlab = "Média", labels=T, ylim = c(0,15000))

# BOXPLOT OF THE MEAN MATH SCORES
# BY TYPE OF SCHOOL ADMINISTRATION.
boxplot(NE$NU_MEDIA_MT ~ NE$TP_DEPENDENCIA_ADM_ESCOLA,
        main="Média de matemática", xlab="Tipo de escola. ")

# TABLE COUNTING THE TYPE OF SCHOOL
# ADMINISTRATIVE DEPENDENCE.
table(NE$TP_DEPENDENCIA_ADM_ESCOLA)
/atividade1.R
no_license
raucelio/estatistica-basica
R
false
false
1,756
r
library(highcharter)

### Name: datetime_to_timestamp
### Title: Date to timestamps
### Aliases: datetime_to_timestamp

### ** Examples

datetime_to_timestamp(
  as.Date(c("2015-05-08", "2015-09-12"), format = "%Y-%m-%d"))
/data/genthat_extracted_code/highcharter/examples/datetime_to_timestamp.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
231
r
jkl <- read.csv("C:/Users/SHYAMSUNDER GOWD/Desktop/exda/exdata_data_household_power_consumption/household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE) jkl$Date<-paste(jkl$Date,jkl$Time) jkl$Date<-strptime(jkl$Date,format = "%d/%m/%Y %H:%M:%S") ac<-subset(jkl,Date>="2007-01-01"&Date<"2007-01-03") png(file = "plot2.png",width = 480,height =480) with(ac,plot(Date,Global_active_power,ylab = "Global Active Power (in kilowatts)",type = "l")) dev.off()
/plot2.R
no_license
sg-99/exda
R
false
false
478
r
\name{FillEnvelope}
\alias{FillEnvelope}
\title{
  Transform simulation values to an fv
}
\description{
  This function is used internally to calculate envelope values and store them into an \code{\link{fv.object}}.
}
\usage{
FillEnvelope(Envelope, Alpha, Global)
}
\arguments{
  \item{Envelope}{
  An envelope object (\code{\link{envelope}}) containing all the simulated function values.
  }
  \item{Alpha}{
  The risk level.
  }
  \item{Global}{
  Logical; if \code{TRUE}, a global envelope sensu Duranton and Overman (2005) is calculated.
  }
}
\value{
  Returns the envelope object (\code{\link{envelope}}) with \code{hi} and \code{lo} values calculated from the simulations.
}
\author{
  Eric Marcon <Eric.Marcon@ecofog.gf>
}
\keyword{internal}
/dbmss/man/FillEnvelope.Rd
no_license
albrizre/spatstat.revdep
R
false
false
753
rd
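Since the function is internal, the help page carries no example; the sketch below shows how it might be exercised, assuming spatstat's envelope() is used with savefuns = TRUE so the simulated function values are available. The point pattern and simulation settings are toy values, not from the package.

# Hedged sketch only: FillEnvelope() is internal, so it is reached via ':::'.
library(spatstat)
library(dbmss)

X <- rpoispp(50)                                   # toy point pattern
env <- envelope(X, Kest, nsim = 99, savefuns = TRUE)

env_local  <- dbmss:::FillEnvelope(env, Alpha = 0.05, Global = FALSE)  # pointwise envelope
env_global <- dbmss:::FillEnvelope(env, Alpha = 0.05, Global = TRUE)   # global envelope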
library("stringr") expr_raw = readRDS("~/Downloads/Tosti.Seurat.normalized.S78048.RDS") meta_info = read.table("~/Deko_Projekt/Misc/Tosti_Metadaten.tsv", sep ="\t", header = T) rownames(meta_info) = meta_info$Cell meta_data = meta_info[colnames(expr_raw),] subtype_vector = meta_data$Cluster table(subtype_vector) #candidates = which(subtype_vector %in% c("alpha","beta","gamma","delta","acinar-s","acinar-reg+","acinar-i","ductal","muc5b+ ductal")) candidates = which(subtype_vector %in% c("Acinar-REG+","Acinar-i","MUC5B+ Ductal")) expr_raw_tosti = expr_raw[,candidates] meta_data_tosti = meta_info[colnames(expr_raw_tosti),] subtype_vector_reduced_tosti = meta_data_tosti$Cluster table(subtype_vector_reduced_tosti) amount_genes = 300 amount_samples = 300 selected_samples = c() for ( cell_type in unique(subtype_vector_reduced_tosti)){ coords = which(meta_data_tosti$Cluster == cell_type ) if (length(coords) >= amount_samples) coords = sample(coords, size = amount_samples) selected_samples = c(selected_samples, coords) } length(selected_samples) expr_tosti = expr_raw_tosti[,selected_samples] dim(expr_tosti) meta_data_reduced_tosti = meta_info[colnames(expr_tosti),] table(meta_data_reduced_tosti$Cluster) ### meta_info = read.table("~/Deko_Projekt//Misc/Meta_information_scRNA.tsv",sep = "\t",header = T,stringsAsFactors = F) rownames(meta_info) = meta_info$Sample colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_") expr_raw = read.table("~/Deko_Projekt/Data/Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron.tsv",sep="\t", stringsAsFactors = F, header = T,row.names = 1) #expr_raw = read.table("~/Deko_Projekt/Data/Cancer_Pancreas_Bulk_Array/Sato.S35.Ninon.tsv",sep="\t", stringsAsFactors = F, header = T,row.names = 1) colnames(expr_raw) = str_replace(colnames(expr_raw), pattern = "^X", "") expr_raw[1:5,1:5] no_match = colnames(expr_raw) %in% meta_info$Sample == F colnames(expr_raw)[no_match] = paste("X",colnames(expr_raw)[no_match],sep ="") no_match = colnames(expr_raw) %in% meta_info$Sample == F table(no_match) meta_data = meta_info[colnames(expr_raw),] subtype_vector = meta_data$Cluster table(subtype_vector) candidates = which(subtype_vector %in% c("Alpha","Beta","Gamma","Delta","Acinar","Ductal")) expr_raw_baron = expr_raw[,candidates] meta_data_baron = meta_info[colnames(expr_raw_baron),] subtype_vector_reduced_baron = meta_data_baron$Cluster table(subtype_vector_reduced_baron) amount_genes = 300 amount_samples = 300 selected_samples = c() for ( cell_type in unique(subtype_vector_reduced_baron)){ coords = which(meta_data_baron$Cluster == cell_type ) if (length(coords) >= amount_samples) coords = sample(coords, size = amount_samples) selected_samples = c(selected_samples, coords) } length(selected_samples) expr_baron = expr_raw_baron[,selected_samples] dim(expr_baron) meta_data_reduced_baron = meta_info[colnames(expr_baron),] table(meta_data_reduced_baron$Cluster) ### merge datasets merge_genes = intersect(rownames(expr_tosti),rownames(expr_baron)) #merge_genes = rownames(bam_data_1) length(merge_genes) table("INS" %in% merge_genes) table("GCG" %in% merge_genes) table("PPY" %in% merge_genes) table("SST" %in% merge_genes) new_mat = as.data.frame( cbind( expr_tosti[merge_genes,], expr_baron[merge_genes,] ) ) rownames(new_mat) = merge_genes row_var = as.double(apply(new_mat, FUN = function(vec){return(var(vec))}, MARGIN = 1)) summary(row_var) new_mat = new_mat[which( row_var >= 1),] new_mat = new_mat[which( rowMeans(new_mat) >= 1),] table(meta_data$Subtype) dim(new_mat) new_mat = new_mat[ 
rownames(new_mat)!="NA", ] dim(new_mat) new_mat[1:5,1:5] write.table(new_mat[,], "~/Deko_Projekt/Data/Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron_Alpha-i_Alpha-reg_Muc5+_Tosti_488_genes.tsv", sep ="\t", quote =F , row.names = T)
/Scripts/Merger_Script_scRNA.R
no_license
RaikOtto/Deko_Projekt
R
false
false
3,895
r
library("stringr") expr_raw = readRDS("~/Downloads/Tosti.Seurat.normalized.S78048.RDS") meta_info = read.table("~/Deko_Projekt/Misc/Tosti_Metadaten.tsv", sep ="\t", header = T) rownames(meta_info) = meta_info$Cell meta_data = meta_info[colnames(expr_raw),] subtype_vector = meta_data$Cluster table(subtype_vector) #candidates = which(subtype_vector %in% c("alpha","beta","gamma","delta","acinar-s","acinar-reg+","acinar-i","ductal","muc5b+ ductal")) candidates = which(subtype_vector %in% c("Acinar-REG+","Acinar-i","MUC5B+ Ductal")) expr_raw_tosti = expr_raw[,candidates] meta_data_tosti = meta_info[colnames(expr_raw_tosti),] subtype_vector_reduced_tosti = meta_data_tosti$Cluster table(subtype_vector_reduced_tosti) amount_genes = 300 amount_samples = 300 selected_samples = c() for ( cell_type in unique(subtype_vector_reduced_tosti)){ coords = which(meta_data_tosti$Cluster == cell_type ) if (length(coords) >= amount_samples) coords = sample(coords, size = amount_samples) selected_samples = c(selected_samples, coords) } length(selected_samples) expr_tosti = expr_raw_tosti[,selected_samples] dim(expr_tosti) meta_data_reduced_tosti = meta_info[colnames(expr_tosti),] table(meta_data_reduced_tosti$Cluster) ### meta_info = read.table("~/Deko_Projekt//Misc/Meta_information_scRNA.tsv",sep = "\t",header = T,stringsAsFactors = F) rownames(meta_info) = meta_info$Sample colnames(meta_info) = str_replace(colnames(meta_info),pattern = "\\.","_") expr_raw = read.table("~/Deko_Projekt/Data/Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron.tsv",sep="\t", stringsAsFactors = F, header = T,row.names = 1) #expr_raw = read.table("~/Deko_Projekt/Data/Cancer_Pancreas_Bulk_Array/Sato.S35.Ninon.tsv",sep="\t", stringsAsFactors = F, header = T,row.names = 1) colnames(expr_raw) = str_replace(colnames(expr_raw), pattern = "^X", "") expr_raw[1:5,1:5] no_match = colnames(expr_raw) %in% meta_info$Sample == F colnames(expr_raw)[no_match] = paste("X",colnames(expr_raw)[no_match],sep ="") no_match = colnames(expr_raw) %in% meta_info$Sample == F table(no_match) meta_data = meta_info[colnames(expr_raw),] subtype_vector = meta_data$Cluster table(subtype_vector) candidates = which(subtype_vector %in% c("Alpha","Beta","Gamma","Delta","Acinar","Ductal")) expr_raw_baron = expr_raw[,candidates] meta_data_baron = meta_info[colnames(expr_raw_baron),] subtype_vector_reduced_baron = meta_data_baron$Cluster table(subtype_vector_reduced_baron) amount_genes = 300 amount_samples = 300 selected_samples = c() for ( cell_type in unique(subtype_vector_reduced_baron)){ coords = which(meta_data_baron$Cluster == cell_type ) if (length(coords) >= amount_samples) coords = sample(coords, size = amount_samples) selected_samples = c(selected_samples, coords) } length(selected_samples) expr_baron = expr_raw_baron[,selected_samples] dim(expr_baron) meta_data_reduced_baron = meta_info[colnames(expr_baron),] table(meta_data_reduced_baron$Cluster) ### merge datasets merge_genes = intersect(rownames(expr_tosti),rownames(expr_baron)) #merge_genes = rownames(bam_data_1) length(merge_genes) table("INS" %in% merge_genes) table("GCG" %in% merge_genes) table("PPY" %in% merge_genes) table("SST" %in% merge_genes) new_mat = as.data.frame( cbind( expr_tosti[merge_genes,], expr_baron[merge_genes,] ) ) rownames(new_mat) = merge_genes row_var = as.double(apply(new_mat, FUN = function(vec){return(var(vec))}, MARGIN = 1)) summary(row_var) new_mat = new_mat[which( row_var >= 1),] new_mat = new_mat[which( rowMeans(new_mat) >= 1),] table(meta_data$Subtype) dim(new_mat) new_mat = new_mat[ 
rownames(new_mat)!="NA", ] dim(new_mat) new_mat[1:5,1:5] write.table(new_mat[,], "~/Deko_Projekt/Data/Alpha_Beta_Gamma_Delta_Acinar_Ductal_Baron_Alpha-i_Alpha-reg_Muc5+_Tosti_488_genes.tsv", sep ="\t", quote =F , row.names = T)
qvcalc.itempar <- function(object, ...){
    if (!(attr(object, "alias"))) stop(
        "the itempar object was not built with 'alias = TRUE'")
    vc <- vcov(object)
    if (any(is.na(vc))) stop(
        "the itempar object was not built with 'vcov = TRUE'")
    qvcalc.default(vc, estimates = object)
}
/R/qvcalc.itempar.R
no_license
DavidFirth/qvcalc
R
false
false
299
r
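A hedged sketch of how this method would typically be reached, assuming the item parameters come from psychotools::itempar() with the alias and variance-covariance information kept (the two attributes the method checks); the data set and model are only illustrative.

# Hypothetical usage: fit a Rasch model, extract item parameters, compute quasi-variances.
library(psychotools)
library(qvcalc)

data("VerbalAggression", package = "psychotools")
rm_fit <- raschmodel(VerbalAggression$resp2)
ip <- itempar(rm_fit, alias = TRUE, vcov = TRUE)

qv <- qvcalc(ip)   # dispatches to qvcalc.itempar()
qv
plot(qv)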
library(tidyverse)
library(wbstats)

pull_worldbank_data <- function(vars) {
  new_cache <- wbcache()
  all_vars <- as.character(unique(new_cache$indicators$indicatorID))
  data_wide <- wb(indicator = vars, mrv = 10, return_wide = TRUE)
  new_cache$indicators[new_cache$indicators[, "indicatorID"] %in% vars, ] %>%
    rename(var_name = indicatorID) %>%
    mutate(var_def = paste(
      indicator, "\nNote:",
      indicatorDesc, "\nSource:", sourceOrg
    )) %>%
    select(var_name, var_def) -> wb_data_def
  new_cache$countries %>%
    select(iso3c, iso2c, country, region, income) -> ctries
  left_join(data_wide, ctries, by = "iso3c") %>%
    rename(
      year = date,
      iso2c = iso2c.y,
      country = country.y
    ) %>%
    select(iso3c, iso2c, country, region, income, everything()) %>%
    select(-iso2c.x, -country.x) %>%
    filter(
      !is.na(NY.GDP.PCAP.KD),
      region != "Aggregates"
    ) -> wb_data
  wb_data$year <- as.numeric(wb_data$year)
  wb_data_def <- left_join(data.frame(
    var_name = names(wb_data),
    stringsAsFactors = FALSE
  ),
  wb_data_def,
  by = "var_name"
  )
  wb_data_def$var_def[1:6] <- c(
    "Three letter ISO country code as used by World Bank",
    "Two letter ISO country code as used by World Bank",
    "Country name as used by World Bank",
    "World Bank regional country classification",
    "World Bank income group classification",
    "Calendar year of observation"
  )
  wb_data_def$type <- c(
    "cs_id", rep("factor", 4), "ts_id",
    rep("numeric", ncol(wb_data) - 6)
  )
  return(list(wb_data, wb_data_def))
}

vars <- c("SP.POP.TOTL", "AG.LND.TOTL.K2", "EN.POP.DNST",
          "EN.URB.LCTY", "SP.DYN.LE00.IN", "NY.GDP.PCAP.KD")

wb_list <- pull_worldbank_data(vars)
wb_data <- wb_list[[1]]
wb_data_def <- wb_list[[2]]

wb_data %>%
  group_by(iso3c) %>%
  arrange(iso3c, year) %>%
  summarise(
    population = last(na.omit(SP.POP.TOTL)),
    land_area_skm = last(na.omit(AG.LND.TOTL.K2)),
    pop_density = last(na.omit(EN.POP.DNST)),
    pop_largest_city = last(na.omit(EN.URB.LCTY)),
    gdp_capita = last(na.omit(NY.GDP.PCAP.KD)),
    life_expectancy = last(na.omit(SP.DYN.LE00.IN))
  ) %>%
  left_join(wb_data %>% select(iso3c, region, income) %>% distinct()) -> wb_cs

write_csv(wb_cs, "data/jh_add_wbank_data.csv")
/scripts/world_bank.R
no_license
Unco3892/SIR-covid-2020
R
false
false
2,286
r
library(ape)
testtree <- read.tree("11645_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11645_0_unrooted.txt")
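# --- Added check (not part of the original script) ---
# ape::is.rooted() can confirm the result before it is written out:
# is.rooted(unrooted_tr)  # expected FALSE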
/codeml_files/newick_trees_processed_and_cleaned/11645_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
137
r
% \name{argufy-rd-macros}
% \title{argufy Rd macros}

% Assertion
\newcommand{\assert}{[\code{#1}] \% assert }

% Coercion
\newcommand{\coerce}{[\code{#1}] \% coerce }
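% --- Added usage note (not part of the original file) ---
% Inside a package Rd file these macros could be invoked as, e.g.,
% \assert{is.numeric(x)} or \coerce{as.integer(n)}, each expanding to the
% bracketed \code{} markup defined above.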
/man/macros/Rdmacros.Rd
no_license
gaborcsardi/argufy
R
false
false
168
rd
selectA <- function(nDepVar, Lags, K, AforP, ICLagOrder, keepTrackLagOrder)
  # Create A matrix that contains best-fitting A of every cluster, indicated by ICLagOrder
  # Update keepTrackLagOrder according to changes in A
{
    VARMatrix = array(0, dim = c(nDepVar, nDepVar * Lags, K))
    selectedLagOrder = array(0, dim = c(K))

    for (j in 1:K) {
        selectedLagOrder[j] = which.min(ICLagOrder[j, ])
        VARMatrix[ , , j] = AforP[ , , j, selectedLagOrder[j]] # ANJA: is min right here or do you need max
    }

    # compare selectedLagOrder to keepTrackLagOrder$currentLagOrder and afterwards
    # update keepTrackLagOrder$currentLagOrder to be selectedLagOrder
    keepTrackLagOrder$downwardChanges = which(keepTrackLagOrder$currentLagOrder > selectedLagOrder)
    keepTrackLagOrder$upwardChanges = which(keepTrackLagOrder$currentLagOrder < selectedLagOrder)
    keepTrackLagOrder$currentLagOrder = selectedLagOrder

    invisible(list(A = VARMatrix, keepTrackLagOrder = keepTrackLagOrder))
}
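# --- Added illustrative call (not part of the package) ---
# A toy invocation with K = 2 clusters, 2 candidate lag orders, and 3 dependent
# variables; all inputs below are made up purely for illustration.
# res <- selectA(nDepVar = 3, Lags = 2, K = 2,
#                AforP = array(rnorm(3 * 6 * 2 * 2), dim = c(3, 6, 2, 2)),
#                ICLagOrder = matrix(c(10, 12, 9, 8), nrow = 2),
#                keepTrackLagOrder = list(currentLagOrder = c(1, 1)))
# res$keepTrackLagOrder$upwardChanges   # clusters whose selected lag order increased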
/Functions/selectA.R
permissive
AnieBee/LCVAR
R
false
false
1,026
r
## Programming Assignment
## ----------------------
## Assignment: Caching the Inverse of a Matrix
## This source includes 2 functions
## 1 - makeCacheMatrix - constructor class
## 2 - cacheSolve - calculates inverse of matrix (See function description for further info)
## Please note that for the below to work the Matrix must be invertible else solve will throw an error
## especially if your input is a singular matrix

## The makeCacheMatrix function is used to create a special matrix object that
## sets and gets the Matrix and
## sets and gets the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  invm <- NULL
  ## the change Flag is used to denote if a matrix has been changed or not.
  ## Before retrieving cache we check on this
  changeFlag <- NULL
  ## Set and get functions
  set <- function(y) {
    x <<- y
    invm <<- NULL
    changeFlag <<- "Y"  ## Whenever new matrix is set or changed set this flag
  }
  get <- function() x
  ## Below set and get functions to retrieve inverse of matrix
  setInverse <- function(solve) invm <<- solve
  getInverse <- function() invm
  ## Below set and get functions to retrieve changed Flag status
  getChangeFlag <- function() changeFlag
  setChangeFlag <- function(t) changeFlag <<- t
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse,
       getChangeFlag = getChangeFlag,
       setChangeFlag = setChangeFlag)
}

## The cacheSolve function uses solve() to calculate the inverse of the Matrix.
## In the process, it validates whether the original matrix has been changed.
## If not, it gets the inverse matrix from the cache and skips the computation.
## Otherwise (no cached value, or the matrix was changed), it recalculates the
## inverse and stores it in the cache via the setInverse function.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  invm <- x$getInverse()
  ## Check if a cached inverse is present and the matrix has not been changed
  if(!is.null(invm) && x$getChangeFlag()=="N") {
    message("getting cached inverse matrix")
    return(invm)
  }
  message("inverse not cached... creating cache")
  data <- x$get()
  data
  invm <- solve(data, ...)  ## get inverse
  x$setInverse(invm)
  ## Since Inverse has been calculated set changeFlag back to N
  x$setChangeFlag("N")
  invm  ## Return inverse
}
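## --- Added usage example (not part of the assignment file) ---
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(m)   # first call computes the inverse and caches it
## cacheSolve(m)   # second call returns the cached inverse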
/cachematrix.R
no_license
mvaibhav/ProgrammingAssignment2
R
false
false
2,647
r
#' Gmt2GeneCat
#'
#' Read a gmt file, and return a list with its name
#' being a gene id based on gene_anno_file, and each element of this list
#' being the pathways that this gene corresponds to
#'
#' @param gmt_input_file input file
#' @param file.type local or url
#' @param gene_anno_file annotation file
#'
#' @return a list with its names being geneID, its element being the pathways
#'
#' @export
#'
#' @examples
#'
#' gene.2.cat.hallmark.hg<-Gmt2GeneCat('/media/H_driver/
#' Annotation/hg38/h.all.v5.1.symbols-1.gmt','local',
#' '/media/H_driver/Annotation/hg38/genes_table_02092016.csv')
#'
Gmt2GeneCat <- function(gmt_input_file, file.type, gene_anno_file) {

    gene.2.cat.gmt <- gene2cat2(gmt_input_file, file.type)

    names.gene.gmt <- as.data.frame(names(gene.2.cat.gmt))
    colnames(names.gene.gmt) <- "gene_id"

    dir.name = dirname(gene_anno_file)
    dir.name = reformatPath(dir.name)
    file.name = basename(gene_anno_file)
    gene_anno_file = paste0(dir.name, file.name)

    gene.ID.conversion <- read.csv(gene_anno_file)

    names.gene.gmt.2 <- match(names.gene.gmt$gene_id, gene.ID.conversion$gene_id)
    gene.ID.conversion.2 <- gene.ID.conversion[names.gene.gmt.2, ]

    gene.2.cat.gmt.2 <- gene.2.cat.gmt
    names(gene.2.cat.gmt.2) <- gene.ID.conversion.2[, 3]

    gene.2.cat.gmt.2
}

gene2cat <- function(gene_name, re) {
    z <- re$genesets
    res <- lapply(z, function(ch) grep(gene_name, ch))
    res2 <- sapply(res, function(x) length(x) > 0)
    gene2cat <- list(re$geneset.names[res2])
    gene2cat
}

GSA.read.gmt.2 <- function(filename, type) {

    if (type != "url") {
        dir.name = dirname(filename)
        dir.name = reformatPath(dir.name)
        file.name = basename(filename)
        filename = paste0(dir.name, file.name)
    }

    a = scan(filename, what = list("", ""), sep = "\t", quote = NULL,
             fill = TRUE, flush = TRUE, multi.line = FALSE)
    geneset.names = a[1][[1]]
    geneset.descriptions = a[2][[1]]

    dd = scan(filename, what = "", sep = "\t", quote = NULL)

    nn = length(geneset.names)
    n = length(dd)
    ox = rep(NA, nn)
    ii = 1
    for (i in 1:nn) {
        while ((dd[ii] != geneset.names[i]) | (dd[ii + 1] != geneset.descriptions[i])) {
            ii = ii + 1
        }
        ox[i] = ii
        ii = ii + 1
    }

    genesets = vector("list", nn)

    for (i in 1:(nn - 1)) {
        i1 = ox[i] + 2
        i2 = ox[i + 1] - 1
        geneset.descriptions[i] = dd[ox[i] + 1]
        genesets[[i]] = dd[i1:i2]
    }
    geneset.descriptions[nn] = dd[ox[nn] + 1]
    genesets[[nn]] = dd[(ox[nn] + 2):n]

    out = list(genesets = genesets, geneset.names = geneset.names,
               geneset.descriptions = geneset.descriptions)
    class(out) = "GSA.genesets"
    return(out)
}

gene2cat2 <- function(gmt_input_file, file.type) {
    re <- GSA.read.gmt.2(gmt_input_file, file.type)
    gene.name <- unique(do.call(c, re$genesets))
    gene.2.cat <- sapply(gene.name, gene2cat, re)
    names(gene.2.cat) <- gene.name
    gene.2.cat
}
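# --- Added note (not part of the package file) ---
# GSA.read.gmt.2() closely mirrors GSA::GSA.read.gmt(), with reformatPath()
# (presumably defined elsewhere in this package) applied to local paths.
# A quick standalone parser check, assuming the package is loaded and using the
# .gmt path from the roxygen example above:
# res <- GSA.read.gmt.2("/media/H_driver/Annotation/hg38/h.all.v5.1.symbols-1.gmt", "local")
# str(res$geneset.names)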
/R/Gmt2GeneCat.R
no_license
aiminy/PathwaySplice
R
false
false
2,908
r
# reads the power consumption data and then subsets the dates wanted
# into subdata. Make sure setwd() is set properly to the directory containing
# the data
allthedata <- read.csv("household_power_consumption.txt", sep=";",
                       stringsAsFactors=FALSE, colClasses="character")
subdata <- subset(allthedata, allthedata$Date %in% c("1/2/2007","2/2/2007"))

# converts the Date and Time fields in subdata into a date time format.
subdata$Date <- strptime(paste(subdata$Date,subdata$Time), format="%d/%m/%Y %H:%M:%S")

# makes sure the submetering data is numeric
subdata$Sub_metering_1 <- as.numeric(subdata$Sub_metering_1)
subdata$Sub_metering_2 <- as.numeric(subdata$Sub_metering_2)
subdata$Sub_metering_3 <- as.numeric(subdata$Sub_metering_3)

# make plots
plot(x=subdata$Date, y=subdata$Sub_metering_1, type="l", ylim=c(0,39),
     ylab="Energy sub metering", xlab="")
lines(x=subdata$Date, y=subdata$Sub_metering_2, type='l', col='red')
lines(x=subdata$Date, y=subdata$Sub_metering_3, type='l', col='blue')
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=c(1,1,1), col = c("black", "red", "blue"))
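# --- Added note (not part of the original script) ---
# To save the figure instead of drawing to the screen, the plotting calls above
# could be wrapped in a png() device, e.g.:
# png("plot3.png", width = 480, height = 480)
# ...plotting code...
# dev.off()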
/plot3.R
no_license
Phoenie1/ExData_Plotting1
R
false
false
1,144
r
# Load the training data into training_data variable from the directory Data which is placed at working directory
training_data = read.csv("data/UCI HAR Dataset/train/X_train.txt", sep="", header=FALSE)
training_data[,562] = read.csv("data/UCI HAR Dataset/train/Y_train.txt", sep="", header=FALSE)
training_data[,563] = read.csv("data/UCI HAR Dataset/train/subject_train.txt", sep="", header=FALSE)

# Load the testing data into testing_data variable from the directory Data which is placed at working directory
testing_data = read.csv("data/UCI HAR Dataset/test/X_test.txt", sep="", header=FALSE)
testing_data[,562] = read.csv("data/UCI HAR Dataset/test/Y_test.txt", sep="", header=FALSE)
testing_data[,563] = read.csv("data/UCI HAR Dataset/test/subject_test.txt", sep="", header=FALSE)

# Load the labels into activityLabels variable from the directory Data which is placed at working directory
activityLabels = read.csv("data/UCI HAR Dataset/activity_labels.txt", sep="", header=FALSE)

# Read features from directory Data which is placed at working directory
# make the feature names better suited according to R substitutions
# set them into features variable
features = read.csv("data/UCI HAR Dataset/features.txt", sep="", header=FALSE)
features[,2] = gsub('-mean', 'Mean', features[,2])
features[,2] = gsub('-std', 'Std', features[,2])
features[,2] = gsub('[-()]', '', features[,2])

# Merge training and test sets together
# set them into allData variable
allData = rbind(training_data, testing_data)

# Get only the data on mean and std. dev.
# set them into requiredCols variable
requiredCols <- grep(".*Mean.*|.*Std.*", features[,2])

# Reduce the features table to what we want
features <- features[requiredCols,]

# Add the last two columns (subject and activity)
requiredCols <- c(requiredCols, 562, 563)

# And remove the unwanted columns from allData
allData <- allData[,requiredCols]

# Add the column names (features) to allData
colnames(allData) <- c(features$V2, "Activity", "Subject")
colnames(allData) <- tolower(colnames(allData))

currentActivity = 1
for (currentActivityLabel in activityLabels$V2) {
  allData$activity <- gsub(currentActivity, currentActivityLabel, allData$activity)
  currentActivity <- currentActivity + 1
}

allData$activity <- as.factor(allData$activity)
allData$subject <- as.factor(allData$subject)

tidy = aggregate(allData, by=list(activity = allData$activity, subject=allData$subject), mean)

# Remove the subject and activity column
tidy[,90] = NULL
tidy[,89] = NULL

# Write all tidy data as text file into data directory which is placed at working directory
write.table(tidy, "data/tidy.txt", sep="\t", row.name = FALSE)
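# --- Added check (not part of the original script) ---
# The written tidy data set can be read back for inspection:
# tidy_check <- read.table("data/tidy.txt", sep="\t", header = TRUE)
# str(tidy_check)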
/run_analysis.R
no_license
youngInnovator/Getting-and-Cleaning-Data
R
false
false
2,687
r
#' Pull average daily weather data by U.S. county. #' #' Given a particular county FIPS code, this function returns data and meta-data #' for weather data, either for all available dates or for dates within a #' requested date range. #' #' @inheritParams daily_df #' @inheritParams daily_stations #' #' @param station_label TRUE / FALSE to indicate if you want your plot of #' weather station locations to include labels with station ids. #' @param verbose TRUE / FALSE to indicate if you want the function to print #' out the name of the county it's processing. #' #' @return A list with three elements. The first element (\code{daily_data}) is a #' dataframe of daily weather data averaged across multiple stations, as well #' as columns (\code{"var"_reporting}) for each weather variable showing the #' number of stations contributing to the average for that variable on that #' day. The second element (\code{station_metadata}) is a dataframe of station #' metadata for stations included in the \code{daily_data} dataframe, as well #' as statistical information about these values. Columns #' include \code{id}, \code{name}, \code{var}, \code{latitude}, #' \code{longitude}, \code{calc_coverage}, \code{standard_dev}, \code{min}, #' \code{max}, and \code{range}. The third element (\code{station_map}) #' is a plot showing locations of all weather stations for a particular county #' satisfying the conditions present in \code{daily_fips}'s arguments #' (\code{coverage}, \code{date_min}, \code{date_max}, and/or \code{var}). #' #' @note Because this function uses the NOAA API to identify the weather #' monitors within a U.S. county, you will need to get an access token from #' NOAA to use this function. Visit NOAA's token request page #' (\url{http://www.ncdc.noaa.gov/cdo-web/token}) to request a token by #' email. You then need to set that API code in your R session (e.g., using #' \code{options(noaakey = "your key")}, replacing "your key" with the API #' key you've requested from NOAA). See the package vignette for more details. #' #' @examples #' \dontrun{ #' denver_ex <- daily_fips("08031", coverage = 0.90, date_min = "2010-01-01", #' date_max = "2010-02-01", var = "prcp") #' #' head(denver_ex$daily_data) #' denver_ex$station_map #' #' mobile_ex <- daily_fips("01097", date_min = "1997-07-13", #' date_max = "1997-07-25", var = "prcp", #' average_data = FALSE) #' library(ggplot2) #' ggplot(mobile_ex$daily_data, aes(x = date, y = prcp, color = id)) + #' geom_line() #' } #' @export daily_fips <- function(fips, coverage = NULL, date_min = NULL, date_max = NULL, var = "all", average_data = TRUE, station_label = FALSE, verbose = TRUE) { census_data <- countyweather::county_centers loc_fips <- which(census_data$fips == fips) if (verbose) { message(paste0("Getting daily weather data for ", census_data[loc_fips, "name"], ".", " This may take a while.")) } stations <- daily_stations(fips = fips, date_min = date_min, date_max = date_max) weather_data <- daily_df(stations = stations, var = var, date_min = date_min, date_max = date_max, coverage = coverage, average_data = average_data) # sp::proj4string not working ## station_map <- daily_stationmap(fips = fips, ## daily_data = weather_data, ## station_label = station_label) list <- list("daily_data" = weather_data$daily_data, "station_metadata" = weather_data$station_df, "station_map" = NULL) return(list) } #' Return average daily weather data for a particular county. #' #' Returns a list with data on weather and stations for a selected county. 
#' This function serves as a wrapper to several functions from the \code{rnoaa} #' package, which pull weather data from all relevant stations in a county. #' This function filters and averages data returned by \code{rnoaa} functions #' across all weather stations in a county based on user-specified #' coverage specifications. #' #' @note Because this function uses the NOAA API to identify the weather #' monitors within a U.S. county, you will need to get an access token from #' NOAA to use this function. Visit NOAA's token request page #' (\url{http://www.ncdc.noaa.gov/cdo-web/token}) to request a token by #' email. You then need to set that API code in your R session (e.g., using #' \code{options(noaakey = "your key")}, replacing "your key" with the API #' key you've requested from NOAA). See the package vignette for more details. #' #' @param stations A dataframe containing station metadata, returned from #' the function \code{daily_stations}. #' @param coverage A numeric value in the range of 0 to 1 that specifies #' the desired percentage coverage for the weather variable (i.e., what #' percent of each weather variable must be non-missing to include data from #' a monitor when calculating daily values averaged across monitors. The #' default is to include all monitors with any available data (i.e., #' \code{coverage = 0}).) #' @param var A character vector specifying desired weather variables. For #' example, \code{var = c("tmin", "tmax", "prcp")} for maximum temperature, #' minimum temperature, and precipitation. The default is \code{"all"}, #' which includes all available weather variables at any weather station in #' the county. For a full list of all #' possible variable names, see NOAA's README file for the Daily Global #' Historical Climatology Network (GHCN-Daily) at #' \url{http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt}. Many of #' the weather variables are available for some, but not all, monitors, so #' your output from this function may not include all the variables #' specified using this argument. If you specify a variable here but it is #' not included in the output dataset, it means that it was not available in #' the time range for any monitor in the county. #' @param average_data TRUE / FALSE to indicate if you want the function to #' average daily weather data across multiple monitors. If you choose #' FALSE, the function will return a dataframe with separate entries for #' each monitor, while TRUE (the default) outputs a single estimate #' for each day in the dataset, giving the average value of the weather #' metric across all available monitors in the county that day. #' @inheritParams daily_stations #' #' @return A list with two elements. \code{daily_data} is a dataframe of daily #' weather data averaged across multiple monitors and includes columns #' (\code{"var"_reporting}) for each weather variable showing the number of #' stations contributing to the average for that variable on that day. #' The element \code{station_df} is a dataframe of station metadata for each #' station contributing weather data. A weather station will have one row per #' weather variable to which it contributes data. In addition to information #' such as station id, name, latitude, and longitude, the \code{station_df} #' dataframe includes statistical information about weather values #' contributed by each station for each weather variable. 
These statistics #' include \code{calc_coverage} (the percent of non-missing values for each #' station-weather variable combination for the specified date range), #' \code{standard_dev} (standard deviation), \code{max}, and \code{min}, #' (giving the minimum and maximum values), and \code{range}, giving the #' range of values in each station-weather variable combination. The #' element \code{radius} is the calculated radius within which stations were #' pulled from the county's center. Elements \code{lat_center} and #' \code{lon_center} are the latitude and longitude of the county's center. #' #' @examples #' \dontrun{ #' stations <- daily_stations(fips = "12086", date_min = "2010-01-01", #' date_max = "2010-02-01") #' fips_list <- daily_df(stations = stations, coverage = 0.90, #' var = c("tmax", "tmin", "prcp"), #' date_min = "2010-01-01", date_max = "2010-02-01") #' averaged_data <- fips_list$daily_data #' head(averaged_data) #' station_info <- fips_list$station_df #' head(station_info) #' } daily_df <- function(stations, coverage = NULL, var = "all", date_min = NULL, date_max = NULL, average_data = TRUE) { # get tidy full dataset for all monitors quiet_pull_monitors <- purrr::quietly(rnoaa::meteo_pull_monitors) if (length(var) == 1) { if (var == "all") { meteo_var <- "all" } } else { meteo_var <- toupper(var) } meteo_df <- quiet_pull_monitors(monitors = stations$id, keep_flags = FALSE, date_min = date_min, date_max = date_max, var = meteo_var)$result # calculate coverage for each weather variable # MD: append $summary since API changed possibly coverage_df <- rnoaa::meteo_coverage(meteo_df, verbose = FALSE)$summary # filter station dataset based on specified coverage filtered <- filter_coverage(coverage_df, coverage = coverage) good_monitors <- unique(filtered$id) # filter weather dataset based on stations with specified coverage filtered_data <- dplyr::filter_(meteo_df, ~ id %in% good_monitors) # steps to filter out erroneous data from individual stations # precipitation if ("prcp" %in% var) { filtered_data$prcp <- filtered_data$prcp / 10 if (max(filtered_data$prcp, na.rm = TRUE) > 1100) { bad_prcp <- which(with(filtered_data, prcp > 1100)) filtered_data <- filtered_data[-bad_prcp,] } } # snowfall if ("snow" %in% var) { if(max(filtered_data$snow, na.rm = TRUE) > 1600) { bad_snow <- which(with(filtered_data, snow > 1600)) filtered_data <- filtered_data[-bad_snow,] } } # snow depth if ("snwd" %in% var) { if (max(filtered_data$snwd, na.rm = TRUE) > 11500) { bad_snwd <- which(with(filtered_data, snwd > 11500)) filtered_data <- filtered_data[-bad_snwd,] } } # tmax if ("tmax" %in% var) { filtered_data$tmax <- filtered_data$tmax / 10 if (max(filtered_data$tmax, na.rm = TRUE) > 57) { bad_tmax <- which(with(filtered_data, tmax > 57)) filtered_data <- filtered_data[-bad_tmax,] } } # tmin if ("tmin" %in% var) { filtered_data$tmin <- filtered_data$tmin / 10 if (min(filtered_data$tmin, na.rm = TRUE) < -62) { bad_tmin <- which(with(filtered_data, tmin < -62)) filtered_data <- filtered_data[-bad_tmin,] } } all_cols <- colnames(filtered_data) not_vars <- c("id", "date") g_cols <- all_cols[!all_cols %in% not_vars] group_cols <- c("id", "key") stats <- filtered_data %>% dplyr::select_(quote(-date)) %>% tidyr::gather_(key_col = "key", value_col = "value", gather_cols = g_cols) %>% dplyr::group_by_(.dots = group_cols) %>% dplyr::summarize_(standard_dev = ~ sd(value, na.rm = TRUE), min = ~ min(value, na.rm = TRUE), max = ~ max(value, na.rm = TRUE), range = ~ max - min) filtered <- dplyr::filter_(filtered, 
~ id %in% good_monitors) stats <- dplyr::full_join(stats, filtered, by = c("id", "key")) stations <- dplyr::filter_(stations, ~ id %in% good_monitors) stations <- dplyr::full_join(stats, stations, by = "id") %>% dplyr::select_(quote(id), quote(name), quote(key), quote(latitude), quote(longitude), quote(calc_coverage), quote(standard_dev), quote(min), quote(max), quote(range)) colnames(stations)[3] <- "var" if (average_data == TRUE) { filtered_data <- ave_daily(filtered_data) } out <- list("daily_data" = filtered_data, "station_df" = stations) return(out) } #' Write daily weather timeseries files for U.S. counties. #' #' Given a vector of U.S. county FIPS codes, this function saves each element of #' the lists created from the function \code{daily_fips} to a separate folder #' within a given directory. This function therefore allows you to pull and #' save weather data time series for multiple counties at once. #' The dataframe \code{daily_data} is saved to #' a subdirectory of the given directory called "data." This timeseries #' dataframe gives the values for specified weather variables and the #' number of weather stations contributing to the average value for each day #' within the specified date range. The element \code{station_metadata}, which #' gives information about stations contributing to the time series, as well as #' statistical information about the values contributed by these stations, is saved #' in a subdirectory called "metadata." The element \code{station_map}, which is #' a map of contributing station locations, is saved in a subdirectory called #' "maps." #' #' @return Writes out three subdirectories of a given directory with daily #' weather files saved in "data", station metadata saved in "metadata", #' and a map of weather station locations saved in "maps" for each FIPS code #' specified provided there is available data for that county. The user can #' specify either .rds or .csv format for the data and metadata files, using #' the arguments \code{data_type} and \code{metadata_type}, respectively. #' Maps are saved as .png files. #' #' @inheritParams daily_df #' @inheritParams daily_stations #' @param out_directory The absolute or relative pathname for the directory #' where you would like the three subdirectories ("data", "metadata", and #' "plots") to be created. #' @param data_type A character string indicating that you would like either #' .rds files (data_type = "rds") or .csv files (data_type = "csv") for the #' timeseries output. This option defaults to .rds files. #' @param metadata_type A character string indicating that you would like either #' .rds files (metadata_type = "rds") or .csv files (metadata_type = "csv") #' for the station metadata output. This option defaults to .rds files. #' @param keep_map TRUE / FALSE indicating if a map of the stations should #' be included. The map can substantially increase the size of the files, so #' if file size is a concern, you should consider setting this option to #' FALSE. If FALSE, the "maps" subdirectory will not be created. #' @param verbose TRUE / FALSE to indicate if you want the function to print #' the county or vector of counties it's saving files for as the function runs. #' @param station_label TRUE / FALSE to indicate whether to include station #' labels in the station map. #' #' @note If the function is unable to pull weather data for a particular county #' given the specified percent coverage, date range, and/or weather variables, #' \code{daily_timeseries} will not produce files for that county. 
#' #' @examples #' \dontrun{ #' write_daily_timeseries(fips = c("37055", "15005"), coverage = 0.90, #' date_min = "1995-01-01", date_max = "1995-01-31", #' var = c("tmax", "tmin", "prcp"), #' out_directory = "~/timeseries") #' } #' @export write_daily_timeseries <- function(fips, coverage = NULL, date_min = NULL, date_max = NULL, var = "all", out_directory, data_type = "rds", metadata_type = "rds", average_data = TRUE, station_label = FALSE, keep_map = TRUE, verbose = TRUE) { if (verbose) { if (length(fips) > 2) { for (i in 1:length(fips)) { if (i == 1) { codes <- (paste0(fips[i], ", ")) } else if (i == length(fips)) { codes <- paste0(codes, "and ", fips[i]) } else { codes <- paste0(codes, fips[i], ", ") } } message(paste0("Saving daily weather files for FIPS codes ", codes, " in the directory ", out_directory, ".", " This may take ", "a while.")) } else if (length(fips == 2)) { for (i in 1:length(fips)) { if (i == 1) { codes <- paste0(fips[i], " ") } else if (i == length(fips)) { codes <- paste0(codes, "and ", fips[i]) } else { codes <- paste0(codes, fips[i], ", ") } } message(paste0("Saving daily weather files for FIPS codes ", codes, " in the directory ", out_directory, ".", " This may take ", "a while.")) } else { message(paste0("Saving daily weather files for FIPS code ", fips, " in the directory ", out_directory, ".", " This may take ", "a while.")) } } if (!dir.exists(out_directory)) { dir.create(out_directory) } if (!dir.exists(paste0(out_directory, "/data"))) { dir.create(paste0(out_directory, "/data")) } if (!dir.exists(paste0(out_directory, "/metadata"))) { dir.create(paste0(out_directory, "/metadata")) } for (i in 1:length(fips)) { possibleError <- tryCatch({ out_list <- daily_fips(fips = fips[i], date_min = date_min, date_max = date_max, var = var, verbose = FALSE, average_data = average_data, station_label = station_label) out_data <- out_list$daily_data out_metadata <- out_list$station_metadata if (data_type == "rds") { data_file <- paste0(out_directory, "/data", "/", fips[i], ".rds") saveRDS(out_data, file = data_file) } else if (data_type == "csv") { data_file <- paste0(out_directory, "/data", "/", fips[i], ".csv") utils::write.csv(out_data, file = data_file, row.names = FALSE) } if (metadata_type == "rds") { metadata_file <- paste0(out_directory, "/metadata", "/", fips[i], ".rds") saveRDS(out_metadata, file = metadata_file) } else if (metadata_type == "csv") { metadata_file <- paste0(out_directory, "/metadata", "/", fips[i], ".csv") utils::write.csv(out_metadata, file = metadata_file) } if (keep_map == TRUE) { if (!dir.exists(paste0(out_directory, "/maps"))) { dir.create(paste0(out_directory, "/maps")) } out_map <- out_list$station_map map_file <- paste0(out_directory, "/maps") map_name <- paste0(fips[i], ".png") suppressMessages(ggplot2::ggsave(file = map_name, path = map_file, plot = out_map)) } } , error = function(e) { e message(paste0("Unable to pull weather data for FIPS code ", fips[i], " for the specified percent coverage, date range, and/or", " weather variables.")) } ) if (inherits(possibleError, "error")) next } } #' Write plot files for daily weather timeseries dataframes. #' #' Writes a directory with plots for every weather data time series file #' in the specified directory (as produced by the \code{daily_timeseries} #' function and saved in the "data" subdirectory of the directory given in that #' function's arguments) for a particular weather variable. 
#' #' @return Writes out a directory with plots of timeseries data for a given #' weather variable for each file present in the directory specified. #' #' @param var A character string specifying which weather variable for which #' you would like to produce plots (the variable must be present in the #' timeseries dataframe). #' @param data_directory The absolute or relative pathname for the directory #' where your daily timeseries dataframes (produced by \code{daily_timeseries}) #' are saved. #' @param plot_directory The absolute or relative pathname for the directory #' where you would like the plots to be saved. #' @param date_min A character string giving the earliest date present in the #' timeseries dataframe in "yyyy-mm-dd" format. #' @param date_max A character string giving the latest date present in the #' timeseries dataframe in "yyyy-mm-dd" format. #' @param data_type A character string indicating the type of timeseries files #' you would like to produce plots for (either \code{"rds"} or \code{"csv"}). #' This option defaults to .rds files. #' #' @examples #' \dontrun{ #' write_daily_timeseries(fips = c("37055", "15005"), coverage = 0.90, #' date_min = "1995-01-01", date_max = "1995-01-31", #' var = c("tmax", "tmin", "prcp"), #' out_directory = "~/timeseries") #' plot_daily_timeseries(var = "prcp", date_min = "1995-01-01", #' date_max = "1995-01-31", #' data_directory = "~/timeseries/data", #' plot_directory = "~/timeseries/plots_prcp") #' } #' @importFrom dplyr %>% #' #' @export plot_daily_timeseries <- function(var, date_min, date_max, data_directory, plot_directory, data_type = "rds") { files <- list.files(data_directory) if (!dir.exists(plot_directory)) { dir.create(plot_directory) } if (data_type == "rds") { file_names <- gsub(".rds", "", files) } else if (data_type == "csv"){ file_names <- gsub(".csv", "", files) } for (i in 1:length(files)) { dat <- readRDS(paste0(data_directory, "/", files[i])) weather <- dplyr::ungroup(dat) %>% as.data.frame() file_name <- paste0(file_names[i], ".png") grDevices::png(filename = paste0(plot_directory, "/", file_name)) weather$to_plot <- weather[ , var] graphics::plot(weather$date, weather$to_plot, type = "l", col = "red", main = file_names[i], xlab = "date", ylab = var, xlim = c(as.Date(date_min), as.Date(date_max))) grDevices::dev.off() } }
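# --- Added reminder (not part of the package file) ---
# As the roxygen notes above state, these functions need a NOAA API token
# before any data can be pulled, e.g.:
# options(noaakey = "your key")   # replace with the key requested from NOAA
# denver_ex <- daily_fips("08031", coverage = 0.90, date_min = "2010-01-01",
#                         date_max = "2010-02-01", var = "prcp")  # mirrors the roxygen example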
/R/daily_fips.R
no_license
gben1750/countyweather
R
false
false
22,515
r
#' Pull average daily weather data by U.S. county. #' #' Given a particular county FIPS code, this function returns data and meta-data #' for weather data, either for all available dates or for dates within a #' requested date range. #' #' @inheritParams daily_df #' @inheritParams daily_stations #' #' @param station_label TRUE / FALSE to indicate if you want your plot of #' weather station locations to include labels with station ids. #' @param verbose TRUE / FALSE to indicate if you want the function to print #' out the name of the county it's processing. #' #' @return A list with three elements. The first element (\code{daily_data}) is a #' dataframe of daily weather data averaged across multiple stations, as well #' as columns (\code{"var"_reporting}) for each weather variable showing the #' number of stations contributing to the average for that variable on that #' day. The second element (\code{station_metadata}) is a dataframe of station #' metadata for stations included in the \code{daily_data} dataframe, as well #' as statistical information about these values. Columns #' include \code{id}, \code{name}, \code{var}, \code{latitude}, #' \code{longitude}, \code{calc_coverage}, \code{standard_dev}, \code{min}, #' \code{max}, and \code{range}. The third element (\code{station_map}) #' is a plot showing locations of all weather stations for a particular county #' satisfying the conditions present in \code{daily_fips}'s arguments #' (\code{coverage}, \code{date_min}, \code{date_max}, and/or \code{var}). #' #' @note Because this function uses the NOAA API to identify the weather #' monitors within a U.S. county, you will need to get an access token from #' NOAA to use this function. Visit NOAA's token request page #' (\url{http://www.ncdc.noaa.gov/cdo-web/token}) to request a token by #' email. You then need to set that API code in your R session (e.g., using #' \code{options(noaakey = "your key")}, replacing "your key" with the API #' key you've requested from NOAA). See the package vignette for more details. #' #' @examples #' \dontrun{ #' denver_ex <- daily_fips("08031", coverage = 0.90, date_min = "2010-01-01", #' date_max = "2010-02-01", var = "prcp") #' #' head(denver_ex$daily_data) #' denver_ex$station_map #' #' mobile_ex <- daily_fips("01097", date_min = "1997-07-13", #' date_max = "1997-07-25", var = "prcp", #' average_data = FALSE) #' library(ggplot2) #' ggplot(mobile_ex$daily_data, aes(x = date, y = prcp, color = id)) + #' geom_line() #' } #' @export daily_fips <- function(fips, coverage = NULL, date_min = NULL, date_max = NULL, var = "all", average_data = TRUE, station_label = FALSE, verbose = TRUE) { census_data <- countyweather::county_centers loc_fips <- which(census_data$fips == fips) if (verbose) { message(paste0("Getting daily weather data for ", census_data[loc_fips, "name"], ".", " This may take a while.")) } stations <- daily_stations(fips = fips, date_min = date_min, date_max = date_max) weather_data <- daily_df(stations = stations, var = var, date_min = date_min, date_max = date_max, coverage = coverage, average_data = average_data) # sp::proj4string not working ## station_map <- daily_stationmap(fips = fips, ## daily_data = weather_data, ## station_label = station_label) list <- list("daily_data" = weather_data$daily_data, "station_metadata" = weather_data$station_df, "station_map" = NULL) return(list) } #' Return average daily weather data for a particular county. #' #' Returns a list with data on weather and stations for a selected county. 
#' This function serves as a wrapper to several functions from the \code{rnoaa} #' package, which pull weather data from all relevant stations in a county. #' This function filters and averages data returned by \code{rnoaa} functions #' across all weather stations in a county based on user-specified #' coverage specifications. #' #' @note Because this function uses the NOAA API to identify the weather #' monitors within a U.S. county, you will need to get an access token from #' NOAA to use this function. Visit NOAA's token request page #' (\url{http://www.ncdc.noaa.gov/cdo-web/token}) to request a token by #' email. You then need to set that API code in your R session (e.g., using #' \code{options(noaakey = "your key")}, replacing "your key" with the API #' key you've requested from NOAA). See the package vignette for more details. #' #' @param stations A dataframe containing station metadata, returned from #' the function \code{daily_stations}. #' @param coverage A numeric value in the range of 0 to 1 that specifies #' the desired percentage coverage for the weather variable (i.e., what #' percent of each weather variable must be non-missing to include data from #' a monitor when calculating daily values averaged across monitors. The #' default is to include all monitors with any available data (i.e., #' \code{coverage = 0}).) #' @param var A character vector specifying desired weather variables. For #' example, \code{var = c("tmin", "tmax", "prcp")} for maximum temperature, #' minimum temperature, and precipitation. The default is \code{"all"}, #' which includes all available weather variables at any weather station in #' the county. For a full list of all #' possible variable names, see NOAA's README file for the Daily Global #' Historical Climatology Network (GHCN-Daily) at #' \url{http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt}. Many of #' the weather variables are available for some, but not all, monitors, so #' your output from this function may not include all the variables #' specified using this argument. If you specify a variable here but it is #' not included in the output dataset, it means that it was not available in #' the time range for any monitor in the county. #' @param average_data TRUE / FALSE to indicate if you want the function to #' average daily weather data across multiple monitors. If you choose #' FALSE, the function will return a dataframe with separate entries for #' each monitor, while TRUE (the default) outputs a single estimate #' for each day in the dataset, giving the average value of the weather #' metric across all available monitors in the county that day. #' @inheritParams daily_stations #' #' @return A list with two elements. \code{daily_data} is a dataframe of daily #' weather data averaged across multiple monitors and includes columns #' (\code{"var"_reporting}) for each weather variable showing the number of #' stations contributing to the average for that variable on that day. #' The element \code{station_df} is a dataframe of station metadata for each #' station contributing weather data. A weather station will have one row per #' weather variable to which it contributes data. In addition to information #' such as station id, name, latitude, and longitude, the \code{station_df} #' dataframe includes statistical information about weather values #' contributed by each station for each weather variable. 
These statistics #' include \code{calc_coverage} (the percent of non-missing values for each #' station-weather variable combination for the specified date range), #' \code{standard_dev} (standard deviation), \code{max}, and \code{min}, #' (giving the minimum and maximum values), and \code{range}, giving the #' range of values in each station-weather variable combination. The #' element \code{radius} is the calculated radius within which stations were #' pulled from the county's center. Elements \code{lat_center} and #' \code{lon_center} are the latitude and longitude of the county's center. #' #' @examples #' \dontrun{ #' stations <- daily_stations(fips = "12086", date_min = "2010-01-01", #' date_max = "2010-02-01") #' fips_list <- daily_df(stations = stations, coverage = 0.90, #' var = c("tmax", "tmin", "prcp"), #' date_min = "2010-01-01", date_max = "2010-02-01") #' averaged_data <- fips_list$daily_data #' head(averaged_data) #' station_info <- fips_list$station_df #' head(station_info) #' } daily_df <- function(stations, coverage = NULL, var = "all", date_min = NULL, date_max = NULL, average_data = TRUE) { # get tidy full dataset for all monitors quiet_pull_monitors <- purrr::quietly(rnoaa::meteo_pull_monitors) if (length(var) == 1) { if (var == "all") { meteo_var <- "all" } } else { meteo_var <- toupper(var) } meteo_df <- quiet_pull_monitors(monitors = stations$id, keep_flags = FALSE, date_min = date_min, date_max = date_max, var = meteo_var)$result # calculate coverage for each weather variable # MD: append $summary since API changed possibly coverage_df <- rnoaa::meteo_coverage(meteo_df, verbose = FALSE)$summary # filter station dataset based on specified coverage filtered <- filter_coverage(coverage_df, coverage = coverage) good_monitors <- unique(filtered$id) # filter weather dataset based on stations with specified coverage filtered_data <- dplyr::filter_(meteo_df, ~ id %in% good_monitors) # steps to filter out erroneous data from individual stations # precipitation if ("prcp" %in% var) { filtered_data$prcp <- filtered_data$prcp / 10 if (max(filtered_data$prcp, na.rm = TRUE) > 1100) { bad_prcp <- which(with(filtered_data, prcp > 1100)) filtered_data <- filtered_data[-bad_prcp,] } } # snowfall if ("snow" %in% var) { if(max(filtered_data$snow, na.rm = TRUE) > 1600) { bad_snow <- which(with(filtered_data, snow > 1600)) filtered_data <- filtered_data[-bad_snow,] } } # snow depth if ("snwd" %in% var) { if (max(filtered_data$snwd, na.rm = TRUE) > 11500) { bad_snwd <- which(with(filtered_data, snwd > 11500)) filtered_data <- filtered_data[-bad_snwd,] } } # tmax if ("tmax" %in% var) { filtered_data$tmax <- filtered_data$tmax / 10 if (max(filtered_data$tmax, na.rm = TRUE) > 57) { bad_tmax <- which(with(filtered_data, tmax > 57)) filtered_data <- filtered_data[-bad_tmax,] } } # tmin if ("tmin" %in% var) { filtered_data$tmin <- filtered_data$tmin / 10 if (min(filtered_data$tmin, na.rm = TRUE) < -62) { bad_tmin <- which(with(filtered_data, tmin < -62)) filtered_data <- filtered_data[-bad_tmin,] } } all_cols <- colnames(filtered_data) not_vars <- c("id", "date") g_cols <- all_cols[!all_cols %in% not_vars] group_cols <- c("id", "key") stats <- filtered_data %>% dplyr::select_(quote(-date)) %>% tidyr::gather_(key_col = "key", value_col = "value", gather_cols = g_cols) %>% dplyr::group_by_(.dots = group_cols) %>% dplyr::summarize_(standard_dev = ~ sd(value, na.rm = TRUE), min = ~ min(value, na.rm = TRUE), max = ~ max(value, na.rm = TRUE), range = ~ max - min) filtered <- dplyr::filter_(filtered, 
~ id %in% good_monitors) stats <- dplyr::full_join(stats, filtered, by = c("id", "key")) stations <- dplyr::filter_(stations, ~ id %in% good_monitors) stations <- dplyr::full_join(stats, stations, by = "id") %>% dplyr::select_(quote(id), quote(name), quote(key), quote(latitude), quote(longitude), quote(calc_coverage), quote(standard_dev), quote(min), quote(max), quote(range)) colnames(stations)[3] <- "var" if (average_data == TRUE) { filtered_data <- ave_daily(filtered_data) } out <- list("daily_data" = filtered_data, "station_df" = stations) return(out) } #' Write daily weather timeseries files for U.S. counties. #' #' Given a vector of U.S. county FIPS codes, this function saves each element of #' the lists created from the function \code{daily_fips} to a separate folder #' within a given directory. This function therefore allows you to pull and #' save weather data time series for multiple counties at once. #' The dataframe \code{daily_data} is saved to #' a subdirectory of the given directory called "data." This timeseries #' dataframe gives the values for specified weather variables and the #' number of weather stations contributing to the average value for each day #' within the specified date range. The element \code{station_metadata}, which #' gives information about stations contributing to the time series, as well as #' statistical information about the values contributed by these stations, is saved #' in a subdirectory called "metadata." The element \code{station_map}, which is #' a map of contributing station locations, is saved in a subdirectory called #' "maps." #' #' @return Writes out three subdirectories of a given directory with daily #' weather files saved in "data", station metadata saved in "metadata", #' and a map of weather station locations saved in "maps" for each FIPS code #' specified provided there is available data for that county. The user can #' specify either .rds or .csv format for the data and metadata files, using #' the arguments \code{data_type} and \code{metadata_type}, respectively. #' Maps are saved as .png files. #' #' @inheritParams daily_df #' @inheritParams daily_stations #' @param out_directory The absolute or relative pathname for the directory #' where you would like the three subdirectories ("data", "metadata", and #' "plots") to be created. #' @param data_type A character string indicating that you would like either #' .rds files (data_type = "rds") or .csv files (data_type = "csv") for the #' timeseries output. This option defaults to .rds files. #' @param metadata_type A character string indicating that you would like either #' .rds files (metadata_type = "rds") or .csv files (metadata_type = "csv") #' for the station metadata output. This option defaults to .rds files. #' @param keep_map TRUE / FALSE indicating if a map of the stations should #' be included. The map can substantially increase the size of the files, so #' if file size is a concern, you should consider setting this option to #' FALSE. If FALSE, the "maps" subdirectory will not be created. #' @param verbose TRUE / FALSE to indicate if you want the function to print #' the county or vector of counties it's saving files for as the function runs. #' @param station_label TRUE / FALSE to indicate whether to include station #' labels in the station map. #' #' @note If the function is unable to pull weather data for a particular county #' given the specified percent coverage, date range, and/or weather variables, #' \code{daily_timeseries} will not produce files for that county. 
#' #' @examples #' \dontrun{ #' write_daily_timeseries(fips = c("37055", "15005"), coverage = 0.90, #' date_min = "1995-01-01", date_max = "1995-01-31", #' var = c("tmax", "tmin", "prcp"), #' out_directory = "~/timeseries") #' } #' @export write_daily_timeseries <- function(fips, coverage = NULL, date_min = NULL, date_max = NULL, var = "all", out_directory, data_type = "rds", metadata_type = "rds", average_data = TRUE, station_label = FALSE, keep_map = TRUE, verbose = TRUE) { if (verbose) { if (length(fips) > 2) { for (i in 1:length(fips)) { if (i == 1) { codes <- (paste0(fips[i], ", ")) } else if (i == length(fips)) { codes <- paste0(codes, "and ", fips[i]) } else { codes <- paste0(codes, fips[i], ", ") } } message(paste0("Saving daily weather files for FIPS codes ", codes, " in the directory ", out_directory, ".", " This may take ", "a while.")) } else if (length(fips == 2)) { for (i in 1:length(fips)) { if (i == 1) { codes <- paste0(fips[i], " ") } else if (i == length(fips)) { codes <- paste0(codes, "and ", fips[i]) } else { codes <- paste0(codes, fips[i], ", ") } } message(paste0("Saving daily weather files for FIPS codes ", codes, " in the directory ", out_directory, ".", " This may take ", "a while.")) } else { message(paste0("Saving daily weather files for FIPS code ", fips, " in the directory ", out_directory, ".", " This may take ", "a while.")) } } if (!dir.exists(out_directory)) { dir.create(out_directory) } if (!dir.exists(paste0(out_directory, "/data"))) { dir.create(paste0(out_directory, "/data")) } if (!dir.exists(paste0(out_directory, "/metadata"))) { dir.create(paste0(out_directory, "/metadata")) } for (i in 1:length(fips)) { possibleError <- tryCatch({ out_list <- daily_fips(fips = fips[i], date_min = date_min, date_max = date_max, var = var, verbose = FALSE, average_data = average_data, station_label = station_label) out_data <- out_list$daily_data out_metadata <- out_list$station_metadata if (data_type == "rds") { data_file <- paste0(out_directory, "/data", "/", fips[i], ".rds") saveRDS(out_data, file = data_file) } else if (data_type == "csv") { data_file <- paste0(out_directory, "/data", "/", fips[i], ".csv") utils::write.csv(out_data, file = data_file, row.names = FALSE) } if (metadata_type == "rds") { metadata_file <- paste0(out_directory, "/metadata", "/", fips[i], ".rds") saveRDS(out_metadata, file = metadata_file) } else if (metadata_type == "csv") { metadata_file <- paste0(out_directory, "/metadata", "/", fips[i], ".csv") utils::write.csv(out_metadata, file = metadata_file) } if (keep_map == TRUE) { if (!dir.exists(paste0(out_directory, "/maps"))) { dir.create(paste0(out_directory, "/maps")) } out_map <- out_list$station_map map_file <- paste0(out_directory, "/maps") map_name <- paste0(fips[i], ".png") suppressMessages(ggplot2::ggsave(file = map_name, path = map_file, plot = out_map)) } } , error = function(e) { e message(paste0("Unable to pull weather data for FIPS code ", fips[i], " for the specified percent coverage, date range, and/or", " weather variables.")) } ) if (inherits(possibleError, "error")) next } } #' Write plot files for daily weather timeseries dataframes. #' #' Writes a directory with plots for every weather data time series file #' in the specified directory (as produced by the \code{daily_timeseries} #' function and saved in the "data" subdirectory of the directory given in that #' function's arguments) for a particular weather variable. 
#'
#' @return Writes out a directory with plots of timeseries data for a given
#' weather variable for each file present in the directory specified.
#'
#' @param var A character string specifying the weather variable for which
#' you would like to produce plots (the variable must be present in the
#' timeseries dataframe).
#' @param data_directory The absolute or relative pathname for the directory
#' where your daily timeseries dataframes (produced by
#' \code{write_daily_timeseries}) are saved.
#' @param plot_directory The absolute or relative pathname for the directory
#' where you would like the plots to be saved.
#' @param date_min A character string giving the earliest date present in the
#' timeseries dataframe in "yyyy-mm-dd" format.
#' @param date_max A character string giving the latest date present in the
#' timeseries dataframe in "yyyy-mm-dd" format.
#' @param data_type A character string indicating the type of timeseries files
#' you would like to produce plots for (either \code{"rds"} or \code{"csv"}).
#' This option defaults to .rds files.
#'
#' @examples
#' \dontrun{
#' write_daily_timeseries(fips = c("37055", "15005"), coverage = 0.90,
#'                        date_min = "1995-01-01", date_max = "1995-01-31",
#'                        var = c("tmax", "tmin", "prcp"),
#'                        out_directory = "~/timeseries")
#' plot_daily_timeseries(var = "prcp", date_min = "1995-01-01",
#'                       date_max = "1995-01-31",
#'                       data_directory = "~/timeseries/data",
#'                       plot_directory = "~/timeseries/plots_prcp")
#' }
#' @importFrom dplyr %>%
#'
#' @export
plot_daily_timeseries <- function(var, date_min, date_max, data_directory,
                                  plot_directory, data_type = "rds") {

  files <- list.files(data_directory)

  if (!dir.exists(plot_directory)) {
    dir.create(plot_directory)
  }

  if (data_type == "rds") {
    file_names <- gsub(".rds", "", files)
  } else if (data_type == "csv"){
    file_names <- gsub(".csv", "", files)
  }

  for (i in 1:length(files)) {
    # Read each file with the reader that matches `data_type` (calling
    # readRDS() unconditionally would fail for .csv files).
    if (data_type == "rds") {
      dat <- readRDS(paste0(data_directory, "/", files[i]))
    } else if (data_type == "csv") {
      dat <- utils::read.csv(paste0(data_directory, "/", files[i]),
                             stringsAsFactors = FALSE)
      dat$date <- as.Date(dat$date)
    }
    weather <- dplyr::ungroup(dat) %>%
      as.data.frame()
    file_name <- paste0(file_names[i], ".png")
    grDevices::png(filename = paste0(plot_directory, "/", file_name))
    weather$to_plot <- weather[ , var]
    graphics::plot(weather$date, weather$to_plot,
                   type = "l", col = "red", main = file_names[i],
                   xlab = "date", ylab = var,
                   xlim = c(as.Date(date_min), as.Date(date_max)))
    grDevices::dev.off()
  }
}
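# Illustrative usage sketch only (not part of the package's documented
# examples); the paths below are hypothetical and assume the timeseries files
# were written by write_daily_timeseries() with data_type = "csv":
# plot_daily_timeseries(var = "prcp", date_min = "1995-01-01",
#                       date_max = "1995-01-31",
#                       data_directory = "~/timeseries/data",
#                       plot_directory = "~/timeseries/plots_prcp",
#                       data_type = "csv")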
#' @docType data
#' @keywords datasets
#' @title Example from Henderson (1988)
#' @description Pedigree file including diploids of uncertain parentage
#' @usage Henderson.1988
#' @format Data frame with 14 rows and 4 columns
#' @author Matthew Hamilton <matthew.hamilton@csiro.au>
#' @source Henderson CR (1988) Use of an average numerator relationship matrix for multiple-sire joining. Journal of Animal Science 66, 1614-1621.
#' @references Hamilton MG, Kerr RJ. Computation of the inverse additive relationship matrix for autopolyploid and multiple-ploidy populations

# 4-column example
Henderson.1988 <- data.frame(
  INDIV.ID    = c(1, 2, 3, 4, 5, 6, 7, 7, 8, 8, 9, 9, 9, 10),
  SIRE.ID     = c(0, 0, 1, 1, 3, 3, 3, 5, 1, 5, 1, 4, 5, 1),
  DAM.ID      = c(0, 0, 2, 2, 4, 0, 6, 6, 4, 4, 6, 6, 6, 4),
  PROBABILITY = c(1, 1, 1, 1, 1, 1, 0.6, 0.4, 0.3, 0.7, 0.3, 0.6, 0.1, 1)
)

usethis::use_data(Henderson.1988, overwrite = TRUE)
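# Quick illustration (uses only the data frame defined above): rows with
# PROBABILITY < 1 describe candidate parent pairs for individuals of uncertain
# parentage, so an individual ID can appear more than once and its candidate
# probabilities sum to 1.
# Henderson.1988[Henderson.1988$PROBABILITY < 1, ]
# aggregate(PROBABILITY ~ INDIV.ID, data = Henderson.1988, FUN = sum)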
/R/Henderson.1988.R
no_license
linleyj/polyAinv
R
false
false
1,022
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapsim.R
\name{mapsim}
\alias{mapsim}
\title{mapsim}
\usage{
mapsim(
  x,
  data = NULL,
  xlim = NULL,
  ylim = NULL,
  res = 2,
  rec = TRUE,
  track = TRUE,
  last = TRUE,
  det = TRUE,
  dt = NULL,
  ol = TRUE,
  hl = "orange",
  tcol = "salmon",
  alpha = 0.5,
  lwd = 0.25,
  reccol = "blue",
  detcol = "red",
  pal = "Blues 3",
  prj = "+proj=laea +lat_0=41 +lon_0=-71 +units=km +datum=WGS84",
  ...
)
}
\arguments{
\item{x}{a fitted object of class simsmolt}

\item{data}{data object created by sim_setup}

\item{xlim}{plot x limits}

\item{ylim}{plot y limits}

\item{res}{downsampling factor to produce a plot faster (default = 2, full-resolution = 0)}

\item{rec}{should receiver locations be displayed (logical)}

\item{alpha}{translucence for smolt track(s)}

\item{lwd}{width of smolt track(s)}

\item{col}{colour for receiver locations}

\item{size}{of smolt track end point(s)}
}
\description{
Produce a simple map of simulated track(s) with or without receivers
}
/man/mapsim.Rd
no_license
ianjonsen/simsmolt
R
false
true
1,060
rd
#' Transforms a tibble into a matrix
#'
#' This function was excluded from dplyr, but is still useful in many workflows.
#' It takes a tibble and transforms it into a matrix. Rownames are taken from the first column, which is subsequently deleted. Colnames are kept.
#' Columns should be selected before transformation.
#'
#' @import dplyr
#' @import tidyr
#' @param tb a tibble
#' @param long is the source in long format? Restructures into wide format. Use with caution.
#' @param out_type c("numeric", "character"); preferred output data.class. Will introduce NAs from characters if forced to "numeric".
#' @return a matrix with rownames taken from the first column of \code{tb}
#' @export
#' @examples
#' # wide format data
#' tb_wide <- as_tibble(mtcars,
#'                      rownames = "model")
#'
#' tb_wide %>%
#'   as_matrix()
#' tb_wide %>%
#'   as_matrix(out_type = "character")
#'
#' # from long format, not recommended
#' tb_long <- as_tibble(mtcars,
#'                      rownames = "model") %>%
#'   pivot_longer(2:12)
#'
#' tb_long %>%
#'   as_matrix(long = TRUE)
as_matrix <- function(tb, long = FALSE, out_type = "numeric") {

  if(long == TRUE) {
    tb <- tb %>%
      pivot_wider(names_from = 2, values_from = 3)
  }

  m_rownames <- tb[, 1] %>% pull
  tb <- tb[, -1]
  m <- as.matrix(tb)
  if(out_type == "numeric") m <- apply(m, 2, as.numeric)
  if(out_type == "character") m <- apply(m, 2, as.character)
  rownames(m) <- m_rownames

  return(m)
}
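# Quick illustration of the out_type caveat documented above (assumes dplyr and
# tidyr are attached, as in the examples; `tb_demo` is a made-up object):
# tb_demo <- tibble::tibble(id = c("r1", "r2"), grp = c("a", "b"), val = c(1, 2))
# as_matrix(tb_demo, out_type = "numeric")    # `grp` is coerced to NA
# as_matrix(tb_demo, out_type = "character")  # keeps "a"/"b", numbers become text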
/R/as_matrix.R
no_license
OHaggis/workFlo
R
false
false
1,502
r
Azure <- F

if(Azure){
  source('src/Tools.R')
  Bikes <- maml.mapInputPort(1)
  Bikes$dteday <- set.asPOSIXct(Bikes)
}else{
  source('Tools.R')
  Bikes <- read.csv('bikes.csv', sep=',', header=T, stringsAsFactors = F)
  Bikes$dteday <- char.toPOSIXct(Bikes)
}

require(dplyr)
Bikes <- Bikes %>%
  filter(hr == 9)

require(ggplot2)
ggplot(Bikes, aes(x=dteday, y=cnt)) +
  geom_line() +
  ylab('Número de Bikes') +
  xlab('Linha do Tempo') +
  ggtitle('Demanda por Bikes as 09:00') +
  theme(text = element_text(size = 20))
/Big Data Analytics com R e Microsoft Azure Machine Learning/08 - Data Munging no Azure Machine Learning/ggplot2Script.R
no_license
bambrozim/DataScienceAcademy
R
false
false
521
r
make_map <- function(target_name, data_sf_fn, map_categories, map_colors) {

  # Join color data
  data_sf <- readRDS(data_sf_fn)
  map_colors_df <- data.frame(value = map_categories,
                              color = map_colors,
                              stringsAsFactors = FALSE)
  map_data_ready <- dplyr::left_join(data_sf, map_colors_df, by = "value")

  # Create the map image
  png(target_name, width = 11, height = 8, units="in", res=300)
  plot(sf::st_geometry(map_data_ready),
       col = map_data_ready$color,
       border=NA, axes=FALSE)
  dev.off()

}
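# Hypothetical standalone call, for illustration only (file names, categories,
# and colors are made up; the real values come from the pipeline configuration):
# make_map(target_name  = "6_visualize/out/availability_map.png",
#          data_sf_fn   = "5_process/out/model_output_sf.rds",
#          map_categories = c("very low", "low", "average", "high", "very high"),
#          map_colors     = c("#CC4C02", "#FE9929", "#FED98E", "#A7B9D7", "#144873"))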
/6_visualize/src/make_map.R
permissive
usgs-makerspace/wbeep-map-images
R
false
false
557
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
%   R/gen-namespace-examples.R
\name{torch_fft}
\alias{torch_fft}
\title{Fft}
\arguments{
\item{input}{(Tensor) the input tensor of at least \code{signal_ndim} \code{+ 1} dimensions}

\item{signal_ndim}{(int) the number of dimensions in each signal. \code{signal_ndim} can only be 1, 2 or 3}

\item{normalized}{(bool, optional) controls whether to return normalized results. Default: \code{False}}
}
\description{
Fft
}
\note{
\preformatted{For CUDA tensors, an LRU cache is used for cuFFT plans to speed up
repeatedly running FFT methods on tensors of same geometry with same
configuration. See cufft-plan-cache for more details on how to
monitor and control the cache.
}
}
\section{fft(input, signal_ndim, normalized=False) -> Tensor }{

Complex-to-complex Discrete Fourier Transform

This method computes the complex-to-complex discrete Fourier transform.
Ignoring the batch dimensions, it computes the following expression:

\deqn{
X[\omega_1, \dots, \omega_d] =
    \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
     e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
}
where \eqn{d} = \code{signal_ndim} is number of dimensions for the signal,
and \eqn{N_i} is the size of signal dimension \eqn{i}.

This method supports 1D, 2D and 3D complex-to-complex transforms, indicated
by \code{signal_ndim}. \code{input} must be a tensor with last dimension of size 2,
representing the real and imaginary components of complex numbers, and
should have at least \code{signal_ndim + 1} dimensions with optionally arbitrary
number of leading batch dimensions. If \code{normalized} is set to \code{True}, this
normalizes the result by dividing it with \eqn{\sqrt{\prod_{i=1}^K N_i}} so that
the operator is unitary.

Returns the real and the imaginary parts together as one tensor of the same
shape of \code{input}.

The inverse of this function is \code{\link{torch_ifft}}.
}

\section{Warning}{

For CPU tensors, this method is currently only available with MKL. Use
\code{torch_backends.mkl.is_available} to check if MKL is installed.
}

\examples{
if (torch_is_installed()) {

# unbatched 2D FFT
x = torch_randn(c(4, 3, 2))
torch_fft(x, 2)

# batched 1D FFT
torch_fft(x, 1)

# arbitrary number of batch dimensions, 2D FFT
x = torch_randn(c(3, 3, 5, 5, 2))
torch_fft(x, 2)

}
}
/man/torch_fft.Rd
permissive
minghao2016/torch
R
false
true
2,429
rd
#
# We require two different subsets of genes, one for all CP-associated
# loci and another for all gene-trees that
#
# to be used to isolate orthologs involved in subset
# where ortholog id's match that of file supplied
# are subsetted from full ortholog id file

# ape provides Ntip(); stringr provides str_extract() and str_subset()
library(ape)
library(stringr)

ortho_long <- read.delim("ortho_long.tsv", header=FALSE, stringsAsFactors=FALSE)
pa <- read.delim("pa.csv", header=FALSE, stringsAsFactors=FALSE) #pa.csv

to_keep <- unique(pa$V1)
CP_orthos <- subset(all_orthos, V1 %in% to_keep)

subsetorthos <- subset_ortholog(ortho_long, "pa.csv")

write.table(CP_orthos, file = "subsetCP.tsv", row.names = FALSE, col.names = FALSE, quote = FALSE)
# this tsv then used to produce dNdS values

#########################################
#  Probably move this to another script #
#########################################

# Subsetting all orthologs for concatenate.py / making branch lengths

files <- which(Ntip(<all trees read into R>) == 24) # total number of strains
filepaths <- all_filepath_trees[files]

og_nums_full <- str_extract(string = filepaths, pattern = "og_\\d+")
str_subset(ortho_long$V1, CPorthos[1])

ortho_full_rows <- lapply(og_nums_full, function(x) which(ortho_long$V1 == (x)))
rownums <- unlist(ortho_full_rows)
subsetorthos <- ortho_long[(rownums),]

full <- unique(subsetorthos$V1)

write.table(full, file = "subsetfull.tsv", row.names = FALSE, col.names = FALSE, quote = FALSE)
/OrthologSubsetting.R
no_license
dwinter/CyclicPeptidedNdS
R
false
false
1,428
r
library(tidync)
library(tidyverse)
library(tictoc)
library(future)
library(furrr)

## If working outside RStudio:
setwd("~/Documents/Projects/ESDL_earlyadopter/ESDL/")

## Load data
## Land cover data was downloaded from ESA Copernicus facility
## Source: https://cds.climate.copernicus.eu/cdsapp#!/dataset/satellite-land-cover?tab=overview
# The data is too large to be on the GitHub repo (~40GB)
# In a previous exploration `land_cover.Rmd` I've noticed that using only the first and last
# value on the timeseries gives already a good approximation for % of land cover change.
# Since the regressions will be aggregated in time, no need to use the full time series.
# It reduces computation time.

files <- fs::dir_ls("~/Documents/Projects/DATA/LULCC/", recurse = 1) %>%
  str_subset(pattern = ".nc")
# where files[1] is 2002 and files[length(files)] is 2018
## variables: lccs_class, processed_flag, current_pixel_state, observation_count, change_count

## Sampling data:
# Remember that sampling and regressions will be done separately for each response variable
# (GPP, TER, LAI, or ChlorA). So one needs to load the sampled pixels for the land use
# dataset separately as well. Coordinates come from:
sample <- read_csv(
  file = "~/Documents/Projects/ESDL_earlyadopter/ESDL/Results/sampled_FFT_variables/sample_pixels_delta_TER.csv",
  col_types = cols(
    lon = col_double(),
    lat = col_double(),
    #biome_code = col_double(),
    biome = col_character(),
    n_ews = col_double()
  )) %>%
  select(lon, lat) # I only need the coordinates now

# You only need to read the datasets once:
# 2001 for TER and GPP, 1994 LAI, 1998 ClorA
nc_2001 <- files[3] %>%
  tidync() %>%
  activate(lccs_class)

nc_2018 <- files[length(files)] %>%
  tidync() %>%
  activate(lccs_class)

# the function reads the files for the beginning and end of the time series, not all the
# files. The years are set manually to 2001 and 2018 since it is the range for most of the
# vars. For ClorA and LAI, which go back to the 1990s, the initial year needs to be changed.
# Then it calculates the proportion of change per pixel, and the vector of changes per land
# cover class in number of 900m^2 pixels (30*30).
land_cover_change <- function(file1, file2, lons, lats){
  # Read files
  file_list <- list(file1, file2)
  # tic()
  df_files <- file_list %>%
    map(function(x){
      x %>%
        hyper_filter(
          # original lat lon coords are used as centroids of a 0.25 degree pixel
          # so I add +/- 0.125 in each direction
          lat = dplyr::between(lat, (lats - 0.125), (lats + 0.125)),
          lon = dplyr::between(lon, (lons - 0.125), (lons + 0.125))) %>%
        hyper_tibble() %>%
        mutate(year = lubridate::as_date(time) %>% lubridate::year(.))
    })
  # toc()

  # Calculate proportion of change per 0.25 pixel
  # tic()
  prop_change <- df_files %>%
    bind_rows() %>%
    select(lccs_class, lon, lat, year) %>%
    pivot_wider(values_from = lccs_class, names_from = year) %>%
    # 2001 for TER and GPP, 1994 LAI, 1998 ClorA
    mutate(changed = `1994` != `2018`) %>%
    summarize(prop_change = (sum(changed)/n())*100)
  # toc()

  # Calculate summary per land cover class
  # tic()
  pxl_summary <- df_files %>%
    bind_rows() %>%
    select(lccs_class, lon, lat, year) %>%
    # 2001 for TER and GPP, 1994 LAI, 1998 ClorA
    filter(year == 1994 | year == 2018) %>%
    group_by(lccs_class, year) %>%
    summarize(pixels = n()) %>%
    ungroup() %>%
    group_by(lccs_class) %>%
    pivot_wider(
      id_cols = lccs_class, names_from = year, values_from = pixels,
      # adding 1 as missing value is to avoid division by zero
      values_fill = 1) %>% #colSums() # 6480 pixels of 30*30mts
    # 2001 for TER and GPP, 1994 LAI, 1998 ClorA
    mutate(pxl_change = (`2018`-`1994`))
  # toc()

  return(list(prop_change, pxl_summary))
}

### test: 0.31 sec, passing
tic()
land_cover_change(nc_2001, nc_2018, lons = -74, lats = 4)
toc()

tic()
test <- map2(head(sample$lon), head(sample$lat),
             .f = land_cover_change, file1 = nc_2001, file2 = nc_2018)
toc() # 1.51 sec for 6 instances
# estimated time for computation = (0.31) * nrow(sample) / 60 / 60 = 4.59 hrs
# estimated memory = 30 / 6 * nrow(sample) = 266575KB ~ 266MB
# Manageable!!

plan(multicore, workers = 10) # do it in parallel

lcc_output <- list()

tic()
lcc_output <- future_map2(
  sample$lon, sample$lat,
  .f = land_cover_change, file1 = nc_2001, file2 = nc_2018)
toc() # 2257.325 sec, 38min , 54min LAI

object.size(lcc_output) %>% format("Mb") # 253 Mb

lcc_output <- transpose(lcc_output)

tic()
prop_change_df <- lcc_output[[1]] %>%
  bind_rows() %>%
  mutate(lon = sample$lon, lat = sample$lat)
toc() # 0.5sec

tic()
pxl_land_cover_change <- pmap(
  list(
    x = (lcc_output[[2]]),
    lon = (sample$lon),
    lat = (sample$lat)),
  .f = function(x, lon, lat) {
    x <- x %>% mutate(lon = lon, lat = lat)
    return(x)}) %>%
  bind_rows()
toc() # 346.474

# lobstr::obj_size(pxl_land_cover_change)
lobstr::obj_sizes(lcc_output, prop_change_df, pxl_land_cover_change)
length(lcc_output)

## skip errors (which are pixels with only missing values)
## It was not done with the "safely" option, so there is no automatic way to check for errors
# is_ok <- lcc_output$error %>% map(function(x) is.null(x)) %>% unlist()

## the end file should be saved in the sample folder with the format:
## sampled_pixels_terrestrial_LCC_4GPP.RData, and repeat the procedure for each response variable.
save(
  pxl_land_cover_change,
  prop_change_df,
  file = "/Users/juanrocha/Documents/Projects/ESDL_earlyadopter/ESDL/Results/sampled_FFT_variables/sampled_pixels_delta_land_cover_TER.RData"
)

## Remember that the order of the elements in the list corresponds to the order in the "sample" object

prop_change_df %>%
  ggplot(aes(prop_change)) +
  geom_density()
/land_cover.R
permissive
juanrocha/ESDL
R
false
false
6,131
r
context("Testing add_pseudotime")

id <- "a"
cell_ids <- c("truth", "universally", "acknowledged", "that", "a", "single")
extras <- list("man")

pseudotime <- c(0, .1, .4, .5, .8, 1) %>% set_names(cell_ids)

wr_orig <- wrap_data(
  id = id,
  cell_ids = cell_ids
)

test_that("add_pseudotime works as expected", {
  trajectory <- wr_orig %>% add_pseudotime(pseudotime = pseudotime)
  expect_equal(trajectory$pseudotime, pseudotime)

  expect_error(add_pseudotime(wr_orig, pseudotime = "whatever"))
})
/tests/testthat/test-wrap_add_pseudotime.R
permissive
dynverse/dynwrap
R
false
false
504
r
x <- read.table('isoseq_flnc.fasta.hg19.sam.probe_hit.txt', sep='\t', header=T)

print("# hit align hg19: ")
print(length(unique(x$read_id)))

good <- subset(x, num_probe>0) # reads that hit one or more probes
print("# hit hg19 probe: ")
print(length(unique(good$read_id)))

print("# of hit hg19 genes: ")
print(length(unique(good$genes)))
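# Possible follow-up, not part of the original script (assumption): rank the
# probed genes by how many reads hit them, using only objects defined above.
hits_per_gene <- sort(table(good$genes), decreasing = TRUE)
print(head(hits_per_gene, 10))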
/targeted/count_hits_hg19.R
permissive
Vikash84/cDNA_Cupcake
R
false
false
335
r
# data
my_data = read.csv("https://wnarifin.github.io/covid-19-malaysia/covid-19_my_full.csv")
my_data$date = as.Date(my_data$date)

# before MCO
my_data1 = subset(my_data, date >= "2020-02-28" & date < "2020-03-18")
my_data1$active = my_data1$total_cases - my_data1$total_recover - my_data1$total_deaths
# shorter dataset
data_my1 = as.data.frame(with(my_data1, cbind(total_cases, recover, total_recover, new_deaths, total_deaths, active)))
str(data_my1)
data_my1$recover_death = data_my1$recover + data_my1$new_deaths
data_my1$days = 1:dim(data_my1)[1] # generate days
data_my1

# by glm
model1 = glm(recover_death ~ days, data = data_my1, family = "poisson", offset = log(active))
summary(model1)
gamma = exp(coef(model1)[[2]]) - 1; gamma
recover_days = 1/gamma; recover_days

# MCO 1
my_data2 = subset(my_data, date >= "2020-03-18" & date < "2020-04-01")
my_data2$active = my_data2$total_cases - my_data2$total_recover - my_data2$total_deaths
# shorter dataset
data_my2 = as.data.frame(with(my_data2, cbind(total_cases, recover, total_recover, new_deaths, total_deaths, active)))
str(data_my2)
data_my2$recover_death = data_my2$recover + data_my2$new_deaths
data_my2$days = 1:dim(data_my2)[1] # generate days
data_my2

# by glm
model2 = glm(recover_death ~ days, data = data_my2, family = "poisson", offset = log(active))
summary(model2)
gamma = exp(coef(model2)[[2]]) - 1; gamma
recover_days = 1/gamma; recover_days

# MCO 2
my_data3 = subset(my_data, date >= "2020-04-01")
my_data3$active = my_data3$total_cases - my_data3$total_recover - my_data3$total_deaths
# shorter dataset
data_my3 = as.data.frame(with(my_data3, cbind(total_cases, recover, total_recover, new_deaths, total_deaths, active)))
str(data_my3)
data_my3$recover_death = data_my3$recover + data_my3$new_deaths
data_my3$days = 1:dim(data_my3)[1] # generate days
data_my3

# by glm
model3 = glm(recover_death ~ days, data = data_my3, family = "poisson", offset = log(active))
summary(model3)
gamma = exp(coef(model3)[[2]]) - 1; gamma
recover_days = 1/gamma; recover_days
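# Optional refactor sketch (assumption: same column names as above). The three
# period-specific blocks repeat the same steps, so they could be wrapped in a
# helper that returns the removal rate gamma and the implied days to recovery:
# recovery_rate <- function(dat) {
#   dat$active <- dat$total_cases - dat$total_recover - dat$total_deaths
#   dat$recover_death <- dat$recover + dat$new_deaths
#   dat$days <- seq_len(nrow(dat))
#   fit <- glm(recover_death ~ days, data = dat, family = "poisson", offset = log(active))
#   gamma <- exp(coef(fit)[[2]]) - 1
#   c(gamma = gamma, recover_days = 1 / gamma)
# }
# recovery_rate(subset(my_data, date >= "2020-02-28" & date < "2020-03-18"))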
/recovery rate.R
no_license
cwenghowe/sircovid19mys
R
false
false
2,040
r
# usage:
# R --slave --vanilla --file=BWMCLI_XMCDAv3.R --args "[inDirectory]" "[outDirectory]"

rm(list=ls())

# tell R to use the rJava package and the RXMCDA3 package
library(rJava)
library(XMCDA3)

# cf. http://stackoverflow.com/questions/1815606/rscript-determine-path-of-the-executing-script
script.dir <- function() {
  cmdArgs <- commandArgs(trailingOnly = FALSE)
  needle <- "--file="
  match <- grep(needle, cmdArgs)
  if (length(match) > 0) {
    # Rscript
    return(dirname(normalizePath(sub(needle, "", cmdArgs[match]))))
  } else {
    # 'source'd via R console
    return(dirname(normalizePath(sys.frames()[[1]]$ofile)))
  }
}

# load the R files in the script's directory
script.wd <- setwd(script.dir())
source("utils.R")
source("inputsHandler.R")
source("outputsHandler.R")
source("BWM.R")
source("calculationsBWM.R")

# restore the working directory so that relative paths passed as
# arguments work as expected
if (!is.null(script.wd)) setwd(script.wd)

# get the in and out directories from the arguments
inDirectory <- commandArgs(trailingOnly=TRUE)[1]
outDirectory <- commandArgs(trailingOnly=TRUE)[2]

# Override the directories here (uncomment when testing from inside R), e.g.:
#inDirectory <- "/path/to/BWM/tests/in1.v2"
#outDirectory <- "/path/to/BWM/tests/out_tmp/"

# filenames
criteriaFile <- "criteria.xml"
bestPreferencesOfCriteriaFile <- "bestPreferencesOfCriteria.xml"
worstPreferencesOfCriteriaFile <- "worstPreferencesOfCriteria.xml"
criteriaWeightsFile <- "criteriaWeights.xml"
messagesFile <- "messages.xml"

# the Java xmcda object for the output messages
xmcdaMessages<-.jnew("org/xmcda/XMCDA")
xmcdaData <- .jnew("org/xmcda/XMCDA")

loadXMCDAv3(xmcdaData, inDirectory, criteriaFile, mandatory = TRUE, xmcdaMessages, "criteria")
loadXMCDAv3(xmcdaData, inDirectory, bestPreferencesOfCriteriaFile, mandatory = TRUE, xmcdaMessages, "criteriaValues")
loadXMCDAv3(xmcdaData, inDirectory, worstPreferencesOfCriteriaFile, mandatory = TRUE, xmcdaMessages, "criteriaValues")

# if we have a problem with the inputs, it is time to stop
if (xmcdaMessages$programExecutionResultsList$size() > 0){
  if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
    writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
    stop(paste("An error has occurred while loading the input files. For further details, see ", messagesFile, sep=""))
  }
}

# let's check the inputs and convert them into our own structures
inputs<-checkAndExtractInputs(xmcdaData, programExecutionResult)

if (xmcdaMessages$programExecutionResultsList$size()>0){
  if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
    writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
    stop(paste("An error has occurred while checking and extracting the inputs.
For further details, see ", messagesFile, sep=""))
  }
}

# here we know that everything was loaded as expected
# now let's call the calculation method

results <- handleException(
  function() return(
    BWM(inputs)
  ),
  xmcdaMessages,
  humanMessage = "The calculation could not be performed, reason: "
)

if (is.null(results)){
  writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
  stop("Could not calculate BWM.")
}

# fine, now let's put the results into XMCDA structures
xResults = convert(results, xmcdaMessages)

if (is.null(xResults)){
  writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
  stop("Could not convert BWM results into XMCDA")
}

# and last, write them onto the disk
for (i in 1:length(xResults)){
  outputFilename = paste(outDirectory, paste(names(xResults)[i],".xml",sep=""), sep="/")
  tmp <- handleException(
    function() return(
      writeXMCDA(xResults[[i]], outputFilename, xmcda_v3_tag(names(xResults)[i]))
    ),
    xmcdaMessages,
    humanMessage = paste("Error while writing ", outputFilename,", reason :")
  )
  if (is.null(tmp)){
    writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
    stop("Error while writing ",outputFilename,sep="")
  }
}

# then the messages file
# TODO: do this differently, rather than adding an empty info just to produce a <status>ok</status>
tmp <- handleException(
  function() return(
    putProgramExecutionResult(xmcdaMessages, infos="")
  ),
  xmcdaMessages
)

if (is.null(tmp)){
  writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
  stop("Could not add methodExecutionResult to tree.")
}

tmp <- handleException(
  function() return(
    writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
  ),
  xmcdaMessages
)

if (is.null(tmp)){
  writeXMCDA(xmcdaMessages, paste(outDirectory,messagesFile, sep="/"))
  stop("Error while writing messages file.")
}
/diviz/src/BWMCLI_XMCDAv3.R
no_license
jakub-tomczak/DecisionDeckBWM
R
false
false
4,795
r
library(dplyr)
library(broom)

fishdata <- read.csv("data/fisherman_mercury_modified.csv") %>%
  mutate(fisherman = factor(fisherman))

# Here are our two models
fit_univariate <- lm(total_mercury ~ fisherman, data = fishdata)

fit_multiple <- lm(total_mercury ~ fisherman + weight + fishmlwk, data = fishdata)

# Tidy 'em up
fit_univariate_tidy <- ____
fit_multiple_tidy <- ____

# Bind them
both_tidy <- bind_rows("univariate" = ___,
                       "multiple" = ___,
                       .id = "model")
both_tidy

# Same with glance (we can try doing this in one line)
both_glance <- bind_rows(
  "univariate" = glance(___),
  "multiple" = glance(___),
  .id = "model"
)
both_glance

# Show just fisherman's covariate information
both_tidy %>%
  ___(term == ___)
/exercises/exc_05_11.R
permissive
JoeSwinehart/RBootcamp
R
false
false
783
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topic_modelling.R
\name{loadFiles}
\alias{loadFiles}
\title{Load Corpus Files}
\usage{
loadFiles(parsed.corpus.folder.path, corpus_setup = "/**/*.reply.title_body.txt")
}
\arguments{
\item{parsed.corpus.folder.path}{The path to the corpus folder (e.g. 2012.parsed). Returns a folder used by \code{\link{rawToLDA}}.}
}
\description{
Used to load the files into memory. Assumes the format of the new crawler,
where each year of mailing list is inside a folder, and months inside
sub-folders. See \code{\link{rawToLDA}} to see its usage.
}
\details{
TODO: Parameterize the file extension (currently assumes reply.body.txt)
}
/man/loadFiles.Rd
no_license
sailuh/topicflowr
R
false
true
699
rd
library(ggplot2)

# power <- function(lamda) {
#   N=10000
#   power <- pnorm(5.2+lamda*sqrt(N))+1-pnorm(-5.2+lamda*sqrt(N))
#   return(power)
# }

power_1k <- function(lamda) {
  alphaStar = .0000001
  N=1000
  power <- pnorm(qnorm(alphaStar/2)+lamda*sqrt(N))+1-pnorm(-qnorm(alphaStar/2)+lamda*sqrt(N))
  return(power)
}

powerSquared_1k <- function(lamda){
  power_1k(lamda)*power_1k(lamda)
}

power_10k <- function(lamda) {
  alphaStar = .0000001
  N=10000
  power <- pnorm(qnorm(alphaStar/2)+lamda*sqrt(N))+1-pnorm(-qnorm(alphaStar/2)+lamda*sqrt(N))
  return(power)
}

powerSquared_10k <- function(lamda){
  power_10k(lamda)*power_10k(lamda)
}

power_100k <- function(lamda) {
  alphaStar = .0000001
  N=100000
  power <- pnorm(qnorm(alphaStar/2)+lamda*sqrt(N))+1-pnorm(-qnorm(alphaStar/2)+lamda*sqrt(N))
  return(power)
}

powerSquared_100k <- function(lamda){
  power_100k(lamda)*power_100k(lamda)
}

plot<-ggplot(data.frame(x=c(0,0.11)), aes(x=x))+xlab("Lambda")+ylab("Probability")+
  stat_function(fun=powerSquared_1k, geom="line", aes(colour="Replication, N=1k"))+
  stat_function(fun=power_1k, geom="line", aes(colour="Power, N=1k"))+
  stat_function(fun=powerSquared_10k, geom="line", aes(colour="Replication, N=10k"))+
  stat_function(fun=power_10k, geom="line", aes(colour="Power, N=10k"))+
  stat_function(fun=powerSquared_100k, geom="line", aes(colour="Replication, N=100k"))+
  stat_function(fun=power_100k, geom="line", aes(colour="Power, N=100k"))+
  ggtitle("Power and the probability of replication")+
  xlim(0,0.3)

ggsave(filename = "Power&BothReplication_ProbabilityCurve.jpg", plot = plot)
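# Possible consolidation sketch (assumption: same alpha threshold as above).
# A single parameterised function avoids repeating the per-N definitions:
# power_fn <- function(lamda, N, alphaStar = 1e-7) {
#   pnorm(qnorm(alphaStar / 2) + lamda * sqrt(N)) +
#     1 - pnorm(-qnorm(alphaStar / 2) + lamda * sqrt(N))
# }
# replication_fn <- function(lamda, N) power_fn(lamda, N)^2
# power_fn(0.05, N = 10000)        # same value as power_10k(0.05)
# replication_fn(0.05, N = 10000)  # same value as powerSquared_10k(0.05)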
/probability/PowerVSLamda.R
no_license
jzhou1011/replication
R
false
false
1,630
r
coding_summary <- function(user_doc_id, allocation_type="all", restrict_double_coded=TRUE){
  #' Summary of coding of document allocation.
  #'
  #' Returns tibble with the number of items of each variable-value combination coded in the document by the user.
  #'
  #' @param user_doc_id The user_doc_id(s) to summarize.
  #' @param allocation_type The allocation type ("training", "testing", "coding", "all").
  #' @param restrict_double_coded Only summarize articles which have been coded more than once (if FALSE, summarize all articles).
  #' @return Returns tibble with number of items of predefined types coded in the document by the user.
  #' @export

  user_docs<-durhamevp::get_allocation(user_doc_id = user_doc_id, allocation_type=allocation_type)
  user_docs<-dplyr::filter(user_docs, status=="COMPLETED")
  if(restrict_double_coded){
    doublecoded_docs<-user_docs %>%
      dplyr::group_by(document_id) %>%
      dplyr::tally() %>%
      dplyr::filter(n>1)
    user_docs<-dplyr::filter(user_docs, document_id %in% doublecoded_docs$document_id)
  }

  event_report <- durhamevp::get_event_report(user_doc_id=dplyr::pull(user_docs, "id"))
  #model_event_report <- durhamevp::get_event_report(model_event_report_id)
  tags<-durhamevp::get_tag(event_report_id = dplyr::pull(event_report, "id"))
  attributes<-durhamevp::get_attribute(tag_id = dplyr::pull(tags, "id"))

  event_report<-dplyr::left_join(event_report, user_docs, by=c("user_doc_id"="id"), suffix=c("event_report", "user_doc"))
  tags<-dplyr::left_join(tags, event_report, by=c("event_report_id"="id"), suffix=c(".tags", "event_report"))
  attributes<-dplyr::left_join(attributes, tags, by=c("tag_id"="id"), suffix=c("attributes", "tags"))

  user_doc_coding_counts<-user_docs %>%
    mutate(level="user_doc") %>%
    tidyr::gather(variable, value, article_type, geo_relevant, time_relevant, electoral_nature, violence_nature, electoralviolence_nature, legibility, recommend_qualitative) %>%
    dplyr::rename(user_doc_id=id) %>%
    dplyr::group_by(user_doc_id, user_id, document_id, level, variable, value) %>%
    dplyr::tally()

  event_report_coding_counts<-event_report %>%
    mutate(level="event_report") %>%
    tidyr::gather(variable, value, event_type, environment, event_start, event_end) %>%
    dplyr::group_by(user_doc_id, user_id, document_id, level, variable, value) %>%
    dplyr::tally()

  tag_coding_counts <- tags %>%
    mutate(level="tag") %>%
    dplyr::rename(value=tag_value) %>%
    unite(variable, tag_table, tag_variable) %>%
    group_by(user_doc_id, user_id, document_id, level, variable, value) %>%
    tally()

  attribute_coding_counts <- attributes %>%
    mutate(level="attribute") %>%
    dplyr::rename(value=attribute_value, variable=attribute) %>%
    group_by(user_doc_id, user_id, document_id, level, variable, value) %>%
    tally()

  coding_summary <- bind_rows(user_doc_coding_counts, event_report_coding_counts, tag_coding_counts, attribute_coding_counts)

  coding_summary
}

compare_summaries<- function(coding_summary){
  #'Compares summaries of coding by user.
  #'
  #'@param coding_summary A coding summary (usually generated by the \code{coding_summary} function.)
  #'@export
  #'

  coding_allocs<-coding_summary %>%
    ungroup() %>%
    group_by(user_doc_id, document_id, user_id) %>%
    summarize()

  # There are some duplicated coding allocations (9 on 20/9/2018) - need to find out why!
  # For now remove them
  coding_allocs <- coding_allocs[!duplicated(coding_allocs[,c("document_id", "user_id")]),]

  coder_pairs<- dplyr::full_join(coding_allocs, coding_allocs, by=c("document_id"), suffix=c(".case1", ".case2"))

  # create unique pairs of respondents
  coder_pairs <- coder_pairs %>%
    dplyr::group_by(document_id) %>%
    tidyr::expand(user_id.case1, user_id.case2) %>%
    dplyr::filter(user_id.case1 < user_id.case2) %>%
    tibble::rowid_to_column("pair_no")

  long_coder_pairs <- coder_pairs %>%
    tidyr::gather(which_user_id, user_id, user_id.case1, user_id.case2) %>%
    dplyr::left_join(coding_allocs, by = c("document_id", "user_id")) %>%
    dplyr::arrange(pair_no)

  long_coding<- dplyr::right_join(coding_summary, long_coder_pairs, by = c("user_doc_id", "user_id", "document_id"))

  compare_res<-long_coding %>%
    ungroup() %>%
    dplyr::select(document_id, pair_no, which_user_id, n, level, variable, value) %>%
    spread(which_user_id, n, fill=0) %>%
    mutate(abs_diff=abs(user_id.case1-user_id.case2)) %>%
    dplyr::select(-user_id.case1, -user_id.case2) %>%
    left_join(coder_pairs, by=c("pair_no", "document_id"), suffix=c(".val", "")) %>%
    gather(which_case, user_id, user_id.case1, user_id.case2)

  compare_res
}
/R/report_scoring_grouping_method.R
no_license
gidonc/durhamevp
R
false
false
4,911
r
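A minimal usage sketch for the two helpers above; it assumes the durhamevp package is attached, and the allocation ids are purely illustrative:

# Hypothetical user_doc_id values for completed, double-coded allocations
example_ids <- c(101, 102, 103)
summary_tbl <- coding_summary(example_ids, allocation_type = "coding")
comparison <- compare_summaries(summary_tbl)
# one row per document, coder pair, variable and value, with the absolute count difference
head(comparison)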
#' Reconstruct a network from a random matrix, not taking time series into account.
#'
#' @param inputArray Input data array; only its dimensions are used.
#' @param inputThreshold Threshold passed on to \code{threshold()}.
#' @param ... Further arguments passed on to \code{threshold()}.
#'
#' @export
construct_random <- function(inputArray, inputThreshold, ...) {
  UseMethod("construct_random")
}

#' @export
construct_random.default <- function(inputArray, inputThreshold, ...) {
  # The draft registered an igraph method, but its body indexed the input
  # like a matrix, so a default (matrix/data-frame) method is used here.
  # Draw a square random matrix with as many rows/columns as the input has rows
  # (the draft used randn(); rnorm() is the base-R equivalent).
  rowsinMatrix <- nrow(inputArray)
  randomMatrices <- matrix(rnorm(rowsinMatrix * rowsinMatrix), rowsinMatrix, rowsinMatrix)
  # Sparsify with the package's threshold() helper and return the result;
  # a graph could be built from it downstream (the draft hinted at graph_from_literal).
  threshold(randomMatrices, inputThreshold, ...)
}
/R/random.R
no_license
zhaoyizhuang/constructnet
R
false
false
689
r
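A tiny, hedged illustration of calling the reconstructor; the input matrix is made up and threshold() is assumed to be the package's own helper:

# Fake 10 x 10 input, only its dimensions matter here
ts_mat <- matrix(rnorm(100), nrow = 10)
recon <- construct_random(ts_mat, inputThreshold = 0.5)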
library(animation)
library(raster)
library(purrr)      # walk()
library(magrittr)   # %>%

# Shared breaks and colours so every layer is drawn on the same scale
brks <- round(seq(floor(cellStats(bio.stack[[1]], stat = "min", na.rm = TRUE)),
                  ceiling(cellStats(bio.stack[[8]], stat = "max", na.rm = TRUE)),
                  length.out = 10), 0)
nb <- length(brks) - 1
colors <- rev(heat.colors(nb))
years <- as.character(seq(2000, 2070, by = 10))

# Quick static check of every layer with the shared scale
bio.stack %>% walk(~plot(.x, col = colors, breaks = brks))

# Animate one frame per decade, titling each frame with its year
saveGIF({
  for (i in seq_along(years)) {
    plot(bio.stack[[i]], col = colors, breaks = brks,
         main = paste("Mean temp", years[i]))
  }
}, movie.name = "Mean_temp.gif", img.name = "Rplot",
convert = "convert", clean = TRUE)
/Clase6/loops.R
no_license
derek-corcoran-barrios/derek-corcoran-barrios.github.io
R
false
false
603
r
library(tidyverse)
library(dslabs)
/code.R
no_license
EvaCaravaca/homework_0
R
false
false
40
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_editing.R
\name{compute_editing}
\alias{compute_editing}
\title{Computing number of (sub)question edits}
\usage{
compute_editing(
  actions,
  respId = any_of(c("id", "token", "respid")),
  screenId = "screen",
  entryId = c(),
  returnFormat = c("long", "wide")
)
}
\arguments{
\item{actions}{A data frame containing data regarding \emph{actions} -
typically the \emph{actions} element of a list returned by
\code{\link{separate_logdata_types}}.}

\item{respId}{<\link[dplyr:dplyr_tidy_select]{tidy-select}> Variable(s)
identifying the respondent.}

\item{screenId}{<\link[dplyr:dplyr_tidy_select]{tidy-select}> Variable(s)
identifying the survey screen.}

\item{entryId}{<\link[dplyr:dplyr_tidy_select]{tidy-select}> Variable(s)
identifying the survey screen \emph{entry} (compare \link{separate_returns}).
Set to \code{c()} to indicate that results should be returned for whole
survey screens and not separately for each respondent's entry on a given
survey screen.}

\item{returnFormat}{String indicating whether results should be returned in
the \emph{long} format (row is a respondent-(sub)question) or in the
\emph{wide} format (row is a respondent, with a separate column for each
(sub)question). Can be abbreviated.}
}
\value{
If \code{returnFormat} is \emph{long}, a data frame with columns:
\describe{
  \item{respId}{Column(s) defined by \code{respId}.}
  \item{screenId}{Column(s) defined by \code{screenId}.}
  \item{entryId}{Column(s) defined by \code{entryId} (if any).}
  \item{questionCode}{Code of a question.}
  \item{subquestionCode}{Code of a subquestion.}
  \item{edits}{Number of edits.}
}
If \code{returnFormat} is \emph{wide}, the returned data frame will not
contain columns identifying the screen, \emph{entry} (if applicable),
question and subquestion; instead the values of these variables are appended
to the column names (separated by "_"), with each column reporting the
number of edits of a given (sub)question on a given screen (or
screen-\emph{entry}).
}
\description{
Computes the number of edits (i.e. marking or changing an answer) that each
respondent made to each (sub)question. Be aware that the \strong{returned
number of edits also includes the first marking of an answer}, so a
corrected response is indicated by a number of edits greater than 1.
}
\seealso{
\link{separate_logdata_types}
}
/man/compute_editing.Rd
no_license
tzoltak/logLime
R
false
true
2,375
rd
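A brief usage sketch, assuming the logLime package is attached and that `logdata` stands for a list already produced by separate_logdata_types():

edits_long <- compute_editing(logdata$actions, respId = "token", returnFormat = "long")
edits_wide <- compute_editing(logdata$actions, respId = "token", returnFormat = "wide")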
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accuracy.R
\name{accuracy}
\alias{accuracy}
\title{Model Accuracy}
\usage{
accuracy(x, known)
}
\arguments{
\item{x}{The model classification \code{\link[base]{list}}/\code{\link[base]{vector}}
(typically the results of \code{classify}).}

\item{known}{The known, expert-coded \code{\link[base]{list}}/\code{\link[base]{vector}}
of outcomes.}
}
\value{
Returns a list of the following elements:
\item{exact.in}{A numeric vector between 0-1 (0 no match; 1 perfect match)
comparing \code{x} to \code{known} for exact matching.}
\item{any.in}{A numeric vector between 0-1 (0 no match; 1 perfect match)
comparing \code{x} to \code{known} for non-location specific matching
(\code{\%in\%} is used). This ignores the differences in length between
\code{x} and \code{known}.}
\item{logical.in}{A logical version of \code{exact} with \code{TRUE} being
equal to 1 and all else being \code{FALSE}. This can be used to locate
perfect and/or non matches.}
\item{exact}{The proportion of the vector of tags in \code{x} matching
\code{known} exactly.}
\item{ordered}{The proportion of the elements of tags in \code{x} matching
\code{known} exactly (order matters).}
\item{adjusted}{An adjusted mean score of \code{ordered} and \code{unordered}.}
\item{unordered}{The proportion of the elements of tags in \code{x} matching
\code{known} exactly regardless of order.}
}
\description{
Check a model's tagging/categorizing accuracy against known, expert-coded
outcomes.
}
\examples{
known <- list(1:3, 3, NA, 4:5, 2:4, 5, integer(0))
tagged <- list(1:3, 3, 4, 5:4, c(2, 4:3), 5, integer(0))
accuracy(tagged, known)

## Examples
library(dplyr)
data(presidential_debates_2012)

discoure_markers <- list(
    response_cries = c("\\\\boh", "\\\\bah", "\\\\baha", "\\\\bouch", "yuk"),
    back_channels = c("uh[- ]huh", "uhuh", "yeah"),
    summons = "hey",
    justification = "because"
)

## Only Single Tag Allowed Per Text Element
mod1 <- presidential_debates_2012 \%>\%
    with(., term_count(dialogue, TRUE, discoure_markers)) \%>\%
    classify()

fake_known <- mod1
set.seed(1)
fake_known[sample(1:length(fake_known), 300)] <- "random noise"

accuracy(mod1, fake_known)

## Multiple Tags Allowed
mod2 <- presidential_debates_2012 \%>\%
    with(., term_count(dialogue, TRUE, discoure_markers)) \%>\%
    classify(n = 2)

fake_known2 <- mod2
set.seed(30)
fake_known2[sample(1:length(fake_known2), 500)] <- c("random noise", "back_channels")

accuracy(mod2, fake_known2)
}
\keyword{accuracy}
\keyword{fit}
\keyword{model}
/man/accuracy.Rd
no_license
jimhester/termco
R
false
true
2,576
rd
source(file = "base_funs.R") ############################### get the initial train data and feature engineer###################### ctrData <- read.csv.ffdf(file = "./train/train.csv", header=TRUE, colClasses = c("factor", rep("integer", 4), rep("factor", 9), rep("integer", 10))) iniTrain <- ctrData[,] iniTrain$id <- 1:nrow(iniTrain) save(list = "iniTrain", file = "iniTrain.RData") ############################### get the initial test data and feature engineer############## iniTest <- read.csv(file = "./test/test.csv", colClasses = c("id" = "character")) save(list = "iniTest", file = "iniTest.RData") ##############################feature engineer######################################### #feature engineering for initial test data tidyTestData <- featureEngineering(data = iniTest) save(list = "tidyTestData", file = "tidyTestData.RData") #feature engineering for initial train data tidyTrainData <- featureEngineering(data = iniTrain) save(list = "tidyTrainData", file = "tidyTrainData.RData") ##########create one way count features ######site counts iniTrain <- iniTrain %>% group_by(site_id) %>% mutate(site_id_cnt = n()) iniTrain <- iniTrain %>% group_by(site_domain) %>% mutate(site_domain_cnt = n()) iniTrain <- iniTrain %>% group_by(site_category) %>% mutate(site_category_cnt = n()) iniTrain <- iniTrain %>% group_by(site_id,site_domain) %>% mutate(site_id_domain = n()) iniTrain <- iniTrain %>% group_by(site_id,site_category) %>% mutate(site_id_category = n()) iniTrain <- iniTrain %>% group_by(site_domain,site_category) %>% mutate(site_domain_category = n()) iniTrain <- iniTrain %>% group_by(site_id,site_domain,site_category) %>% mutate(site_id_domain_category = n()) temp <- iniTrain %>% group_by(site_id) %>% summarise(site_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_domain) %>% summarise(site_domain_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_category) %>% summarise(site_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_id,site_domain) %>% summarise(site_id_domain = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_id,site_category) %>% summarise(site_id_category = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_domain,site_category) %>% summarise(site_domain_category = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_id,site_domain,site_category) %>% summarise(site_id_domain_category = n()) iniTest <- left_join(iniTest, temp) ######app counts iniTrain <- iniTrain %>% group_by(app_id) %>% mutate(app_id_cnt = n()) iniTrain <- iniTrain %>% group_by(app_domain) %>% mutate(app_domain_cnt = n()) iniTrain <- iniTrain %>% group_by(app_category) %>% mutate(app_category_cnt = n()) iniTrain <- iniTrain %>% group_by(app_id,app_domain) %>% mutate(app_id_domain_cnt = n()) iniTrain <- iniTrain %>% group_by(app_id,app_category) %>% mutate(app_id_category_cnt = n()) iniTrain <- iniTrain %>% group_by(app_domain,app_category) %>% mutate(app_domain_category_cnt = n()) iniTrain <- iniTrain %>% group_by(app_id,app_domain,app_category) %>% mutate(app_id_domain_category_cnt = n()) temp <- iniTrain %>% group_by(app_id) %>% summarise(app_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_domain) %>% summarise(app_domain_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_category) %>% summarise(app_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- 
iniTrain %>% group_by(app_id,app_domain) %>% summarise(app_id_domain_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_id,app_category) %>% summarise(app_id_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_domain,app_category) %>% summarise(app_domain_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_id,app_domain,app_category) %>% summarise(app_id_domain_category_cnt = n()) iniTest <- left_join(iniTest, temp) ######device counts iniTrain <- iniTrain %>% group_by(device_id) %>% mutate(device_id_cnt = n()) iniTrain <- iniTrain %>% group_by(device_ip) %>% mutate(device_ip_cnt = n()) iniTrain <- iniTrain %>% group_by(device_model) %>% mutate(device_model_cnt = n()) iniTrain <- iniTrain %>% group_by(device_type, device_model) %>% mutate(device_type_model_cnt = n()) iniTrain <- iniTrain %>% group_by(device_conn_type, device_model) %>% mutate(device_conn_model_cnt = n()) temp <- iniTrain %>% group_by(device_id) %>% summarise(device_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_ip) %>% summarise(device_ip_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_model) %>% summarise(device_model_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_type, device_model) %>% summarise(device_type_model_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_conn_type, device_model) %>% summarise(device_conn_model_cnt = n()) iniTest <- left_join(iniTest, temp) ######Cs counts iniTrain <- iniTrain %>% group_by(C14) %>% mutate(C14_cnt = n()) iniTrain <- iniTrain %>% group_by(C17) %>% mutate(C17_cnt = n()) iniTrain <- iniTrain %>% group_by(C19) %>% mutate(C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C20) %>% mutate(C20_cnt = n()) iniTrain <- iniTrain %>% group_by(C21) %>% mutate(C21_cnt = n()) temp <- iniTrain %>% group_by(C14) %>% summarise(C14_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C17) %>% summarise(C17_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C19) %>% summarise(C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C20) %>% summarise(C20_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C21) %>% summarise(C21_cnt = n()) iniTest <- left_join(iniTest, temp) ######banner_pos and other counts iniTrain <- iniTrain %>% group_by(banner_pos, site_id) %>% mutate(banner_pos_site_id_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, app_id) %>% mutate(banner_pos_app_id_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, device_id) %>% mutate(banner_pos_device_id_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, device_ip) %>% mutate(banner_pos_device_ip_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C14) %>% mutate(banner_pos_C14_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C21) %>% mutate(banner_pos_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C1) %>% mutate(banner_pos_C1_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C16) %>% mutate(banner_pos_C16_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C17) %>% mutate(banner_pos_C17_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C18) %>% mutate(banner_pos_C18_cnt = n()) temp <- iniTrain %>% group_by(banner_pos, site_id) %>% summarise(banner_pos_site_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% 
group_by(banner_pos, app_id) %>% summarise(banner_pos_app_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, device_id) %>% summarise(banner_pos_device_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, device_ip) %>% summarise(banner_pos_device_ip_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C14) %>% summarise(banner_pos_C14_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C21) %>% summarise(banner_pos_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C1) %>% summarise(banner_pos_C1_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C16) %>% summarise(banner_pos_C16_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C17) %>% summarise(banner_pos_C17_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C18) %>% summarise(banner_pos_C18_cnt = n()) iniTest <- left_join(iniTest, temp) ###### counts of two Cs iniTrain <- iniTrain %>% group_by(C1, C18) %>% mutate(C1_C18_cnt = n()) iniTrain <- iniTrain %>% group_by(C16, C18) %>% mutate(C16_C18_cnt = n()) iniTrain <- iniTrain %>% group_by(C16, C21) %>% mutate(C16_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C1, C19) %>% mutate(C1_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C18, C19) %>% mutate(C18_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C16, C19) %>% mutate(C16_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C1, C16) %>% mutate(C1_C16_cnt = n()) iniTrain <- iniTrain %>% group_by(C19, C21) %>% mutate(C19_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C19) %>% mutate(C15_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C21) %>% mutate(C15_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C20, C21) %>% mutate(C20_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C18, C20) %>% mutate(C18_C20_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C18) %>% mutate(C15_C18_cnt = n()) iniTrain <- iniTrain %>% group_by(C18, C21) %>% mutate(C18_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C1, C21) %>% mutate(C1_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C14, C21) %>% mutate(C14_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C17) %>% mutate(C15_C17_cnt = n()) iniTrain <- iniTrain %>% group_by(C14, C16) %>% mutate(C14_C16_cnt = n()) temp <- iniTrain %>% group_by(C1, C18) %>% summarise(C1_C18_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C16, C18) %>% summarise(C16_C18_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C16, C21) %>% summarise(C16_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C1, C19) %>% summarise(C1_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C18, C19) %>% summarise(C18_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C16, C19) %>% summarise(C16_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C1, C16) %>% summarise(C1_C16_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C19, C21) %>% summarise(C19_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C19) %>% summarise(C15_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C21) %>% summarise(C15_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- 
iniTrain %>% group_by(C20, C21) %>% summarise(C20_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C18, C20) %>% summarise(C18_C20_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C18) %>% summarise(C15_C18_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C18, C21) %>% summarise(C18_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C1, C21) %>% summarise(C1_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C14, C21) %>% summarise(C14_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C17) %>% summarise(C15_C17_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C14, C16) %>% summarise(C14_C16_cnt = n()) iniTest <- left_join(iniTest, temp) ####################################get smoothed likelihood features ############################### iniTest <- iniTest[, 1:23] noise = 0.3 pAve <- mean(iniTrain$click) adj <- 20 len <- nrow(iniTrain) iniTrain <- iniTrain %>% group_by(C1) %>% mutate(temp = (sum(click) - click), C1_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C1_exp <- iniTrain$C1_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C14) %>% mutate(temp = (sum(click) - click), C14_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C14_exp <- iniTrain$C14_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C15) %>% mutate(temp = (sum(click) - click), C15_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C15_exp <- iniTrain$C15_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C16) %>% mutate(temp = (sum(click) - click), C16_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C16_exp <- iniTrain$C16_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C17) %>% mutate(temp = (sum(click) - click), C17_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C17_exp <- iniTrain$C17_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C18) %>% mutate(temp = (sum(click) - click), C18_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C18_exp <- iniTrain$C18_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C19) %>% mutate(temp = (sum(click) - click), C19_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C19_exp <- iniTrain$C19_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C20) %>% mutate(temp = (sum(click) - click), C20_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C20_exp <- iniTrain$C20_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C21) %>% mutate(temp = (sum(click) - click), C21_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C21_exp <- iniTrain$C21_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(banner_pos) %>% mutate(temp = (sum(click) - click), banner_pos_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$banner_pos_exp <- iniTrain$banner_pos_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(site_id) %>% mutate(temp = (sum(click) - click), site_id_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$site_id_exp <- iniTrain$site_id_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(site_domain) %>% mutate(temp = (sum(click) - click), site_domain_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$site_domain_exp <- iniTrain$site_domain_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% 
group_by(site_category) %>% mutate(temp = (sum(click) - click), site_category_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$site_category_exp <- iniTrain$site_category_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(app_id) %>% mutate(temp = (sum(click) - click), app_id_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$app_id_exp <- iniTrain$app_id_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(app_domain) %>% mutate(temp = (sum(click) - click), app_domain_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$app_domain_exp <- iniTrain$app_domain_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(app_category) %>% mutate(temp = (sum(click) - click), app_category_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$app_category_exp <- iniTrain$app_category_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(device_id) %>% mutate(temp = (sum(click) - click), device_id_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$device_id_exp <- iniTrain$device_id_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(device_ip) %>% mutate(temp = (sum(click) - click), device_ip_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$device_ip_exp <- iniTrain$device_ip_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(device_model) %>% mutate(temp = (sum(click) - click), device_model_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$device_model_exp <- iniTrain$device_model_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(device_type) %>% mutate(temp = (sum(click) - click), device_type_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$device_type_exp <- iniTrain$device_type_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(device_conn_type) %>% mutate(temp = (sum(click) - click), device_conn_type_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$device_conn_type_exp <- iniTrain$device_conn_type_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(hours) %>% mutate(temp = (sum(click) - click), hours_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$hours_exp <- iniTrain$hours_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(banner_pos, C1) %>% mutate(temp = (sum(click) - click), banner_pos_C1_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$banner_pos_C1_exp <- iniTrain$banner_pos_C1_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(banner_pos, C14) %>% mutate(temp = (sum(click) - click), banner_pos_C14_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$banner_pos_C14_exp <- iniTrain$banner_pos_C14_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(banner_pos, C21) %>% mutate(temp = (sum(click) - click), banner_pos_C21_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$banner_pos_C21_exp <- iniTrain$banner_pos_C21_exp * (1 + (runif(len) - 0.5) * noise) #############get expect for test data###################### tmp <- iniTrain %>% group_by(C1) %>% summarise(C1_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C14) %>% summarise(C14_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C15) %>% summarise(C15_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C16) %>% summarise(C16_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, 
tmp) tmp <- iniTrain %>% group_by(C17) %>% summarise(C17_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C18) %>% summarise(C18_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C19) %>% summarise(C19_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C20) %>% summarise(C20_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(C21) %>% summarise(C21_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(banner_pos) %>% summarise(banner_pos_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(site_id) %>% summarise(site_id_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(site_domain) %>% summarise(site_domain_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(site_category) %>% summarise(site_category_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(app_id) %>% summarise(app_id_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(app_domain) %>% summarise(app_domain_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(app_category) %>% summarise(app_category_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(device_id) %>% summarise(device_id_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(device_ip) %>% summarise(device_ip_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(device_model) %>% summarise(device_model_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(device_type) %>% summarise(device_type_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(device_conn_type) %>% summarise(device_conn_type_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(hours) %>% summarise(hours_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(banner_pos, C14) %>% summarise(banner_pos_C14_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(banner_pos, C21) %>% summarise(banner_pos_C21_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) tmp <- iniTrain %>% group_by(banner_pos, C1) %>% summarise(banner_pos_C1_exp = (sum(click) + adj * pAve)/(n() + adj)) iniTest <- left_join(iniTest, tmp) iniTest[is.na(iniTest)] <- pAve save(list = "iniTest", file = "iniTest.RData") ##################get factor/numerical data for train and test respectively############# ##for test data numericalVars <- names(iniTest)[c(1,3,4,14:75)] numerical_test <- iniTest[, numericalVars] save(list = "numerical_test", file = "numerical_test.RData") factorVars <- names(tidyTestData)[c(1,5:15,24:34)] factor_test <- tidyTestData[, factorVars] save(list = "factor_test", file = "factor_test.RData") ##for train data factorVars <- 
names(tidyTrainData)[c(1:2,6:16,25:35)] factor_train <- tidyTrainData[, factorVars] save(list = "factor_train", file = "factor_train.RData") numericalVars <- names(iniTrain)[c(1:2,4:5,15:76)] numerical_train <- iniTrain[, numericalVars] save(list = "numerical_train", file = "numerical_train.RData") #################################split train data into 10 folds###################### set.seed(87334) folds <- createFolds(y = factor_train$click, k = 10) for(i in 1:10){ factor_training <- factor_train[folds[[i]],] save(list = "factor_training", file = paste("factor_training", i, ".RData", sep = "")) } for(i in 1:10){ numerical_training <- numerical_train[folds[[i]],] save(list = "numerical_training", file = paste("numerical_training", i, ".RData", sep = "")) }
/Click-ThroughRate/CTR_featureEngineering.R
no_license
nianxue/KaggleProject
R
false
false
23,606
r
source(file = "base_funs.R") ############################### get the initial train data and feature engineer###################### ctrData <- read.csv.ffdf(file = "./train/train.csv", header=TRUE, colClasses = c("factor", rep("integer", 4), rep("factor", 9), rep("integer", 10))) iniTrain <- ctrData[,] iniTrain$id <- 1:nrow(iniTrain) save(list = "iniTrain", file = "iniTrain.RData") ############################### get the initial test data and feature engineer############## iniTest <- read.csv(file = "./test/test.csv", colClasses = c("id" = "character")) save(list = "iniTest", file = "iniTest.RData") ##############################feature engineer######################################### #feature engineering for initial test data tidyTestData <- featureEngineering(data = iniTest) save(list = "tidyTestData", file = "tidyTestData.RData") #feature engineering for initial train data tidyTrainData <- featureEngineering(data = iniTrain) save(list = "tidyTrainData", file = "tidyTrainData.RData") ##########create one way count features ######site counts iniTrain <- iniTrain %>% group_by(site_id) %>% mutate(site_id_cnt = n()) iniTrain <- iniTrain %>% group_by(site_domain) %>% mutate(site_domain_cnt = n()) iniTrain <- iniTrain %>% group_by(site_category) %>% mutate(site_category_cnt = n()) iniTrain <- iniTrain %>% group_by(site_id,site_domain) %>% mutate(site_id_domain = n()) iniTrain <- iniTrain %>% group_by(site_id,site_category) %>% mutate(site_id_category = n()) iniTrain <- iniTrain %>% group_by(site_domain,site_category) %>% mutate(site_domain_category = n()) iniTrain <- iniTrain %>% group_by(site_id,site_domain,site_category) %>% mutate(site_id_domain_category = n()) temp <- iniTrain %>% group_by(site_id) %>% summarise(site_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_domain) %>% summarise(site_domain_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_category) %>% summarise(site_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_id,site_domain) %>% summarise(site_id_domain = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_id,site_category) %>% summarise(site_id_category = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_domain,site_category) %>% summarise(site_domain_category = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(site_id,site_domain,site_category) %>% summarise(site_id_domain_category = n()) iniTest <- left_join(iniTest, temp) ######app counts iniTrain <- iniTrain %>% group_by(app_id) %>% mutate(app_id_cnt = n()) iniTrain <- iniTrain %>% group_by(app_domain) %>% mutate(app_domain_cnt = n()) iniTrain <- iniTrain %>% group_by(app_category) %>% mutate(app_category_cnt = n()) iniTrain <- iniTrain %>% group_by(app_id,app_domain) %>% mutate(app_id_domain_cnt = n()) iniTrain <- iniTrain %>% group_by(app_id,app_category) %>% mutate(app_id_category_cnt = n()) iniTrain <- iniTrain %>% group_by(app_domain,app_category) %>% mutate(app_domain_category_cnt = n()) iniTrain <- iniTrain %>% group_by(app_id,app_domain,app_category) %>% mutate(app_id_domain_category_cnt = n()) temp <- iniTrain %>% group_by(app_id) %>% summarise(app_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_domain) %>% summarise(app_domain_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_category) %>% summarise(app_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- 
iniTrain %>% group_by(app_id,app_domain) %>% summarise(app_id_domain_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_id,app_category) %>% summarise(app_id_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_domain,app_category) %>% summarise(app_domain_category_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(app_id,app_domain,app_category) %>% summarise(app_id_domain_category_cnt = n()) iniTest <- left_join(iniTest, temp) ######device counts iniTrain <- iniTrain %>% group_by(device_id) %>% mutate(device_id_cnt = n()) iniTrain <- iniTrain %>% group_by(device_ip) %>% mutate(device_ip_cnt = n()) iniTrain <- iniTrain %>% group_by(device_model) %>% mutate(device_model_cnt = n()) iniTrain <- iniTrain %>% group_by(device_type, device_model) %>% mutate(device_type_model_cnt = n()) iniTrain <- iniTrain %>% group_by(device_conn_type, device_model) %>% mutate(device_conn_model_cnt = n()) temp <- iniTrain %>% group_by(device_id) %>% summarise(device_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_ip) %>% summarise(device_ip_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_model) %>% summarise(device_model_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_type, device_model) %>% summarise(device_type_model_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(device_conn_type, device_model) %>% summarise(device_conn_model_cnt = n()) iniTest <- left_join(iniTest, temp) ######Cs counts iniTrain <- iniTrain %>% group_by(C14) %>% mutate(C14_cnt = n()) iniTrain <- iniTrain %>% group_by(C17) %>% mutate(C17_cnt = n()) iniTrain <- iniTrain %>% group_by(C19) %>% mutate(C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C20) %>% mutate(C20_cnt = n()) iniTrain <- iniTrain %>% group_by(C21) %>% mutate(C21_cnt = n()) temp <- iniTrain %>% group_by(C14) %>% summarise(C14_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C17) %>% summarise(C17_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C19) %>% summarise(C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C20) %>% summarise(C20_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C21) %>% summarise(C21_cnt = n()) iniTest <- left_join(iniTest, temp) ######banner_pos and other counts iniTrain <- iniTrain %>% group_by(banner_pos, site_id) %>% mutate(banner_pos_site_id_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, app_id) %>% mutate(banner_pos_app_id_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, device_id) %>% mutate(banner_pos_device_id_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, device_ip) %>% mutate(banner_pos_device_ip_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C14) %>% mutate(banner_pos_C14_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C21) %>% mutate(banner_pos_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C1) %>% mutate(banner_pos_C1_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C16) %>% mutate(banner_pos_C16_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C17) %>% mutate(banner_pos_C17_cnt = n()) iniTrain <- iniTrain %>% group_by(banner_pos, C18) %>% mutate(banner_pos_C18_cnt = n()) temp <- iniTrain %>% group_by(banner_pos, site_id) %>% summarise(banner_pos_site_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% 
group_by(banner_pos, app_id) %>% summarise(banner_pos_app_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, device_id) %>% summarise(banner_pos_device_id_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, device_ip) %>% summarise(banner_pos_device_ip_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C14) %>% summarise(banner_pos_C14_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C21) %>% summarise(banner_pos_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C1) %>% summarise(banner_pos_C1_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C16) %>% summarise(banner_pos_C16_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C17) %>% summarise(banner_pos_C17_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(banner_pos, C18) %>% summarise(banner_pos_C18_cnt = n()) iniTest <- left_join(iniTest, temp) ###### counts of two Cs iniTrain <- iniTrain %>% group_by(C1, C18) %>% mutate(C1_C18_cnt = n()) iniTrain <- iniTrain %>% group_by(C16, C18) %>% mutate(C16_C18_cnt = n()) iniTrain <- iniTrain %>% group_by(C16, C21) %>% mutate(C16_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C1, C19) %>% mutate(C1_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C18, C19) %>% mutate(C18_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C16, C19) %>% mutate(C16_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C1, C16) %>% mutate(C1_C16_cnt = n()) iniTrain <- iniTrain %>% group_by(C19, C21) %>% mutate(C19_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C19) %>% mutate(C15_C19_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C21) %>% mutate(C15_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C20, C21) %>% mutate(C20_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C18, C20) %>% mutate(C18_C20_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C18) %>% mutate(C15_C18_cnt = n()) iniTrain <- iniTrain %>% group_by(C18, C21) %>% mutate(C18_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C1, C21) %>% mutate(C1_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C14, C21) %>% mutate(C14_C21_cnt = n()) iniTrain <- iniTrain %>% group_by(C15, C17) %>% mutate(C15_C17_cnt = n()) iniTrain <- iniTrain %>% group_by(C14, C16) %>% mutate(C14_C16_cnt = n()) temp <- iniTrain %>% group_by(C1, C18) %>% summarise(C1_C18_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C16, C18) %>% summarise(C16_C18_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C16, C21) %>% summarise(C16_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C1, C19) %>% summarise(C1_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C18, C19) %>% summarise(C18_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C16, C19) %>% summarise(C16_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C1, C16) %>% summarise(C1_C16_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C19, C21) %>% summarise(C19_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C19) %>% summarise(C15_C19_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C21) %>% summarise(C15_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- 
iniTrain %>% group_by(C20, C21) %>% summarise(C20_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C18, C20) %>% summarise(C18_C20_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C18) %>% summarise(C15_C18_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C18, C21) %>% summarise(C18_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C1, C21) %>% summarise(C1_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C14, C21) %>% summarise(C14_C21_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C15, C17) %>% summarise(C15_C17_cnt = n()) iniTest <- left_join(iniTest, temp) temp <- iniTrain %>% group_by(C14, C16) %>% summarise(C14_C16_cnt = n()) iniTest <- left_join(iniTest, temp) ####################################get smoothed likelihood features ############################### iniTest <- iniTest[, 1:23] noise = 0.3 pAve <- mean(iniTrain$click) adj <- 20 len <- nrow(iniTrain) iniTrain <- iniTrain %>% group_by(C1) %>% mutate(temp = (sum(click) - click), C1_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C1_exp <- iniTrain$C1_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C14) %>% mutate(temp = (sum(click) - click), C14_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C14_exp <- iniTrain$C14_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C15) %>% mutate(temp = (sum(click) - click), C15_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C15_exp <- iniTrain$C15_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C16) %>% mutate(temp = (sum(click) - click), C16_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C16_exp <- iniTrain$C16_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C17) %>% mutate(temp = (sum(click) - click), C17_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C17_exp <- iniTrain$C17_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C18) %>% mutate(temp = (sum(click) - click), C18_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C18_exp <- iniTrain$C18_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C19) %>% mutate(temp = (sum(click) - click), C19_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C19_exp <- iniTrain$C19_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C20) %>% mutate(temp = (sum(click) - click), C20_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C20_exp <- iniTrain$C20_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(C21) %>% mutate(temp = (sum(click) - click), C21_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$C21_exp <- iniTrain$C21_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(banner_pos) %>% mutate(temp = (sum(click) - click), banner_pos_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$banner_pos_exp <- iniTrain$banner_pos_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(site_id) %>% mutate(temp = (sum(click) - click), site_id_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$site_id_exp <- iniTrain$site_id_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% group_by(site_domain) %>% mutate(temp = (sum(click) - click), site_domain_exp = (temp + adj * pAve)/(n() - 1 + adj)) iniTrain$site_domain_exp <- iniTrain$site_domain_exp * (1 + (runif(len) - 0.5) * noise) iniTrain <- iniTrain %>% 
  group_by(site_category) %>%
  mutate(temp = (sum(click) - click),
         site_category_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$site_category_exp <- iniTrain$site_category_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(app_id) %>%
  mutate(temp = (sum(click) - click),
         app_id_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$app_id_exp <- iniTrain$app_id_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(app_domain) %>%
  mutate(temp = (sum(click) - click),
         app_domain_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$app_domain_exp <- iniTrain$app_domain_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(app_category) %>%
  mutate(temp = (sum(click) - click),
         app_category_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$app_category_exp <- iniTrain$app_category_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(device_id) %>%
  mutate(temp = (sum(click) - click),
         device_id_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$device_id_exp <- iniTrain$device_id_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(device_ip) %>%
  mutate(temp = (sum(click) - click),
         device_ip_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$device_ip_exp <- iniTrain$device_ip_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(device_model) %>%
  mutate(temp = (sum(click) - click),
         device_model_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$device_model_exp <- iniTrain$device_model_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(device_type) %>%
  mutate(temp = (sum(click) - click),
         device_type_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$device_type_exp <- iniTrain$device_type_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(device_conn_type) %>%
  mutate(temp = (sum(click) - click),
         device_conn_type_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$device_conn_type_exp <- iniTrain$device_conn_type_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(hours) %>%
  mutate(temp = (sum(click) - click),
         hours_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$hours_exp <- iniTrain$hours_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(banner_pos, C1) %>%
  mutate(temp = (sum(click) - click),
         banner_pos_C1_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$banner_pos_C1_exp <- iniTrain$banner_pos_C1_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(banner_pos, C14) %>%
  mutate(temp = (sum(click) - click),
         banner_pos_C14_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$banner_pos_C14_exp <- iniTrain$banner_pos_C14_exp * (1 + (runif(len) - 0.5) * noise)
iniTrain <- iniTrain %>%
  group_by(banner_pos, C21) %>%
  mutate(temp = (sum(click) - click),
         banner_pos_C21_exp = (temp + adj * pAve)/(n() - 1 + adj))
iniTrain$banner_pos_C21_exp <- iniTrain$banner_pos_C21_exp * (1 + (runif(len) - 0.5) * noise)

#############get expect for test data######################
tmp <- iniTrain %>% group_by(C1) %>%
  summarise(C1_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C14) %>%
  summarise(C14_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C15) %>%
  summarise(C15_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C16) %>%
  summarise(C16_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C17) %>%
  summarise(C17_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C18) %>%
  summarise(C18_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C19) %>%
  summarise(C19_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C20) %>%
  summarise(C20_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(C21) %>%
  summarise(C21_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(banner_pos) %>%
  summarise(banner_pos_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(site_id) %>%
  summarise(site_id_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(site_domain) %>%
  summarise(site_domain_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(site_category) %>%
  summarise(site_category_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(app_id) %>%
  summarise(app_id_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(app_domain) %>%
  summarise(app_domain_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(app_category) %>%
  summarise(app_category_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(device_id) %>%
  summarise(device_id_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(device_ip) %>%
  summarise(device_ip_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(device_model) %>%
  summarise(device_model_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(device_type) %>%
  summarise(device_type_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(device_conn_type) %>%
  summarise(device_conn_type_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(hours) %>%
  summarise(hours_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(banner_pos, C14) %>%
  summarise(banner_pos_C14_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(banner_pos, C21) %>%
  summarise(banner_pos_C21_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)
tmp <- iniTrain %>% group_by(banner_pos, C1) %>%
  summarise(banner_pos_C1_exp = (sum(click) + adj * pAve)/(n() + adj))
iniTest <- left_join(iniTest, tmp)

iniTest[is.na(iniTest)] <- pAve
save(list = "iniTest", file = "iniTest.RData")

##################get factor/numerical data for train and test respectively#############
##for test data
numericalVars <- names(iniTest)[c(1,3,4,14:75)]
numerical_test <- iniTest[, numericalVars]
save(list = "numerical_test", file = "numerical_test.RData")
factorVars <- names(tidyTestData)[c(1,5:15,24:34)]
factor_test <- tidyTestData[, factorVars]
save(list = "factor_test", file = "factor_test.RData")
##for train data
factorVars <- names(tidyTrainData)[c(1:2,6:16,25:35)]
factor_train <- tidyTrainData[, factorVars]
save(list = "factor_train", file = "factor_train.RData")
numericalVars <- names(iniTrain)[c(1:2,4:5,15:76)]
numerical_train <- iniTrain[, numericalVars]
save(list = "numerical_train", file = "numerical_train.RData")

#################################split train data into 10 folds######################
set.seed(87334)
folds <- createFolds(y = factor_train$click, k = 10)
for(i in 1:10){
  factor_training <- factor_train[folds[[i]],]
  save(list = "factor_training", file = paste("factor_training", i, ".RData", sep = ""))
}
for(i in 1:10){
  numerical_training <- numerical_train[folds[[i]],]
  save(list = "numerical_training", file = paste("numerical_training", i, ".RData", sep = ""))
}
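# --- Added sketch (not part of the original script) ---------------------------
# A minimal, self-contained illustration of the smoothed leave-one-out target
# encoding used above: each row's category rate excludes its own click and is
# shrunk toward the global rate pAve. The column names and the adj value below
# are toy assumptions, not the ones from the original pipeline.
library(dplyr)

toy  <- data.frame(cat = c("a", "a", "a", "b", "b"), click = c(1, 0, 1, 0, 1))
adj  <- 2                  # smoothing strength (assumed)
pAve <- mean(toy$click)    # global click rate used as the prior

toy <- toy %>%
  group_by(cat) %>%
  # leave-one-out click sum for the row's category, shrunk toward pAve
  mutate(cat_exp = (sum(click) - click + adj * pAve) / (n() - 1 + adj)) %>%
  ungroup()
print(toy)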
corr <- function(directory, threshold = 0){
  files <- list.files(directory, full.names = TRUE)
  result <- numeric()
  for (i in 1:332){
    csv <- read.csv(files[i])
    if (sum(complete.cases(csv)) >= threshold) {
      data <- csv[complete.cases(csv), ]
      calculate <- cor(data$sulfate, data$nitrate)
      result <- c(result, calculate)
    }
  }
  result
}
/corr.R
no_license
xinyuanwu9/XinyuanRepo
R
false
false
358
r
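# --- Added usage sketch (not part of corr.R) ----------------------------------
# corr() above hard-codes 332 files, each with `sulfate` and `nitrate` columns.
# This sketch fabricates a matching toy directory in tempdir() just to show the
# calling convention; the file layout is an assumption, not the original data.
toy_dir <- file.path(tempdir(), "toy_specdata")
dir.create(toy_dir, showWarnings = FALSE)
set.seed(1)
for (i in 1:332) {
  df <- data.frame(sulfate = rnorm(10), nitrate = rnorm(10))
  write.csv(df, file.path(toy_dir, sprintf("%03d.csv", i)), row.names = FALSE)
}
cr <- corr(toy_dir, threshold = 5)   # one correlation per file passing the threshold
summary(cr)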
# This file is created by Chuong Van Nguyen, GIST, South Korea
# Install packages
#install.packages('neuralnet')
#install.packages('ggplot2')
#install.packages('nnet')
#install.packages('dplyr')
#install.packages('reshape2')
#install.packages('reshape')
#install.packages("magrittr")

# import library
library(magrittr)
library(neuralnet)
library(ggplot2)
library(nnet)
library(dplyr)
library(reshape2)
library(reshape)
library(caTools)

num_run = 20
dataset = read.csv('breast_cancer.csv')
dataset = dataset[,2:11]

for (k in 1:num_run){
  split = sample.split(dataset$Class, SplitRatio = 0.75)
  training_set = subset(dataset, split == TRUE)
  test_set = subset(dataset, split == FALSE)

  # plot data
  exploratory_bc <- melt(dataset)
  exploratory_bc %>%
    ggplot(aes(x = factor(variable), y = value)) +
    geom_violin() +
    geom_jitter(height = 0, width = 0.1, aes(colour = Class), alpha = 0.7) +
    theme_minimal()

  # Convert the observation class into a one-hot vector.
  labels_training_set <- class.ind(as.factor(training_set$Class))
  labels_test_set <- class.ind(as.factor(test_set$Class))

  # Standardize column vector
  standardiser <- function(x){(x - min(x))/(max(x) - min(x))}

  # Standardize the predictors by using lapply
  training_set[, 1:9] <- lapply(training_set[, 1:9], standardiser)
  test_set[, 1:9] <- lapply(test_set[, 1:9], standardiser)

  # Combine the one-hot labels and the standardized predictors.
  pre_process_training_set <- cbind(training_set[, 1:9], labels_training_set)
  pre_process_test_set <- cbind(test_set[, 1:9], labels_test_set)

  # Define the formula that the neuralnet will be run on, using as.formula.
  f <- as.formula("benign + malignant ~ Cl.thickness + Cell.size + Cell.shape + Marg.adhesion + Epith.c.size + Bare.nuclei + Bl.cromatin + Normal.nucleoli + Mitoses")

  # Create a neural network with three hidden layers of size 14, 10 and 5
  bc_net <- neuralnet(f, data = pre_process_training_set, hidden = c(14, 10, 5),
                      act.fct = "tanh", linear.output = FALSE)

  # Plot neural network.
  plot(bc_net)

  # predict with test_set
  bc_preds <- neuralnet::compute(bc_net, pre_process_test_set[, 1:9])

  # compute model accuracy
  origi_vals <- max.col(pre_process_test_set[, 10:11])
  pr.nn_2 <- max.col(bc_preds$net.result)
  if (k == 1){
    MA = round(mean(pr.nn_2 == origi_vals)*100, 2)
  } else {
    MA = MA + round(mean(pr.nn_2 == origi_vals)*100, 2)
  }
  print(paste("Model Accuracy: ", MA, "%.", sep = ""))
}

MA = MA/num_run
print(paste("Model Accuracy: ", MA, "%.", sep = ""))
/Neural network/Neural_Network_breast_cancer_Chuong.R
no_license
nguyenvchuong/Data-classification-in-machine-learning-based-on-virtually-attractive-force
R
false
false
2,586
r
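# --- Added sketch (not part of the original script) ---------------------------
# Small illustration of the two preprocessing steps used above: nnet::class.ind()
# turns a factor into one-hot indicator columns, and the min-max standardiser
# rescales a numeric vector to [0, 1]. The toy values are assumptions made for
# this example only.
library(nnet)

cls <- factor(c("benign", "malignant", "benign"))
class.ind(cls)                  # one-hot matrix with columns benign / malignant

standardiser <- function(x){ (x - min(x)) / (max(x) - min(x)) }
standardiser(c(2, 4, 6, 10))    # -> 0.00 0.25 0.50 1.00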
#' Moran internal.
#' 
#' @param Z Vector, matrix or data frame.
#' @param con Connection network.
#' @param nsim Number of Monte-Carlo simulations. 
#' @param alternative The alternative hypothesis. If "auto" is selected (default) the 
#' program determines the hypothesis by difference between the median of the simulations 
#' and the observed value. Other options are: "two.sided", "greater" and "less". 
#' If test == cross, for the first interval (d == 0) the p and CI are computed with cor.test.
#' @param adjust.n Should the number of individuals be adjusted? (warning, this would 
#' change variances)
#' @param plotit Should a plot of the simulations be generated? 
#' @author Leandro Roser \email{leandroroser@@ege.fcen.uba.ar}
#' 
#' @keywords internal

int.moran <- function(Z, con, nsim, alternative, test = "permutation", 
                      adjust.n = FALSE, plotit) {
  
  N <- length(Z)
  wg <- int.check.con(con)
  
  # weight adjustment to number of connections
  if(adjust.n == TRUE) {
    colTRUE <- apply(wg, 1, sum)
    colTRUE[colTRUE != 0] <- 1
    Nc <- sum(colTRUE)
  } else {
    Nc <- N
  }
  
  # Moran's I computation
  z2 <- Z - mean(Z)
  SC <- drop(z2 %*% z2)
  VAR.Z <- SC / Nc
  W <- sum(wg)
  
  moranfun <- function(zc) {
    SCW <- wg %*% zc
    AUTOCOV.Z <- drop(zc %*% SCW) / W
    res <- AUTOCOV.Z / VAR.Z
    res
  }
  
  # observed value
  obs <- moranfun(z2)
  
  # Monte carlo replicates
  repsim <- numeric()
  for(i in 1:nsim) {
    samp <- sample(N)
    coefsup.mc <- z2[samp]
    repsim[i] <- moranfun(coefsup.mc)
  }
  
  # p value or CI computation
  random.m <- int.random.test(repsim = repsim, obs = obs, nsim = nsim, 
                              test = test, alternative = alternative)
  
  # listing results
  if(test == "permutation") {
    res <- list("analysis" = "Moran's I", 
                "alternative" = random.m$alter, 
                "observation" = round(random.m$obs, 4), 
                "expectation" = round(random.m$exp, 4), 
                "nsim" = nsim, 
                "p.value" = round(random.m$p.val, 5), 
                "quantile" = round(random.m$CI, 4))
  } else {
    res <- list("analysis" = "Moran's I", 
                "observation" = round(random.m$obs, 4), 
                "nsim" = nsim, 
                "quantile" = round(random.m$CI, 4))
  }
  
  # plot
  if(plotit == TRUE) {
    hist(c(repsim, random.m$obs), xlab = "Moran's I", main = "Monte Carlo test")
    abline(v = obs, col = "red")
    points(obs, 0, col = "green", pch = 15, cex = 3.6)
    text(random.m$obs, 0, "obs")
  }
  
  res
}
/EcoGenetics/R/int.moran.R
no_license
ingted/R-Examples
R
false
false
2,771
r
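# --- Added sketch (not part of the EcoGenetics source) -------------------------
# int.moran() depends on internal helpers (int.check.con, int.random.test), so it
# is not runnable on its own. The statistic it permutes is simply
# I = (z' W z / sum(W)) / (z' z / N). A toy version with an assumed 4-node binary
# connection matrix:
Z  <- c(1.2, 0.8, 2.5, 2.9)
wg <- matrix(c(0, 1, 0, 0,
               1, 0, 1, 0,
               0, 1, 0, 1,
               0, 0, 1, 0), nrow = 4, byrow = TRUE)
z2 <- Z - mean(Z)
# observed Moran's I, mirroring moranfun() with adjust.n = FALSE
I_obs <- (drop(z2 %*% (wg %*% z2)) / sum(wg)) / (drop(z2 %*% z2) / length(Z))
I_obs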
# TODO: Reading data for tutorial
# 
# Author: Miguel Alvarez
################################################################################

library(xlsx)

setwd("M:/WorkspaceEclipse/Guides")

Concepts <- read.xlsx("data/wetlands_syntax.xlsx", sheetName="Concepts",
        stringsAsFactors=FALSE, encoding="UTF-8")
Synonyms <- read.xlsx("data/wetlands_syntax.xlsx", sheetName="Synonyms",
        stringsAsFactors=FALSE, encoding="UTF-8")
Codes <- read.xlsx("data/wetlands_syntax.xlsx", sheetName="Codes",
        stringsAsFactors=FALSE, encoding="UTF-8")

save(Codes, Concepts, Synonyms, file="data/wetland_syntax.rda")
/src/taxlist_syntax/reading_data.R
no_license
kamapu/Guides
R
false
false
643
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emr_operations.R
\name{emr_update_studio_session_mapping}
\alias{emr_update_studio_session_mapping}
\title{Updates the session policy attached to the user or group for the
specified Amazon EMR Studio}
\usage{
emr_update_studio_session_mapping(
  StudioId,
  IdentityId = NULL,
  IdentityName = NULL,
  IdentityType,
  SessionPolicyArn
)
}
\arguments{
\item{StudioId}{[required] The ID of the Amazon EMR Studio.}

\item{IdentityId}{The globally unique identifier (GUID) of the user or group.
For more information, see
\href{https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId}{UserId}
and
\href{https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-GroupId}{GroupId}
in the \emph{IAM Identity Center Identity Store API Reference}.
Either \code{IdentityName} or \code{IdentityId} must be specified.}

\item{IdentityName}{The name of the user or group to update. For more
information, see
\href{https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName}{UserName}
and
\href{https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName}{DisplayName}
in the \emph{IAM Identity Center Identity Store API Reference}.
Either \code{IdentityName} or \code{IdentityId} must be specified.}

\item{IdentityType}{[required] Specifies whether the identity to update is a
user or a group.}

\item{SessionPolicyArn}{[required] The Amazon Resource Name (ARN) of the
session policy to associate with the specified user or group.}
}
\description{
Updates the session policy attached to the user or group for the specified
Amazon EMR Studio. See
\url{https://www.paws-r-sdk.com/docs/emr_update_studio_session_mapping/}
for full documentation.
}
\keyword{internal}
/cran/paws.analytics/man/emr_update_studio_session_mapping.Rd
permissive
paws-r/paws
R
false
true
1,973
rd
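# --- Added usage sketch (not part of the generated Rd) -------------------------
# In the paws SDK the documented operation is called as a method on an EMR
# client object. The identifiers below (Studio ID, group name, policy ARN) are
# placeholders, not real resources, so the call is left commented out.
# svc <- paws::emr()
# svc$update_studio_session_mapping(
#   StudioId         = "es-EXAMPLE12345",
#   IdentityName     = "data-science-group",
#   IdentityType     = "GROUP",
#   SessionPolicyArn = "arn:aws:iam::123456789012:policy/EMRStudioPolicy"
# )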
library(ggplot2)
library(Rcpp)
library(readxl)
library(dplyr)

setwd("/f/mulinlab/huan/ALL_result_ICGC_ALL_drug/gene_network_merge_repurposing_model/V1/validation/gdkb_cgi_oncokb_mtctscan/")

# --------------- training set
dit <- "/f/mulinlab/huan/ALL_result_ICGC_ALL_drug/gene_network_merge_repurposing_model/V1/test_data/"
org <- read.table(file.path(dit, "./output/09_filter_test_data_for_logistic_regression.txt"), header = T, sep = "\t") %>%
  as.data.frame()
org2 <- org %>%
  dplyr::select(average_effective_drug_target_score, max_effective_drug_target_score,
                average_mutation_frequency, max_mutation_frequency, average_mutation_pathogenicity,
                max_mutation_pathogenicity, average_mutation_map_to_gene_level_score
                # max_mutation_pathogenicity,average_mutation_map_to_gene_level_score,max_mutation_map_to_gene_level_score
                , average_the_shortest_path_length, min_the_shortest_path_length, min_rwr_normal_P_value,
                median_rwr_normal_P_value, cancer_gene_exact_match_drug_target_ratio, average_del_svscore
                # ,average_dup_svscore,average_inv_svscore,average_tra_svscore,average_cnv_svscore,drug_repurposing)
                , average_dup_svscore, average_inv_svscore, average_cnv_svscore)

normalization <- function(x){
  return((x - mean(x)) / sd(x))
}  # normalize the features

# apply() runs normalization() column by column over org2
org1 <- apply(org2, 2, normalization) %>% data.frame()
org1$drug_repurposing <- org$drug_repurposing

# data to be predicted
huan <- read.table("./output/07_merge_negative_positive.txt", header = T, sep = "\t") %>%
  as.data.frame()
# huan_drug_cancer <- huan %>% dplyr::select(Drug_chembl_id_Drug_claim_primary_name,cancer_oncotree_id,cancer_oncotree_id_type)
huan_drug_cancer <- huan %>%
  dplyr::select(Drug_chembl_id_Drug_claim_primary_name, cancer_oncotree_id, cancer_oncotree_id_type, sample_type)
huan2 <- huan %>%
  dplyr::select(average_effective_drug_target_score, max_effective_drug_target_score,
                average_mutation_frequency, max_mutation_frequency, average_mutation_pathogenicity,
                max_mutation_pathogenicity, average_mutation_map_to_gene_level_score
                # max_mutation_pathogenicity,average_mutation_map_to_gene_level_score,max_mutation_map_to_gene_level_score
                , average_the_shortest_path_length, min_the_shortest_path_length, min_rwr_normal_P_value,
                median_rwr_normal_P_value, cancer_gene_exact_match_drug_target_ratio, average_del_svscore
                , average_dup_svscore, average_inv_svscore, average_cnv_svscore)

# ------------------------------------------- normalize the test dataset
mean_data <- function(x){
  return(mean(x))
}
training_mean <- apply(org2, 2, mean_data) %>% data.frame()

sd_data <- function(x){
  return(sd(x))
}
training_sd <- apply(org2, 2, sd_data) %>% data.frame()

training <- cbind(training_mean, training_sd)
training <- data.frame(t(as.matrix(training)))  # transpose: training_mean is now row 1 and training_sd is row 2
# training <- training %>% dplyr::select(-drug_repurposing)  # drop the drug_repurposing column

test = rbind(training, huan2)
huan1 <- apply(test, 2, function(x){
  # work on rows 3 onwards of each column, i.e. scale the test data with the training mean and sd
  return((x[3:length(x)] - x[1]) / x[2])
}) %>% data.frame

# ----------------------------------------------------------------------------------------------------
# Build the logistic model with the glm function.
# Fit the logistic model on the training data with glm.
# family: every response distribution (exponential family) allows several link functions to relate
# the mean to the linear predictor. A common choice is binomial(link = 'logit') -- the response
# follows a binomial distribution with a logit link, i.e. logistic regression.
# ----------------------------------------------------------------------
# true values of the test set
# org1 = cbind(org1, drug_repo)
pre <- glm(drug_repurposing ~ ., family = binomial(link = "logit"), data = org1)
summary(pre)
summary(org1)
summary(huan1)

# predict() returns the model predictions: the model object is pre, newdata is the test set,
# and type = "response" puts the predictions on the response (probability) scale.
predict. <- predict.glm(pre, type = 'response', newdata = huan1)
# classify by the predicted probability of class 1: values above the 0.338 cutoff return 1, otherwise 0
predict = ifelse(predict. > 0.338, 1, 0)
# keep the raw predicted probabilities
predict_value = predict.
# append the predictions as new columns
huan1$predict = predict
huan1$predict_value = predict_value

final_huan <- cbind(huan_drug_cancer, huan1)
setwd("/f/mulinlab/huan/ALL_result_ICGC_ALL_drug/gene_network_merge_repurposing_model/V1/validation/gdkb_cgi_oncokb_mtctscan/output/")
write.table(final_huan, "08_Independent_sample_repurposing.txt", row.names = F, col.names = T, quote = F, sep = "\t")  # save the table
# ------------------------------------------------
/Huan_link_all_script/ALL_result_ICGC_ALL_drug/gene_network_merge_repurposing_model/V1/validation/gdkb_cgi_oncokb_mtctscan/08_prediction_model1_Independent_sample.R
no_license
Lhhuan/drug_repurposing
R
false
false
5,013
r
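# --- Added sketch (not part of the original pipeline) --------------------------
# The script above z-scales the validation features with the *training* mean and
# sd by stacking two summary rows on top of the test rows and indexing x[1]/x[2].
# A more direct, equivalent formulation (toy column values assumed):
train_col <- c(10, 12, 14, 16)
test_col  <- c(11, 20)
scaled_test <- (test_col - mean(train_col)) / sd(train_col)
scaled_test   # same result the apply(..., (x[3:length(x)] - x[1]) / x[2]) trick produces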
library(Rwave)


### Name: signal_W_tilda.1
### Title: Pixel from Amber Camara
### Aliases: signal_W_tilda.1
### Keywords: datasets

### ** Examples

data(signal_W_tilda.1)
plot.ts(signal_W_tilda.1)
/data/genthat_extracted_code/Rwave/examples/signal_W_tilda.1.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
204
r
# Longitudinal check
# lmer() comes from lme4 and map_dbl() from purrr
library(lme4)
library(purrr)

detect_outlier <- function(data_string, cutoff) {
  # Read in String passed from Java client
  data <- read.csv(text = data_string)

  # Enforce types
  data$site <- as.factor(data$site)
  data$subject <- as.factor(data$subject)
  data$time <- as.numeric(data$time)
  data$val <- as.numeric(data$val)

  # Build mixed model
  model <- lmer(val ~ site + (1|subject), data)

  # Calculate Standardized Residuals
  res <- residuals(model)
  H <- hatvalues(model)
  sigma <- summary(model)$sigma
  res <- map_dbl(1:length(res), ~ res[[.]]/(sigma*sqrt(1-H[[.]])))
  data$residuals <- abs(res) # being loose here but its a demo

  # Label points as outliers if residuals are above cut off
  data$outlier <- ifelse(data$residuals >= cutoff, 1, 0)

  return(data)
}
/src/main/resources/R/lmer_outlier.R
no_license
pburnsdata/rserveDemo
R
false
false
790
r
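# --- Added usage sketch (not part of the original Rserve demo) -----------------
# detect_outlier() expects a CSV string with site, subject, time and val columns
# (the column names come from the function body; the values below are made up,
# and lmer may warn about a singular fit on such a tiny sample).
csv <- paste(
  "site,subject,time,val",
  "A,s1,1,1.0", "A,s1,2,1.2", "A,s2,1,0.9", "A,s2,2,1.1",
  "B,s3,1,1.1", "B,s3,2,9.0", "B,s4,1,1.0", "B,s4,2,1.2",
  sep = "\n")
out <- detect_outlier(csv, cutoff = 2)
out[out$outlier == 1, ]   # rows flagged as outliers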
library(testthat)

# no tests on CRAN because the check limit is 10 mins
if (identical(Sys.getenv("NOT_CRAN"), "true")) {
  set.seed(getOption("mlr.debug.seed"))
  test_check("mlr", filter = "_surv_")
}
/tests/run-surv.R
no_license
DY1995-star/mlr
R
false
false
204
r
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL

#' Amazon Route 53 Domains
#'
#' @description
#' Amazon Route 53 API actions let you register domain names and perform
#' related operations.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- route53domains(
#'   config = list(
#'     credentials = list(
#'       creds = list(
#'         access_key_id = "string",
#'         secret_access_key = "string",
#'         session_token = "string"
#'       ),
#'       profile = "string",
#'       anonymous = "logical"
#'     ),
#'     endpoint = "string",
#'     region = "string",
#'     close_connection = "logical",
#'     timeout = "numeric",
#'     s3_force_path_style = "logical",
#'     sts_regional_endpoint = "string"
#'   ),
#'   credentials = list(
#'     creds = list(
#'       access_key_id = "string",
#'       secret_access_key = "string",
#'       session_token = "string"
#'     ),
#'     profile = "string",
#'     anonymous = "logical"
#'   ),
#'   endpoint = "string",
#'   region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- route53domains()
#' svc$accept_domain_transfer_from_another_aws_account(
#'   Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#'  \link[=route53domains_accept_domain_transfer_from_another_aws_account]{accept_domain_transfer_from_another_aws_account} \tab Accepts the transfer of a domain from another Amazon Web Services account to the currentAmazon Web Services account\cr
#'  \link[=route53domains_associate_delegation_signer_to_domain]{associate_delegation_signer_to_domain} \tab Creates a delegation signer (DS) record in the registry zone for this domain name\cr
#'  \link[=route53domains_cancel_domain_transfer_to_another_aws_account]{cancel_domain_transfer_to_another_aws_account} \tab Cancels the transfer of a domain from the current Amazon Web Services account to another Amazon Web Services account\cr
#'  \link[=route53domains_check_domain_availability]{check_domain_availability} \tab This operation checks the availability of one domain name\cr
#'  \link[=route53domains_check_domain_transferability]{check_domain_transferability} \tab Checks whether a domain name can be transferred to Amazon Route 53\cr
#'  \link[=route53domains_delete_domain]{delete_domain} \tab This operation deletes the specified domain\cr
#'  \link[=route53domains_delete_tags_for_domain]{delete_tags_for_domain} \tab This operation deletes the specified tags for a domain\cr
#'  \link[=route53domains_disable_domain_auto_renew]{disable_domain_auto_renew} \tab This operation disables automatic renewal of domain registration for the specified domain\cr
#'  \link[=route53domains_disable_domain_transfer_lock]{disable_domain_transfer_lock} \tab This operation removes the transfer lock on the domain (specifically the clientTransferProhibited status) to allow domain transfers\cr
#'  \link[=route53domains_disassociate_delegation_signer_from_domain]{disassociate_delegation_signer_from_domain} \tab Deletes a delegation signer (DS) record in the registry zone for this domain name\cr
#'  \link[=route53domains_enable_domain_auto_renew]{enable_domain_auto_renew} \tab This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires\cr
#'  \link[=route53domains_enable_domain_transfer_lock]{enable_domain_transfer_lock} \tab This operation sets the transfer lock on the domain (specifically the clientTransferProhibited status) to prevent domain transfers\cr
#'  \link[=route53domains_get_contact_reachability_status]{get_contact_reachability_status} \tab For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation returns information about whether the registrant contact has responded\cr
#'  \link[=route53domains_get_domain_detail]{get_domain_detail} \tab This operation returns detailed information about a specified domain that is associated with the current Amazon Web Services account\cr
#'  \link[=route53domains_get_domain_suggestions]{get_domain_suggestions} \tab The GetDomainSuggestions operation returns a list of suggested domain names\cr
#'  \link[=route53domains_get_operation_detail]{get_operation_detail} \tab This operation returns the current status of an operation that is not completed\cr
#'  \link[=route53domains_list_domains]{list_domains} \tab This operation returns all the domain names registered with Amazon Route 53 for the current Amazon Web Services account if no filtering conditions are used\cr
#'  \link[=route53domains_list_operations]{list_operations} \tab Returns information about all of the operations that return an operation ID and that have ever been performed on domains that were registered by the current account\cr
#'  \link[=route53domains_list_prices]{list_prices} \tab Lists the following prices for either all the TLDs supported by Route 53, or the specified TLD:\cr
#'  \link[=route53domains_list_tags_for_domain]{list_tags_for_domain} \tab This operation returns all of the tags that are associated with the specified domain\cr
#'  \link[=route53domains_push_domain]{push_domain} \tab Moves a domain from Amazon Web Services to another registrar\cr
#'  \link[=route53domains_register_domain]{register_domain} \tab This operation registers a domain\cr
#'  \link[=route53domains_reject_domain_transfer_from_another_aws_account]{reject_domain_transfer_from_another_aws_account} \tab Rejects the transfer of a domain from another Amazon Web Services account to the current Amazon Web Services account\cr
#'  \link[=route53domains_renew_domain]{renew_domain} \tab This operation renews a domain for the specified number of years\cr
#'  \link[=route53domains_resend_contact_reachability_email]{resend_contact_reachability_email} \tab For operations that require confirmation that the email address for the registrant contact is valid, such as registering a new domain, this operation resends the confirmation email to the current email address for the registrant contact\cr
#'  \link[=route53domains_resend_operation_authorization]{resend_operation_authorization} \tab Resend the form of authorization email for this operation\cr
#'  \link[=route53domains_retrieve_domain_auth_code]{retrieve_domain_auth_code} \tab This operation returns the authorization code for the domain\cr
#'  \link[=route53domains_transfer_domain]{transfer_domain} \tab Transfers a domain from another registrar to Amazon Route 53\cr
#'  \link[=route53domains_transfer_domain_to_another_aws_account]{transfer_domain_to_another_aws_account} \tab Transfers a domain from the current Amazon Web Services account to another Amazon Web Services account\cr
#'  \link[=route53domains_update_domain_contact]{update_domain_contact} \tab This operation updates the contact information for a particular domain\cr
#'  \link[=route53domains_update_domain_contact_privacy]{update_domain_contact_privacy} \tab This operation updates the specified domain contact's privacy setting\cr
#'  \link[=route53domains_update_domain_nameservers]{update_domain_nameservers} \tab This operation replaces the current set of name servers for the domain with the specified set of name servers\cr
#'  \link[=route53domains_update_tags_for_domain]{update_tags_for_domain} \tab This operation adds or updates tags for a specified domain\cr
#'  \link[=route53domains_view_billing]{view_billing} \tab Returns all the domain-related billing records for the current Amazon Web Services account for a specified period
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname route53domains
#' @export
route53domains <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  svc <- .route53domains$operations
  svc <- set_config(svc, config)
  return(svc)
}

# Private API objects: metadata, handlers, interfaces, etc.
.route53domains <- list()

.route53domains$operations <- list()

.route53domains$metadata <- list(
  service_name = "route53domains",
  endpoints = list(
    "*" = list(endpoint = "route53domains.{region}.amazonaws.com", global = FALSE),
    "cn-*" = list(endpoint = "route53domains.{region}.amazonaws.com.cn", global = FALSE),
    "us-iso-*" = list(endpoint = "route53domains.{region}.c2s.ic.gov", global = FALSE),
    "us-isob-*" = list(endpoint = "route53domains.{region}.sc2s.sgov.gov", global = FALSE)
  ),
  service_id = "Route 53 Domains",
  api_version = "2014-05-15",
  signing_name = "route53domains",
  json_version = "1.1",
  target_prefix = "Route53Domains_v20140515"
)

.route53domains$service <- function(config = list()) {
  handlers <- new_handlers("jsonrpc", "v4")
  new_service(.route53domains$metadata, handlers, config)
}
/paws/R/route53domains_service.R
permissive
paws-r/paws
R
false
false
11,077
r
\name{Uplot}
\alias{Uplot}
\title{
Plot of Multipliers in Regression ANOVA Plot
}
\description{
This function graphically displays the coefficient multipliers
used in the Regression Plot for the given predictor.
}
\usage{
Uplot(X.qr, Xcolumn = 1, ...)
}
\arguments{
  \item{X.qr}{The design matrix or the QR decomposition of the design matrix.}
  \item{Xcolumn}{The column(s) of the design matrix under study; this
can be either integer valued or a character string.}
  \item{...}{Additional arguments to barchart.}
}
\value{A bar plot is displayed.}
\author{
W. John Braun
}
\examples{
# Jojoba oil data set
X <- p4.18[,-4]
Uplot(X, 1:4)
# NFL data set; see GFplot result first
X <- table.b1[,-1]
Uplot(X, c(2,3,9))
# In this example, x8 is the only predictor in
# the true model:
X <- pathoeg[,-10]
y <- pathoeg[,10]
pathoeg.F <- GFplot(X, y, plotIt=FALSE)
Uplot(X, "x8")
Uplot(X, 9)            # same as above
Uplot(pathoeg.F$QR, 9) # same as above
X <- table.b1[,-1]
Uplot(X, c("x2", "x3", "x9"))
}
\keyword{graphics}
/man/Uplot.Rd
no_license
cran/MPV
R
false
false
1,012
rd
library(magrittr) # path -------------------------------------------------------------------- data_path <- c("/project/huff/huff/TKI/data/RNA/F17FTSCCWLJ2064_HUMwcyR/Analysis_Report/BGI_result/Quantify/GeneExpression/GeneExpression") out_path <- c("/project/huff/huff/TKI/result/mRNA_DE/IM_DS") # load data --------------------------------------------------------------- all_exp <- readr::read_tsv(file.path(data_path,"AllSamples.GeneExpression.FPKM.xls")) %>% dplyr::select(Symbol,G1_1_FPKM,G1_2_FPKM,G1_3_FPKM,G2_1_FPKM,G2_2_FPKM,G2_3_FPKM) all_exp %>% readr::write_rds(file.path(out_path,"all_gene_exp.rds.gz"),compress = "gz") G2_1 <- readr::read_tsv(file.path(data_path,"G2_1.gene.fpkm.xls")) %>% dplyr::select(Symbol,FPKM) %>% dplyr::rename("G2_1"="FPKM") %>% dplyr::mutate(G2_1=ifelse(G2_1==0,0.01,G2_1)) G2_2 <- readr::read_tsv(file.path(data_path,"G2_2.gene.fpkm.xls")) %>% dplyr::select(Symbol,FPKM) %>% dplyr::rename("G2_2"="FPKM") %>% dplyr::mutate(G2_2=ifelse(G2_2==0,0.01,G2_2)) G2_3 <- readr::read_tsv(file.path(data_path,"G2_3.gene.fpkm.xls")) %>% dplyr::select(Symbol,FPKM) %>% dplyr::rename("G2_3"="FPKM") %>% dplyr::mutate(G2_3=ifelse(G2_3==0,0.01,G2_3)) G1_1 <- readr::read_tsv(file.path(data_path,"G1_1.gene.fpkm.xls")) %>% dplyr::select(Symbol,FPKM) %>% dplyr::rename("G1_1"="FPKM") %>% dplyr::mutate(G1_1=ifelse(G1_1==0,0.01,G1_1)) G1_2 <- readr::read_tsv(file.path(data_path,"G1_2.gene.fpkm.xls")) %>% dplyr::select(Symbol,FPKM) %>% dplyr::rename("G1_2"="FPKM") %>% dplyr::mutate(G1_2=ifelse(G1_2==0,0.01,G1_2)) G1_3 <- readr::read_tsv(file.path(data_path,"G1_3.gene.fpkm.xls")) %>% dplyr::select(Symbol,FPKM) %>% dplyr::rename("G1_3"="FPKM") %>% dplyr::mutate(G1_3=ifelse(G1_3==0,0.01,G1_3)) DE_all <- readr::read_tsv("/project/huff/huff/TKI/data/RNA/F17FTSCCWLJ2064_HUMwcyR/Analysis_Report/BGI_result/Quantify/DifferentiallyExpressedGene/DEGList/G1-VS-G2.NOIseq_Method.GeneDiffExp.xls") # 3 sample De overlap ----- fc_threshold <- 0.585 all_exp %>% dplyr::mutate(G2_G1_1=ifelse(log2(G2_1_FPKM/G1_1_FPKM)>=fc_threshold,"Up","None")) %>% dplyr::mutate(G2_G1_1=ifelse(log2(G2_1_FPKM/G1_1_FPKM)<=(-fc_threshold),"Down",G2_G1_1)) %>% dplyr::mutate(G2_G1_2=ifelse(log2(G2_2_FPKM/G1_2_FPKM)>=fc_threshold,"Up","None")) %>% dplyr::mutate(G2_G1_2=ifelse(log2(G2_2_FPKM/G1_2_FPKM)<=(-fc_threshold),"Down",G2_G1_2)) %>% dplyr::mutate(G2_G1_3=ifelse(log2(G2_3_FPKM/G1_3_FPKM)>=fc_threshold,"Up","None")) %>% dplyr::mutate(G2_G1_3=ifelse(log2(G2_3_FPKM/G1_3_FPKM)<=(-fc_threshold),"Down",G2_G1_3)) -> all_samples_DE_info # get overlap of 3 samples all_samples_DE_info %>% dplyr::filter(G2_G1_1=="Up" & G2_G1_2=="Up" & G2_G1_3=="Up") -> Up_in_all_samples all_samples_DE_info %>% dplyr::filter(G2_G1_1=="Down" & G2_G1_2=="Down" & G2_G1_3=="Down") -> Down_in_all_samples fn_test <- function(all_samples_DE_info,trend){ all_samples_DE_info %>% dplyr::filter(G2_G1_1==trend & G2_G1_2==trend) -> in_1_2 all_samples_DE_info %>% dplyr::filter(G2_G1_1==trend & G2_G1_3==trend) -> in_1_3 all_samples_DE_info %>% dplyr::filter(G2_G1_2==trend & G2_G1_3==trend) -> in_2_3 rbind(in_1_2,in_1_3) %>% rbind(in_2_3) %>% unique() } all_samples_DE_info %>% fn_test(trend = "Up") -> Up_in_2_samples all_samples_DE_info %>% fn_test(trend = "Down") -> Down_in_2_samples # fn_at_least_2<- function(a,b,c){ # a %>% # intersect(b) -> d_1 # a %>% # intersect(c) -> d_2 # b %>% # intersect(c) -> d_3 # c(d_1,d_2,d_3) %>% unique() ->result # return(result) # } # fn_at_least_2(DE_1_up$Symbol,DE_2_up$Symbol,DE_3_up$Symbol) -> genes_up_in_at_least_2_pairs # 
fn_at_least_2(DE_1_down$Symbol,DE_2_down$Symbol,DE_3_down$Symbol) -> genes_down_in_at_least_2_pairs # # c(genes_up_in_all_pairs,genes_down_in_all_pairs)-> genes_DE_in_at_least_2_pairs # noiseq result overlap --------------------------------------------------- # BGI result filter # filter condition: FC1.5 probability0.8 DE_all %>% dplyr::select(Symbol,`G2-Expression`,`G1-Expression`,`log2FoldChange(G2/G1)`,Probability) %>% dplyr::filter(Probability>=0.8) %>% dplyr::filter(abs(`log2FoldChange(G2/G1)`)>=fc_threshold) %>% dplyr::rename("log2FC"=`log2FoldChange(G2/G1)`) %>% dplyr::mutate(`DS/IM`=ifelse(log2FC>0,"Up","Down")) -> BGI_DE_all_0.8_1.5 BGI_DE_all_0.8_1.5 %>% dplyr::inner_join(all_samples_DE_info,by="Symbol") -> BGI_DE_all_0.8_1.5.info BGI_DE_all_0.8_1.5.info %>% readr::write_tsv(path = file.path(out_path,"DS-IM_BGI_DE_0.8_1.5_mRNA.info")) BGI_DE_all_0.8_1.5 %>% dplyr::select(Symbol) %>% dplyr::inner_join(all_exp,by="Symbol") -> BGI_DE_all_0.8_1.5_exp BGI_DE_all_0.8_1.5_exp %>% readr::write_tsv(path = file.path(out_path,"BGI_DE_all_0.8_1.5.exp")) # overlap with 3 sample # all samples BGI_DE_all_0.8_1.5 %>% dplyr::inner_join(Down_in_all_samples,by="Symbol") -> Down_in_all_test BGI_DE_all_0.8_1.5 %>% dplyr::inner_join(Up_in_all_samples,by="Symbol") -> Up_in_all_test rbind(Down_in_all_test,Up_in_all_test) %>% readr::write_tsv(path = file.path(out_path,"DE_in_all-AND-in_BGI_0.8_1.5")) c(Down_in_all_test$Symbol,Up_in_all_test$Symbol) -> DE_in_all_test.list # at least 2 samples BGI_DE_all_0.8_1.5 %>% dplyr::inner_join(Down_in_2_samples,by="Symbol") -> Down_in_2_and_all_test BGI_DE_all_0.8_1.5 %>% dplyr::inner_join(Up_in_2_samples,by="Symbol") -> Up_in_2_and_all_test rbind(Down_in_2_and_all_test,Up_in_2_and_all_test) %>% readr::write_tsv(path = file.path(out_path,"DE_in_2-AND-in_BGI_0.8_1.5")) c(Down_in_2_and_all_test$Symbol,Up_in_2_and_all_test$Symbol) -> DE_in_2_and_all_test.list # statistic -------------------------------------------------------------- BGI_DE_all_0.8_1.5$`DS/IM` %>% table() # plot -------------------------------------------------------------------- DE_all%>% dplyr::rename("log2FC"=`log2FoldChange(G2/G1)`) %>% dplyr::mutate(`G2/G1`=ifelse(log2FC>=(0.585) ,"Up","None")) %>% dplyr::mutate(`G2/G1`=ifelse(log2FC<=(-0.585),"Down",`G2/G1`)) %>% dplyr::mutate(`G2/G1`=ifelse(Probability>=0.8,`G2/G1`,"None")) %>% dplyr::mutate(`G2/G1`=ifelse(is.na(Probability),"None",`G2/G1`)) %>% dplyr::select(Symbol,log2FC,Probability,`G2/G1`) %>% dplyr::mutate(alpha=ifelse(Symbol %in% DE_in_2_and_all_test.list & `G2/G1` !="None",0.5,0.1)) %>% dplyr::mutate(alpha=ifelse(Symbol %in% DE_in_all_test.list & `G2/G1` !="None",1,alpha)) %>% # dplyr::mutate(Probability=-log10(1-Probability)) %>% dplyr::mutate(color=ifelse(`G2/G1`=="Up","red","grey")) %>% dplyr::mutate(color=ifelse(`G2/G1`=="Down","blue",color)) -> point_ready library(ggplot2) # FC and Probability distribution point_ready %>% ggplot() + geom_point(aes(x=log2FC,y=Probability,color=`G2/G1`,alpha=alpha)) + #,colour=point_ready$color scale_alpha_continuous( name="Significant Group", limits=c(0.1,1), breaks=c(0.1,0.5,1), labels=c("Only Noiseq","At 2 samples & Noiseq","All 3 samples & Noiseq") ) + xlab("Log2(FC)") + theme( legend.position = 'bottom', axis.title.x = element_text(size = 20), axis.title.y = element_text(size = 17), axis.text = element_text(size = 17), legend.text = element_text(size = 17), legend.title = element_text(size = 20) )-> p;p ggsave(file.path(out_path,"plot/G1-control.MA.plot.pdf"),p,device = "pdf",width = 3,height = 3) 
# Heatmap of DE genes ---------
library(ComplexHeatmap)

# expression data preparation
DE_exp <- as.data.frame(BGI_DE_all_0.8_1.5_exp %>%
  dplyr::filter(Symbol %in% DE_in_2_and_all_test.list))
rownames(DE_exp) <- DE_exp$Symbol
DE_exp <- DE_exp[,-1] %>% as.matrix()
colnames(DE_exp) <- sub("_FPKM","",colnames(DE_exp))

# annotation
BGI_DE_all_0.8_1.5 %>%
  dplyr::filter(Symbol %in% DE_in_2_and_all_test.list) -> DE.info
DE.info %>%
  dplyr::select(`DS/IM`) %>%
  as.data.frame() -> DE_anno
rownames(DE_anno) <- DE.info$Symbol

# row annotation
gene_anno_plot = rowAnnotation(df = DE_anno,
                               col = list(`DS/IM` = c("Up" = "red", "Down" = "green")),
                               width = unit(0.5, "cm"))

sam_anno <- data.frame(Group = c(rep("IM",3),rep("DS",3)))
rownames(sam_anno) <- DE_exp %>% colnames()
DE_exp[DE_exp==0] <- 0.01
log2(DE_exp) -> log2DE_exp
sam_anno_plot = HeatmapAnnotation(df = sam_anno,
                                  boxplot = anno_boxplot(log2DE_exp, axis = TRUE),
                                  col = list(Group = c("DS" = "pink", "IM" = "purple")))

DE_exp %>% t() %>% scale() %>% t() %>% as.data.frame() -> DE_exp_rowscale

# alternative output files (only the most recently opened device receives the heatmap)
# pdf(file.path(out_path,"plot/DE_mRNA_exp_heatmap.pdf"),width = 5,height = 6)
# pdf(file.path(out_path,"plot/DE_all_test_mRNA_exp_heatmap.pdf"),width = 5,height = 6)
pdf(file.path(out_path,"plot/DE_2_sample_and_noiseq_mRNA_exp_heatmap.pdf"),width = 5,height = 6)
he = Heatmap(DE_exp_rowscale,
             show_row_names = TRUE,
             cluster_columns = FALSE,
             top_annotation = sam_anno_plot,
             top_annotation_height = unit(3, "cm"),
             heatmap_legend_param = list(title = c("Expression")))
he + gene_anno_plot
dev.off()
/IM-DS/1_IM-DS_DE.R
no_license
Huffyphenix/IM_dis_proj
R
false
false
9,030
r
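The fn_at_least_2 helper referenced (in commented-out form) at the top of this script encodes the "differentially expressed in at least two of three pairwise comparisons" rule. A self-contained toy sketch of that logic; the gene symbols are invented for illustration and are not from the real data:

# Minimal sketch of the "at least 2 of 3" overlap logic; symbols are made up.
fn_at_least_2 <- function(a, b, c) {
  unique(c(intersect(a, b), intersect(a, c), intersect(b, c)))
}
up_1 <- c("TP53", "EGFR", "MYC")
up_2 <- c("EGFR", "MYC", "KRAS")
up_3 <- c("MYC", "BRAF")
fn_at_least_2(up_1, up_2, up_3)   # "EGFR" "MYC"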
ma.motor.uni.df <- function(x,klasse_label=c("ikke_angivet"),discogrp=FALSE) { #til test # klynger <- manuelle.klynger.lav # klasse_label=c("ikke_angivet") # discogrp=FALSE klynger <- x klynger <- klynger[grep("^1.*", klynger, invert = TRUE)] # mat.e.result <- mob.mat[-274,-274] mat.e.result <- mat.e # etest(mat.e) klynge.liste.samlet.indeks <- list() for (name in sort(klynger)) { klynge.liste.samlet.indeks[name] <- klynge.liste.niveau.5[name] } # klynge.liste.samlet.indeks work.list.foer.sub <- sort(as.vector(unlist(df %>% filter(membership %in% klynger) %>% select(indeks)))) #det samme, det ene som liste og det andet som vector sort(as.vector(unlist(klynge.liste.samlet.indeks)))==work.list.foer.sub # yes de skal være ens det er de # find de disco der går # 1. fra segmenter af interesse aug.work.list.foer.sub <- sort(unique(unlist(lapply(work.list.foer.sub, function(x) which(mat.e[x,] != 0))))) # 2. til segmenter af interesse # aug.work.list.foer.sub <- sort(unique(unlist(lapply(work.list.foer.sub, function(x) which(mat.e[,x] != 0))))) # liste til aggregering af disco klynge.liste.e.seg.manuel.til.fra <- Reduce(function(x, y) replace(x, x[x %in% y], min(y)), c(list(seq_len(273)), unname(klynge.liste.samlet.indeks))) # det store aggregeringsnummer mat.e.result <- t(rowsum(t(rowsum(mat.e.result, klynge.liste.e.seg.manuel.til.fra)), klynge.liste.e.seg.manuel.til.fra)) # ncol(mat.e.result) # # view(mat.e.result)# her lagt sammen med den anden # etest(mat.e.result) # view(mat.e.result) # sum(mat.e.result) # de her to skal være ens, ellers er noget galt: length(unique(which(diag(mat.e.result) == 0)))==length(which(diag(mat.e.result) == 0)) # view(mat.e.result) # max(as.vector(mat.e.result)) # min(colSums(mat.e.result)) # etest(mat.e.result) #view(mat.e.result) # sum(mat.e.result) # diagonaler til at identificere elementer diag.submatrix <- diag(mat.e.result) diag.seg.df <- diag(e.mobmat.seg.niveau.5) diag.discodata <- diag(mob.mat[-274,-274]) diag.seg.df.udisco <-diag.seg.df[1:41] #hvis denne her bruges i which-statemenetet i 2. linje i nedenstående, istedet for diag.seg.df så kommer enkelt-grupperne fra 5-niveau med. men der er er noget der skal ordnes først, ikke aktuelt nu. \#todo seg.index <- which(!(diag.submatrix%in%diag.discodata)) diag.submatrix.seg <- diag.submatrix[seg.index] which(names(diag.seg.df)[which(diag.seg.df %in% diag.submatrix.seg)] %in% klynger)==seq_len(length(klynger)) # skal være sand for alle elementer # find de disco der går fra nye seg-index work.list.efter.sub <- seg.index # 1. 
fra segmenter af interesse aug.work.list.efter.sub <- sort(unique(unlist(lapply(work.list.efter.sub, function(x) which(mat.e.result[x,] != 0))))) # test tmp1 <- setdiff(aug.work.list.foer.sub,work.list.foer.sub) test.list.1 <- as.character(discodata$disco[tmp1]) tmp1 <- setdiff(aug.work.list.efter.sub,work.list.efter.sub) test.list.2 <- as.character(discodata$disco[tmp1]) length(test.list.1)==length(test.list.2) # de her to skal være ens mat.e.result <- mat.e.result[aug.work.list.efter.sub, aug.work.list.efter.sub] # diagonaler til at identificere elementer diag.submatrix <- diag(mat.e.result) seg.index <- which(!(diag.submatrix%in%diag.discodata)) diag.submatrix.seg <- diag.submatrix[seg.index] which(names(diag.seg.df.udisco)[which(diag.seg.df.udisco %in% diag.submatrix.seg)] %in% klynger)==seq_len(length(klynger)) # skal være sand for alle elementer # find ud af hvilket indeks der passer med diverse segmenter etc ikke.seg.index.i.disco <- as.numeric(names(diag.submatrix[which((diag.submatrix%in%diag.discodata))])) ikke.seg.index.i.submatrix <- which((diag.submatrix%in%diag.discodata)) colnames(mat.e.result)[ikke.seg.index.i.submatrix] <- as.character(discodata$disco[ikke.seg.index.i.disco]) rownames(mat.e.result)[ikke.seg.index.i.submatrix] <- as.character(discodata$disco[ikke.seg.index.i.disco]) colnames(mat.e.result)[ which(diag.submatrix %in% diag.seg.df.udisco)] <- names(diag.seg.df.udisco)[ which(diag.seg.df.udisco %in% diag.submatrix.seg)] rownames(mat.e.result)[ which(diag.submatrix %in% diag.seg.df.udisco)] <- names(diag.seg.df.udisco)[ which(diag.seg.df.udisco %in% diag.submatrix.seg)] ncol(mat.e.result) # view(mat.e.result) # max(as.vector(mat.e.result)) tmp.order <- append(colnames(mat.e.result) [seg.index] , colnames(mat.e.result)[ikke.seg.index.i.submatrix ] ) korrekt <- match(tmp.order,colnames(mat.e.result)) mat.e.result <- mat.e.result[korrekt, korrekt] # head(colnames(mat.e.result)) # yes (gør ikke noget der også er nogle discogrupper med, det vigtige er at der er er de segmenter der skal være) # ncol(mat.e.result) # view(mat.e.result) #og her, det er desuden korrekte dimnames # max(as.vector(mat.e.result)) # min(colSums(mat.e.result)) # etest(mat.e.result) # diagonaler til at identificere elementer diag.submatrix <- diag(mat.e.result) seg.index <- which(!(diag.submatrix%in%diag.discodata)) diag.submatrix.seg <- diag.submatrix[seg.index] ikke.seg.index.i.submatrix <- which((diag.submatrix%in%diag.discodata)) ikke.seg.index.i.disco <- which((diag.discodata%in%diag.submatrix)) which(names(diag.seg.df.udisco)[which(diag.seg.df.udisco %in% diag.submatrix.seg)] %in% klynger)==seq_len(length(klynger)) mat.e.result[ikke.seg.index.i.submatrix,ikke.seg.index.i.submatrix] <- 0 #fjerner interne ties i segmenter af interesse (hvis vi bare vil se hvor de går hen) # etest(mat.e.result) # yep diag.mat.e.result <- diag(mat.e.result) # for later use mat.e.result[ikke.seg.index.i.submatrix,seg.index] <- 0 ########## for aggregerede submatricer shit ############ # de tre matricer af interesse mat.e.result.seg.fra <- mat.e.result[seg.index,seg.index] mat.e.result <- mat.e.result[seg.index,ikke.seg.index.i.submatrix] # slå segmenter af interesse sammen til forsimpling mat.e.result <- rbind(mat.e.result,colSums(mat.e.result)) mat.e.result <- mat.e.result[nrow(mat.e.result),] # save.image("./statistik/R/moneca/vores/voresdata/tmp1_allebeskaeft250.Rdata") # rm(list=ls()) # load("./statistik/R/moneca/vores/voresdata/tmp1_allebeskaeft250.Rdata") if(discogrp==TRUE){ ### restgrupper colnames.tmp <- 
discodata$disco[which(discodata$disco %in% names(mat.e.result))] names(mat.e.result) == colnames.tmp #yes mat.e.result <- mat.e.result[order(names(mat.e.result))] colnames.tmp <- names(mat.e.result) mat.e.result <- tbl_df(mat.e.result) mat.e.result$disco <- colnames.tmp mat.e.result <- rename(mat.e.result,without.mob=value) %>% mutate(without.mob.andel=without.mob/sum(without.mob)) %>% mutate(without.mob.andel.seg.tot=without.mob/(sum(without.mob)+sum(mat.e.result.seg.fra))) } if(discogrp==FALSE){ ### restgrupper test.af.mat.e.result <- sum(mat.e.result) colnames.tmp <- discodata$indeks[which(discodata$disco %in% names(mat.e.result))] colnames.tmp <- as.character(discodata$membership[which(discodata$disco %in% names(mat.e.result))]) names(mat.e.result) <- colnames.tmp mat.e.result <- mat.e.result[order(names(mat.e.result))] colnames.tmp <- names(mat.e.result) mat.e.result <- tbl_df(mat.e.result) mat.e.result <- cbind(mat.e.result, sort(colnames.tmp)) colnames(mat.e.result)[2] <- c("membership") mat.e.result <- aggregate(mat.e.result[, seq_len(ncol(mat.e.result)-1)], list(mat.e.result$membership), sum) colnames.tmp <- mat.e.result$Group.1 rownames(mat.e.result) <- mat.e.result$Group.1 mat.e.result <- rename(mat.e.result,membership=Group.1,without.mob=x) %>% mutate(without.mob.andel=without.mob/sum(without.mob)) %>% mutate(without.mob.andel.seg.tot=without.mob/(sum(without.mob)+sum(mat.e.result.seg.fra))) } sum(mat.e.result$without.mob.andel.seg.tot)== sum(mat.e.result$without.mob) / (sum(mat.e.result$without.mob) + sum(mat.e.result.seg.fra)) #yestest if(discogrp==TRUE){ mat.e.result <- df %>% left_join(mat.e.result,.) } if(discogrp==FALSE){ mat.e.result <- seg.df %>% left_join(mat.e.result,.) } mat.e.result <- mat.e.result %>% add_row(.,membership=c("fokussegmenter"),without.mob=sum(mat.e.result.seg.fra), without.mob.andel.seg.tot= sum(mat.e.result.seg.fra) / (sum(mat.e.result$without.mob) + sum(mat.e.result.seg.fra)),klasse_begtrupbright1=klasse_label) return(mat.e.result) }
/0_funktion_mobilitetsanalysemotor.uni.df.R
no_license
emilBeBri/Speciale-Moneca
R
false
false
8,382
r
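The core operation inside ma.motor.uni.df is the double rowsum() call that collapses both the rows and the columns of the mobility matrix according to a grouping vector. A toy sketch of that aggregation on an invented 4 x 4 flow matrix (names and groups are illustrative only):

# rowsum() collapses rows by group; wrapping it in t() twice collapses columns too.
m <- matrix(1:16, nrow = 4,
            dimnames = list(paste0("g", 1:4), paste0("g", 1:4)))
groups <- c(1, 1, 3, 4)            # merge rows/columns 1 and 2 into one segment
agg <- t(rowsum(t(rowsum(m, groups)), groups))
agg                                 # a 3 x 3 aggregated matrix
sum(agg) == sum(m)                  # TRUE: cell totals are preserved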
library(glmnet)

mydata = read.table("./TrainingSet/LassoBIC/stomach.csv", head = T, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])
set.seed(123)
# 10-fold cross-validated elastic net (alpha = 0.5), minimising mean absolute error
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.5,
                family = "gaussian", standardize = FALSE)
sink('./Model/EN/Lasso/stomach/stomach_060.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
/Model/EN/Lasso/stomach/stomach_060.R
no_license
leon1003/QSMART
R
false
false
354
r
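The script prints the whole fitted path; if the cross-validated model itself is wanted, the selected penalty and its coefficients can be pulled from the same object. A minimal sketch reusing the glm object fitted above (the output file name is an illustrative assumption):

# Extract the CV-selected penalty and the non-zero coefficients from `glm`.
best_lambda <- glm$lambda.min                  # penalty with lowest CV MAE
coefs <- coef(glm, s = "lambda.min")           # sparse coefficient vector
nonzero <- rownames(coefs)[as.vector(coefs) != 0]
write.csv(data.frame(term = nonzero),
          "./Model/EN/Lasso/stomach/stomach_060_nonzero.csv",   # illustrative path
          row.names = FALSE)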
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_implied_volatility.R
\name{compute_implied_volatility}
\alias{compute_implied_volatility}
\title{Compute European Option Implied Volatility}
\usage{
compute_implied_volatility(type, value, underlying, strike, maturity,
  risk_free_rate = 0.01, initial_volatility_guess = 0.5)
}
\arguments{
\item{type}{One of the two values: 'call' or 'put' (string)}

\item{value}{Value of the option (number)}

\item{underlying}{Current price of the underlying (number)}

\item{strike}{Strike price of the option (number)}

\item{maturity}{Time to maturity in fractional years (number)}

\item{risk_free_rate}{Risk-free rate (number, default: 0.01)}

\item{initial_volatility_guess}{Initial guess for the volatility (number, default: 0.5)}
}
\value{
Implied volatility of a European option (number)
}
\description{
\code{compute_implied_volatility} returns the implied volatility of a
European option.
}
\details{
This function is a wrapper around the \code{EuropeanOptionImpliedVolatility}
function from the \code{RQuantLib} package. It is used internally, in this
package, to compute a dataframe with implied volatilities for each row.

TODO: Model/financial restrictions on the parameters should be made clear here...

The \code{EuropeanOptionImpliedVolatility} function asks for a
\code{dividend_yield}, which we assume to be 0 as this is not a stock.

For more information about RQuantLib:
https://cran.r-project.org/web/packages/RQuantLib/RQuantLib.pdf

For more information about QuantLib: http://quantlib.org/index.shtml
}
\author{
John Dole <jdoleiv@gmail.com>
}
\seealso{
\code{EuropeanOptionImpliedVolatility}
}
/man/compute_implied_volatility.Rd
no_license
otrenav/implied-volatility-r-package
R
false
true
1,706
rd
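A hedged usage sketch based on the signature documented above; the numbers are purely illustrative, and how the function is exported depends on the package namespace:

# Illustrative call of the documented wrapper; all values are made up.
iv <- compute_implied_volatility(
  type = "call",
  value = 6.2,          # observed option price
  underlying = 100,
  strike = 95,
  maturity = 0.5,       # half a year
  risk_free_rate = 0.01
)
iv                       # implied volatility (a single number)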
#this script includes examples of running permulations for RERconverge and PGLS #notes and descriptions in this script are limited #because full vignettes are available on GitHub #functions ##################################### #function to permulate phenotype (continuous) simpermvec=function(namedvec, treewithbranchlengths){ #returns sim/perm vec #tree must be rooted and fully dichotomous #species in tree must match species in vec #simulate vector vec=simulatevec(namedvec, treewithbranchlengths) #assign real values to vec simsorted=sort(vec) realsorted=sort(namedvec) l=length(simsorted) c=1 while(c<=l){ simsorted[c]=realsorted[c] c=c+1 } simsorted } #function to simulate phenotype (continuous) simulatevec=function(namedvec, treewithbranchlengths){ #returns simulated vec #tree must be rooted and fully dichotomous #species in tree must match species in vec library("geiger") rm=ratematrix(treewithbranchlengths, namedvec) sims=sim.char(treewithbranchlengths, rm, nsim = 1) nam=rownames(sims) s=as.data.frame(sims) simulatedvec=s[,1] names(simulatedvec)=nam vec=simulatedvec vec } #function to permute phenotype (continuous or binary) permutevec=function(namedvec){ #returns permuted vec n=names(namedvec) vec=sample(namedvec) names(vec)=n vec } #function to permulate phenotype (binary) simBinPheno=function(trees, root, phenvec, fgnum=NULL, internal=0, drop=NULL){ blsum=0 if(is.null(fgnum)){ fgnum=sum(phenvec) } tips=fgnum-internal while(blsum!=fgnum){ t=root.phylo(trees$masterTree, root, resolve.root = T) t=drop.tip(t, drop) rm=ratematrix(t, phenvec) sims=sim.char(t, rm, nsim = 1) nam=rownames(sims) s=as.data.frame(sims) simulatedvec=s[,1] names(simulatedvec)=nam top=names(sort(simulatedvec, decreasing = TRUE))[1:tips] t=foreground2Tree(top, trees, clade="all", plotTree = F) blsum=sum(t$edge.length) } # plot(t) return(t) } ##################################### #permulations with RERconverge using a continuous phenotype ##################################### trees=readRDS("/home/kowaae22/100way/promotertrees/promotertrees.rds") RERs=readRDS("/home/kowaae22/100way/RERs/promoterRERslongevitySpecs.rds") annots=readRDS("/home/kowaae22/Annotations/fullcodingannots.rds") res=readRDS("/home/kowaae22/100way/RERanalysisresults/PC1cors.rds") enrichment=readRDS("/home/kowaae22/100way/RERanalysisresults/PC1enrich.rds") PC1=readRDS("/home/kowaae22/AnalysisWithThreeTrees/PC1.rds") mt=trees$masterTree mt=drop.tip(mt, "chrAsi1") mt=root.phylo(mt, outgroup="ornAna1", resolve.root=T) perms=RERconverge::getPermsContinuous(1000, PC1, RERs, annots, trees, mt) saveRDS(perms, "/home/kowaae22/100way/RERanalysisresults/PC1perms.rds") corpermpvals=RERconverge::permpvalcor(res, perms) saveRDS(corpermpvals, "/home/kowaae22/100way/RERanalysisresults/PC1correlationpermp.rds") enrichpermpvals=RERconverge::permpvalenrich(enrichment, perms) saveRDS(enrichpermpvals, "/home/kowaae22/100way/RERanalysisresults/PC1enrichpermp.rds") res$permpval=corpermpvals[match(rownames(res), names(corpermpvals))] res$permpvaladj=p.adjust(res$permpval, method="BH") saveRDS(res, "/home/kowaae22/100way/RERanalysisresults/PC1corwithpermp.rds") count=1 while(count<=length(enrichment)){ enrichment[[count]]$permpval=enrichpermpvals[[count]][match(rownames(enrichment[[count]]), names(enrichpermpvals[[count]]))] enrichment[[count]]$permpvaladj=p.adjust(enrichment[[count]]$permpval, method="BH") count=count+1 } saveRDS(enrichment, "/home/kowaae22/100way/RERanalysisresults/PC1enrichwithpermp.rds") ##################################### #permulations with 
RERconverge using a binary phenotype ##################################### trees=readRDS("/home/kowaae22/100way/promotertrees/promotertrees.rds") RERs=readRDS("/home/kowaae22/100way/RERs/promoterRERsallSpecsweightresid.rds") annots=readRDS("/home/kowaae22/Annotations/fullcodingannots.rds") res=readRDS("/home/kowaae22/100way/RERanalysisresults/hairlesscors.rds") enrichment=readRDS("/home/kowaae22/100way/RERanalysisresults/hairlessenrich.rds") fg=readRDS("/home/kowaae22/AnalysisWithThreeTrees/hairlessSpecs.rds") s=list(clade1=c("orcOrc1", "turTru2")) perms=getPermsBinary(1000, fg, s, "ornAna1", RERs, trees, trees$masterTree, permmode="cc",calculateenrich=T,annotlist=annots) saveRDS(perms, "/home/kowaae22/100way/RERanalysisresults/hairlessperms.rds") permpcor = permpvalcor(res,perms) saveRDS(permpcor, "/home/kowaae22/100way/RERanalysisresults/hairlesscorrelationpermp.rds") enrichpermpvals=permpvalenrich(enrichment, perms) saveRDS(enrichpermpvals, "/home/kowaae22/100way/RERanalysisresults/hairlessenrichpermp.rds") res$permpval=permpcor[match(rownames(res), names(permpcor))] res$permpvaladj=p.adjust(res$permpval, method="BH") saveRDS(res, "/home/kowaae22/100way/RERanalysisresults/hairlesscorwithpermp.rds") count=1 while(count<=length(enrichment)){ enrichment[[count]]$permpval=enrichpermpvals[[count]][match(rownames(enrichment[[count]]), names(enrichpermpvals[[count]]))] enrichment[[count]]$permpvaladj=p.adjust(enrichment[[count]]$permpval, method="BH") count=count+1 } saveRDS(enrichment, "/home/kowaae22/100way/RERanalysisresults/hairlessenrichwithpermp.rds") ##################################### #permulations with PGLS and a continuous phenotype - this will take a very long time ######################################## numperms=500 full=read.table("/home/kowaae22/TFcalls/filteredcustommergedcoords/hg19coords", stringsAsFactors=F) full=full[full$V1=="chr1",] resultsdf=data.frame(matrix(nrow=nrow(full), ncol=numperms)) rownames(resultsdf)=full$V4 allresults=list(resultsdf, resultsdf) names(allresults)=c("PGLSp", "PGLSstat") start=Sys.time() hcount=1 while(hcount<=numperms){ h=statfns[1] fn=paste0("/home/kowaae22/TFcalls/", folder, "/", h, stat) data=read.table(fn, stringsAsFactors =F) colnames(data)=data[1,] data=data[-1,] colnames(data)[colnames(data)=="odoRosDiv1"]="odoRosDi" data=data[data$chr=="chr1",] rownames(allresults$PGLSp)=data$name rownames(allresults$PGLSstat)=data$name #permulate phenotype rtmt=root.phylo(trees1$masterTree, outgroup = "ornAna1", resolve.root = T) rtmt=drop.tip(rtmt, c("sgal", "chrAsi1")) phenvec=simpermvec(PC1, rtmt) count=1 while(count<=nrow(data)){ curcne=data$name[count] TF=setNames(data[count,], colnames(data)) TF=TF[-c(1:4)] TF=TF[match(names(phenvec), names(TF))] TF=as.numeric(TF) df=data.frame(TF, phenvec) df2=na.omit(df) mt2=getTree(curcne) keep=intersect(rownames(df2), mt2$tip.label) if(!is.null(mt2)){ df2=df2[rownames(df2) %in% keep,] mt2=keep.tip(mt2, keep) } #continuous PGLS if(length(unique(df2$TF))!=1 & length(unique(df2$phenvec))!=1 & nrow(unique(df2))>2 & !is.null(mt2)){ pgls=gls(TF~phenvec, correlation = corBrownian(phy=mt2), data=df2) pvalPGLS=summary(pgls)$tTable[2,4] statPGLS=summary(pgls)$tTable[2,3] allresults$PGLSp[count, hcount]=pvalPGLS allresults$PGLSstat[count, hcount]=statPGLS }else{ allresults$PGLSp[count, hcount]=NA allresults$PGLSstat[count, hcount]=NA } if(count %% 10000==0){ print(paste0("CNE count: ", count)) #345786 } count=count+1 } print(paste0("perm count: ", hcount)) #771 # saveRDS(allresults, 
paste0("/home/kowaae22/TFcalls/allresultsmergedcustomtreesLongevityPC1/allresultsmergedCTfirst",hcount,".rds")) hcount=hcount+1 } end=Sys.time() end-start saveRDS(allresults, "/home/kowaae22/permPGLSpermsSTAT2PC1count.rds") ########################################
/RunPermulations.R
no_license
kowaae22/ClarkLabDocumentation
R
false
false
7,864
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_interface.R
\name{spark_insert_table}
\alias{spark_insert_table}
\title{Inserts a Spark DataFrame into a Spark table}
\usage{
spark_insert_table(
  x,
  name,
  mode = NULL,
  overwrite = FALSE,
  options = list(),
  ...
)
}
\arguments{
\item{x}{A Spark DataFrame or dplyr operation}

\item{name}{The name to assign to the newly generated table.}

\item{mode}{A \code{character} element. Specifies the behavior when data or
table already exists. Supported values include: 'error', 'append', 'overwrite' and
'ignore'. Notice that 'overwrite' will also change the column structure. For more
details see also
\url{https://spark.apache.org/docs/latest/sql-programming-guide.html#save-modes}
for your version of Spark.}

\item{overwrite}{Boolean; overwrite the table with the given name if it
already exists?}

\item{options}{A list of strings with additional options.}

\item{...}{Optional arguments; currently unused.}
}
\description{
Inserts a Spark DataFrame into a Spark table.
}
\seealso{
Other Spark serialization routines:
\code{\link{collect_from_rds}()},
\code{\link{spark_load_table}()},
\code{\link{spark_read_avro}()},
\code{\link{spark_read_binary}()},
\code{\link{spark_read_csv}()},
\code{\link{spark_read_delta}()},
\code{\link{spark_read_image}()},
\code{\link{spark_read_jdbc}()},
\code{\link{spark_read_json}()},
\code{\link{spark_read_libsvm}()},
\code{\link{spark_read_orc}()},
\code{\link{spark_read_parquet}()},
\code{\link{spark_read_source}()},
\code{\link{spark_read_table}()},
\code{\link{spark_read_text}()},
\code{\link{spark_read}()},
\code{\link{spark_save_table}()},
\code{\link{spark_write_avro}()},
\code{\link{spark_write_csv}()},
\code{\link{spark_write_delta}()},
\code{\link{spark_write_jdbc}()},
\code{\link{spark_write_json}()},
\code{\link{spark_write_orc}()},
\code{\link{spark_write_parquet}()},
\code{\link{spark_write_source}()},
\code{\link{spark_write_table}()},
\code{\link{spark_write_text}()}
}
\concept{Spark serialization routines}
/man/spark_insert_table.Rd
permissive
sparklyr/sparklyr
R
false
true
2,071
rd
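A hedged usage sketch for this function; spark_connect(), copy_to() and spark_write_table() are standard sparklyr calls, while the connection settings and table names are illustrative assumptions:

# Minimal sketch: create a Spark table, then insert more rows into it.
library(sparklyr)
sc <- spark_connect(master = "local")
first_half  <- copy_to(sc, mtcars[1:16, ],  "mtcars_src1")
second_half <- copy_to(sc, mtcars[17:32, ], "mtcars_src2")
spark_write_table(first_half, "mtcars_tbl")        # create the target table
spark_insert_table(second_half, "mtcars_tbl")      # append the remaining rows
spark_disconnect(sc)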
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getOMLDataSetQualities.R
\name{getOMLDataSetQualities}
\alias{getOMLDataSetQualities}
\title{List available OpenML qualities with values for a given data set.}
\usage{
getOMLDataSetQualities(did, verbosity = NULL, name = NULL)
}
\arguments{
\item{did}{[\code{integer(1)}]\cr
The data set ID.}

\item{verbosity}{[\code{integer(1)}]\cr
Print verbose output on console? Possible values are:\cr
\code{0}: normal output,\cr
\code{1}: info output,\cr
\code{2}: debug output.\cr
Default is set via \code{\link{setOMLConfig}}.}

\item{name}{[\code{character}]\cr
Returns only the data qualities from \dQuote{name} (see also
\code{\link{listOMLDataSetQualities}}).
Default is \code{NULL} and uses all available data qualities.}
}
\value{
[\code{data.frame}].
}
\description{
The returned \code{data.frame} contains the data set quality \dQuote{name} and its
value \dQuote{value}.
}
\examples{
\dontrun{
a = getOMLDataSetQualities(did = 9)
a[a$name == "NumberOfMissingValues", ]

getOMLDataSetQualities(did = 9, name = "NumberOfMissingValues")
}
}
\seealso{
Other downloading functions: \code{\link{getOMLDataSet}},
  \code{\link{getOMLFlow}}, \code{\link{getOMLRun}},
  \code{\link{getOMLTask}}
}
/man/getOMLDataSetQualities.Rd
no_license
mutual-ai/openml-r
R
false
true
1,262
rd
# PDQ model of memory leakage (from GCAP 2018 student)
# Created by NJG on Sat Oct 20 12:50:01 2018

library(pdq)

### Globals ###
memWrites <- 1000
writeTime <- 0.002   # seconds
swpWrites <- 0
swapTime  <- 0.004   # seconds
swapStart <- 400     # pages
memUtil   <- NULL    # array for PDQ output
swpUtil   <- NULL    # array for PDQ output
diffUtil  <- NULL    # diff b/w utilizations
clipPlot  <- 10      # drop last few data points

####################################
# Memory consumption phase
####################################

for(i in 1:memWrites) {
  Init("Memory Consumption")

  # Memory page model
  CreateClosed("pageWrites", TERM, as.numeric(i), 1.0)
  CreateNode("Memory", CEN, FCFS)
  SetDemand("Memory", "pageWrites", writeTime)

  Solve(EXACT)
  memUtil[i] <- GetUtilization("Memory", "pageWrites", TERM)
}

plot(x=1:swapStart+50, y=memUtil[1:swapStart+50] * 100,
     main="PDQ Model of Memory Leak",
     xlab="Memory pages",
     ylab="Resource consumption (%)",
     xlim=c(0,memWrites), ylim=c(0,105),
     type="l", col="red", lwd=2
)

lines(x=swapStart+50:memWrites, y=memUtil[swapStart+50:memWrites] * 100,
      col="red", lwd=2, lty="dotted"
)

####################################
# Swap out phase
####################################

for(i in 1:memWrites) {
  Init("Page swapping")

  # Swap rate
  swpWrites <- 0.8 * i  # determines slope of delta curve (blue)
  CreateClosed("swpWrites", TERM, as.numeric(swpWrites), 1.0)
  CreateNode("SwapDev", CEN, FCFS)
  SetDemand("SwapDev", "swpWrites", swapTime)

  Solve(APPROX)
  swpUtil[i] <- GetUtilization("SwapDev", "swpWrites", TERM)
}

# Phase shift by 450 pages on x-axis
diffUtil <- c(rep(NA, swapStart), swpUtil[1:(memWrites - swapStart)])

lines(x=1:(memWrites - clipPlot), y=diffUtil[1:(memWrites - clipPlot)] * 100,
      col="darkgreen", lwd=2, lty="dotted"
)
lines(x=1:(swapStart + 50), y=rep(0,(swapStart + 50)),
      col="darkgreen", lwd=2
)
lines(x=(swapStart + 50):(memWrites - clipPlot),
      y=diffUtil[(swapStart + 50):(memWrites - clipPlot)] * 100,
      col="darkgreen", lwd=2
)
lines(x=1:(memWrites-clipPlot),
      y=(memUtil - diffUtil)[1:(memWrites - clipPlot)] * 100,
      col="red", lwd=2, lty="dotted"
)
lines(x=(swapStart + 50):(memWrites - clipPlot),
      y=(memUtil - diffUtil)[(swapStart + 50):(memWrites - clipPlot)] * 100,
      col="red", lwd=2
)

abline(v=450, col="gray")  # discontinuity line
arrows(x0=450,y0=88,x1=450,y1=73,col="red",lwd=2,angle=20,length=0.1)
arrows(x0=450,y0=0,x1=450,y1=15,col="darkgreen",lwd=2,angle=20,length=0.1)

text(250,50, "Accumulating RAM pages", cex=0.75)
text(730,35, "Active RAM pages", cex=0.75)
text(770,80, "Reclaimed\n(inactive) pages\non swap device", cex=0.75)
text(550,7, "Page swapping\ncommences", cex=0.75)
text(200,90, "(c) 2018 Performance Dynamics",col="gray",cex=0.75)
/mem-leak-pdq.r
no_license
DrQz/random-r
R
false
false
2,904
r
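Both phases of the script follow the same basic PDQ recipe: define a closed workload, attach it to a single FCFS queueing node, set a service demand, solve, and read off the utilization. A stripped-down sketch of that recipe; the node name, population and demand are illustrative:

# Minimal PDQ recipe used by both phases above.
library(pdq)
Init("Single node example")
CreateClosed("writes", TERM, 100, 1.0)      # 100 customers, 1.0 s think time
CreateNode("Disk", CEN, FCFS)
SetDemand("Disk", "writes", 0.002)          # 2 ms service demand per visit
Solve(EXACT)
GetUtilization("Disk", "writes", TERM)      # fraction of time the node is busy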
source("chooser.R") library(data.table) diagnose <<- read.csv("data/incnar.csv")$condition nepisode <- 15000 pre <- runif(nepisode, 0, 1) ob <- rep(0, nepisode) for (i in seq(nepisode)) ob[i] <- runif(1, exp(pre[i])/2, exp(pre[i]))/exp(1) mock <<- data.table(ob=ob, pre=pre,diag= diagnose[round(runif(nepisode, 1, length(diagnose)))]) fluidPage( titlePanel("Choose multiple diagnoses: "), sidebarLayout(sidebarPanel( # Generate a row with a sidebar chooserInput("mychooser", "Diagnoses", "Selected diagnoses", diagnose, c(), size = 10, multiple = TRUE)), mainPanel( plotOutput("someplot") ) ), actionButton("interactive", "Interactive plot"), plotOutput("interactive") )
/ui.R
no_license
sinanshi/shiny-apache
R
false
false
776
r
source("chooser.R") library(data.table) diagnose <<- read.csv("data/incnar.csv")$condition nepisode <- 15000 pre <- runif(nepisode, 0, 1) ob <- rep(0, nepisode) for (i in seq(nepisode)) ob[i] <- runif(1, exp(pre[i])/2, exp(pre[i]))/exp(1) mock <<- data.table(ob=ob, pre=pre,diag= diagnose[round(runif(nepisode, 1, length(diagnose)))]) fluidPage( titlePanel("Choose multiple diagnoses: "), sidebarLayout(sidebarPanel( # Generate a row with a sidebar chooserInput("mychooser", "Diagnoses", "Selected diagnoses", diagnose, c(), size = 10, multiple = TRUE)), mainPanel( plotOutput("someplot") ) ), actionButton("interactive", "Interactive plot"), plotOutput("interactive") )
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mappable.R
\name{align}
\alias{align}
\title{Return GRanges of uniquely mapping hits.}
\usage{
align(views, genome = NULL, BPPARAM = bpparam(), ...)
}
\arguments{
\item{views}{The \code{\link[BSgenome]{BSgenomeViews}} DNA to be mapped.}

\item{genome}{The \code{\link[BSgenome]{BSgenome}} DNA to search for hits.}

\item{BPPARAM}{An optional \code{\link[BiocParallel]{BiocParallelParam}} instance
determining the parallel back-end to be used during evaluation, or a
\code{\link[base]{list}} of \code{\link[BiocParallel]{BiocParallelParam}} instances,
to be applied in sequence for nested calls to \code{BiocParallel} functions.}

\item{...}{Extra arguments passed on to \code{\link[QuasR]{qAlign}}.}
}
\value{
The \code{\link[GenomicRanges]{GRanges-class}} of uniquely mapping DNA sequences.
}
\description{
Return GRanges of uniquely mapping hits.
}
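# --- Added usage sketch (hypothetical, based only on the documented signature above):
# --- `my_views` is assumed to be a BSgenomeViews object and `my_genome` a BSgenome
# --- object; neither is provided by this man page.
library(BiocParallel)
hits <- align(my_views, genome = my_genome, BPPARAM = SerialParam())
hits   # expected: a GRanges of the uniquely mapping sequences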
/man/align.Rd
no_license
coregenomics/kmap
R
false
true
929
rd
# MOMOpack for R
# Originally MOMOpack V 4.3 for Stata,
# created by Bernadette Gergonne, SSI-EpiLife for Euro MOMO.
# Ported into R by Theodore Lytras <thlytras@gmail.com>

# REMINDER:
# $WEEK = Week number to study according to the date of aggregation
#    = complete ISO week, (From monday to Sunday) preceding the date of aggregation
# $PRWEEK = the first week when the MOMO registration became smooth and regular
# $back = the number of week to remove from the series to model
# $WEEK2 is the week number until which we want model the series
# WRxx = the number of death registered xx week (FULL WEEK) after the week of death, ACCORDING TO WHAT WE KNOW THE DAY OF AGGREGATION
#    = number of death registered at WoS who died XX week(s) before
# WRxxA = what is registered xx week after (incomplete week) until the day of Aggregation
#    = what we know in addition if we aggregate i.e. on wednesday instead of sunday.
# YW = the ID of the week (concatenation of iso Year and iso week of death)
# WoDi = Week of Death Iso
# YoDi = Year of Death Iso
# closed = the number of day off during the iso week
# closed2 = the number of day off during the following week from Monday until the day of Aggregation
# nb = weekly number of death in the series provided
# nb2 = weekly number of death known (already registered) at the date of Aggregation
# wk = iterative number of the week
# Ywk = the stata format week number (drop week 53 !) as Stata cannot work sith ISO weeks
# nbc = corrected number of death
# nbr = registered numbre of death
# UCIc = Upper Confidence Interval of the corrected number of deaths
# LCIc = Lower Confidence Interval of the corrected number of deaths
# UPIc = Upper Prediction Interval of the corrected number of deaths
# LPIc = Lower Prediction Interval of the corrected number of deaths


delayMOMO <- function(aggr, zvalue=1.96) {
  aggr <- aggr[order(aggr$wk),]

  # the period of registration for a death week XX
  aggr$closed0 <- aggr$closed + vecshift(aggr$closedA, 1)
  for (VV in 1:attr(aggr, "delayCorr")) {
    aggr[[paste("closed", VV, sep="")]] <- aggr[[paste("closed", VV-1, sep="")]] + vecshift(aggr$closed0, -VV)
  }

  # CORRECTION FOR DELAY
  # FIRST we model what we know about the previous week.

  aggr$pred <- NA
  aggr$UCIc <- NA
  aggr$LCIc <- NA
  aggr$UPIc <- NA
  aggr$LPIc <- NA
  aggr$GROUP <- attr(aggr, "group")

  for (XX in 0:attr(aggr, "delayCorr")) {
    aggr[[paste("CCC", XX, sep="")]] <- vecshift(aggr[[paste("closed", XX, sep="")]], XX)
    aggr[[paste("a", XX, sep="")]] <- ifelse((aggr$wk>attr(aggr, "PRWEEK") & aggr$wk<=attr(aggr, "WEEK2")),
        (aggr[[paste("WR", XX, sep="")]]/aggr$nb), NA)
    m1 <- suppressWarnings(glm(as.formula(paste("a", XX, " ~ CCC", XX, " + wk", sep="")),
        data=subset(aggr, wk>attr(aggr, "PRWEEK") & wk<attr(aggr, "WEEK2")), family=binomial))
    aggr[[paste("Pa", XX, sep="")]] <- predict(m1, aggr, type="response")
    aggr[[paste("Pa", XX, sep="")]][which(aggr$wk<=attr(aggr, "PRWEEK") | aggr$wk>attr(aggr, "WEEK"))] <- NA
    aggr[[paste("temp", XX, sep="")]] <- aggr[[paste("WR", XX, sep="")]] / aggr[[paste("Pa", XX, sep="")]]
    m1 <- glm(as.formula(paste("nb2 ~ WR", XX, " + Pa", XX, " + wk", sep="")),
        data=subset(aggr, wk>attr(aggr, "PRWEEK") & wk<attr(aggr, "WEEK2")), family=poisson)
    od <- max(1,sum(m1$weights * m1$residuals^2)/m1$df.r)
    if (od > 1) m1 <- glm(as.formula(paste("nb2 ~ WR", XX, " + Pa", XX, " + wk", sep="")),
        data=subset(aggr, wk>attr(aggr, "PRWEEK") & wk<attr(aggr, "WEEK2")), family=quasipoisson)
    tryCatch(
      aggr[[paste("pred", XX, sep="")]] <- predict(m1, aggr, type="response"),
      warning = function(w)
        if (conditionMessage(w)=="prediction from a rank-deficient fit may be misleading")
          warning(  # Giving a more informative warning
            "In group '", attr(aggr, "group"), "', the delay Poisson model fit for lag ", XX, " week(s)\n",
            " is rank deficient. Prediction may be misleading.", call.=FALSE)
        else warning(w)
    )
    aggr[[paste("pred", XX, sep="")]][aggr$wk<=attr(aggr, "PRWEEK") | aggr$wk>attr(aggr, "WEEK")-XX] <- NA
    tryCatch(
      aggr[[paste("stdp", XX, sep="")]] <- predict(m1, aggr, se.fit=TRUE)$se.fit,
      warning = function(w)
        # If we have the same warning about rank deficiency as above, there's no reason to print it twice
        if (conditionMessage(w)!="prediction from a rank-deficient fit may be misleading") warning(w)
    )
    aggr[[paste("stdp", XX, sep="")]][aggr$wk<=attr(aggr, "PRWEEK") | aggr$wk>attr(aggr, "WEEK")-XX] <- NA
    aggr[[paste("N", XX, sep="")]] <- sum(!is.na(aggr[[paste("stdp", XX, sep="")]]))
    aggr[[paste("temp", XX, sep="")]] <- NULL

    # Prediction Interval
    aggr[[paste("UPI", XX, sep="")]] <- (aggr[[paste("pred", XX, sep="")]]^(2/3) +
        zvalue*((4/9)*(aggr[[paste("pred", XX, sep="")]]^(1/3))*(od+(aggr[[paste("stdp", XX, sep="")]]^2)*(aggr[[paste("pred", XX, sep="")]])))^(1/2))^(3/2)
    aggr[[paste("LPI", XX, sep="")]] <- (aggr[[paste("pred", XX, sep="")]]^(2/3) -
        zvalue*((4/9)*(aggr[[paste("pred", XX, sep="")]]^(1/3))*(od+(aggr[[paste("stdp", XX, sep="")]]^2)*(aggr[[paste("pred", XX, sep="")]])))^(1/2))^(3/2)
    aggr[[paste("UCI", XX, sep="")]] <- aggr[[paste("pred", XX, sep="")]] + zvalue*aggr[[paste("stdp", XX, sep="")]]
    aggr[[paste("LCI", XX, sep="")]] <- aggr[[paste("pred", XX, sep="")]] - zvalue*aggr[[paste("stdp", XX, sep="")]]

    aggr$pred[aggr$wk == attr(aggr, "WEEK")-XX] <- aggr[[paste("pred", XX, sep="")]][aggr$wk == attr(aggr, "WEEK")-XX]
    aggr$UCIc[aggr$wk == attr(aggr, "WEEK")-XX] <- aggr[[paste("UCI", XX, sep="")]][aggr$wk == attr(aggr, "WEEK")-XX]
    aggr$LCIc[aggr$wk == attr(aggr, "WEEK")-XX] <- aggr[[paste("LCI", XX, sep="")]][aggr$wk == attr(aggr, "WEEK")-XX]
    aggr$UPIc[aggr$wk == attr(aggr, "WEEK")-XX] <- aggr[[paste("UPI", XX, sep="")]][aggr$wk == attr(aggr, "WEEK")-XX]
    aggr$LPIc[aggr$wk == attr(aggr, "WEEK")-XX] <- aggr[[paste("LPI", XX, sep="")]][aggr$wk == attr(aggr, "WEEK")-XX]

    aggr[[paste("UCI", XX, sep="")]][aggr$wk < attr(aggr, "WEEK2")] <- NA
    aggr[[paste("LCI", XX, sep="")]][aggr$wk < attr(aggr, "WEEK2")] <- NA
    aggr[[paste("UPI", XX, sep="")]][aggr$wk < attr(aggr, "WEEK2")] <- NA
    aggr[[paste("LPI", XX, sep="")]][aggr$wk < attr(aggr, "WEEK2")] <- NA
  }

  # we generate the CORRECTED number of death
  aggr$nbc[aggr$wk < attr(aggr, "WEEK2")] <- aggr$nb[aggr$wk < attr(aggr, "WEEK2")]
  aggr$nbc[aggr$wk >= attr(aggr, "WEEK2") & aggr$wk <= attr(aggr, "WEEK")] <-
    pmax(aggr$pred[aggr$wk >= attr(aggr, "WEEK2") & aggr$wk <= attr(aggr, "WEEK")],
         aggr$nb[aggr$wk >= attr(aggr, "WEEK2") & aggr$wk <= attr(aggr, "WEEK")], na.rm=TRUE)

  return(aggr)
}


trimDelayMOMO <- function(aggr) {
  for (XX in 1:attr(aggr, "delayCorr")) {
    aggr[[paste("pred", XX, sep="")]] <- NULL
    aggr[[paste("UCI", XX, sep="")]] <- NULL
    aggr[[paste("LCI", XX, sep="")]] <- NULL
    aggr[[paste("UPI", XX, sep="")]] <- NULL
    aggr[[paste("LPI", XX, sep="")]] <- NULL
  }
  aggr$nbc[is.na(aggr$nbc)] <- 0
  ret <- aggr[,c("GROUP", "WoDi", "YoDi", "wk", "wk2", "nb", "nb2", "nbr", "nbc",
                 "pred", "UCIc", "LCIc", "UPIc", "LPIc")]
  # We must preserve the attributes we need
  transferMOMOattributes(ret, aggr)
}
/code/delay.R
no_license
thlytras/MOMOpack-for-R
R
false
false
7,379
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workmail_operations.R
\name{workmail_describe_email_monitoring_configuration}
\alias{workmail_describe_email_monitoring_configuration}
\title{Describes the current email monitoring configuration for a specified organization}
\usage{
workmail_describe_email_monitoring_configuration(OrganizationId)
}
\arguments{
\item{OrganizationId}{[required] The ID of the organization for which the email monitoring
configuration is described.}
}
\description{
Describes the current email monitoring configuration for a specified organization. See
\url{https://www.paws-r-sdk.com/docs/workmail_describe_email_monitoring_configuration/}
for full documentation.
}
\keyword{internal}
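# --- Added usage sketch (hypothetical): in paws, operations like the one documented
# --- above are normally invoked through a service client rather than by the prefixed
# --- name; the organization ID below is a placeholder.
library(paws)
svc <- workmail()
resp <- svc$describe_email_monitoring_configuration(
  OrganizationId = "m-0123456789abcdef0123456789abcdef"
)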
/cran/paws.end.user.computing/man/workmail_describe_email_monitoring_configuration.Rd
permissive
paws-r/paws
R
false
true
747
rd
# Integral computed by the Monte Carlo method, compared with the analytical value

f <- function(x, p, q){
  (x**(p-1))/((1-x**q)**(p/q))
}

monte_carlo <- function(p, q, a = 0, b = 1, n = 1000){
  rez = (b-a)/n*sum(f(runif(n), p, q))
  print(rez)
  print(pi/(q*sin(p*pi/q)))
}
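# --- Added example run (not in the original file): for p = 0.5, q = 1 the integral
# --- equals Beta(1/2, 1/2) = pi, so both printed values should be close to 3.1416.
set.seed(1)
monte_carlo(p = 0.5, q = 1, n = 100000)
# first value printed:  Monte Carlo estimate of the integral
# second value printed: analytical value pi/(q*sin(p*pi/q)) = pi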
/university/monte_karlo.R
no_license
1um/R
R
false
false
313
r
score.web.attribute <- function(attribute.weight) {
  # Work out how many attributes are required for the Article
  # The DQ Attribute fields have already been set to either 1: Populated, 0: Empty, NA: Not required
  # Divides the attribute weight by the number of attributes required to calculate a score for each individual attribute

  dq.attribute.fields <- c(37:49)

  attribute.required <- rowSums(!is.na(web.product.data[dq.attribute.fields]))
  attribute.actual <- rowSums(web.product.data[,dq.attribute.fields],na.rm = TRUE)
  attribute.value <- (attribute.weight / attribute.required)
  attribute.score <- attribute.actual * attribute.value

  web.product.data$'DQ Attribute Score' <<- attribute.score
}


score.web.description <- function(description.weight) {
  # Check 100 Character Field is populated
  web.product.data$'DQ Web Description Score' <<- 0
  web.product.data$'DQ Web Description Score'[web.product.data$`Web Description 1`!=""] <<- description.weight
}


score.brand.consistency <- function(brand.weight){
  # Checks if the brand value in the attribute field appears in the product title

  # Create column
  web.product.data$`DQ Brand Consistency Score` <<- 0

  # Loop through web.product.data
  for(i in 1:NROW(web.product.data)) {
    # Check if brand is NA or wb description is missing
    if(is.na(web.product.data$`Brand`[i]) | web.product.data$`Web Description 1`[i] == "") {
      web.product.data$`DQ Brand Consistency Score`[i] <<- 0
    }
    # Convert both attribute and title to uppercase (test is case sensitive) and compare
    else {
      if(grepl(toupper(web.product.data$Brand[i]),toupper(web.product.data$`Web Description 1`[i])) == FALSE) {
        web.product.data$`DQ Brand Consistency Score`[i] <<- brand.weight
      }
    }
  }
}


score.gtin.issue <- function(gtin.weight) {
  web.product.data$'DQ GTIN Score' <<- 0
  web.product.data$'DQ GTIN Score'[(web.product.data$Article %in% gtin.issue$Article)] <<- gtin.weight
}

#-----------------------------------------------------------------------------------------------------------------------------------------

score.data.quality <- function() {
  # Run the Score functions
  score.web.description(0.65)
  score.web.attribute(0.35)
  score.brand.consistency(-0.05)
  score.gtin.issue(-0.05)

  # Set Score field to Zero
  web.product.data$'DQ Score' <- 0

  # Add the two components of the score together and round to 1 decimal place
  dq.score.columns <- c("DQ Web Description Score","DQ Attribute Score","DQ Brand Consistency Score","DQ GTIN Score")

  # Sum the rows, round to 2 dp and multiply by 10 to get a score out of 100
  web.product.data$`DQ Score` <<- rowSums(web.product.data[,dq.score.columns],na.rm = TRUE)
  #web.product.data$`DQ Score` <- signif(web.product.data$`DQ Score`,2)
  #web.product.data$`DQ Score` <<- web.product.data$`DQ Score` * 10
}
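# --- Added worked example (not part of the script) of the attribute scoring arithmetic:
# --- with attribute.weight = 0.35 and 7 required attributes, each populated attribute
# --- is worth 0.35/7 = 0.05, so a row with 5 of its 7 attributes populated scores 0.25.
attribute.weight <- 0.35
attribute.required <- 7
attribute.actual <- 5
attribute.actual * (attribute.weight / attribute.required)   # 0.25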
/Scripts/score-product-data-quality.R
no_license
DasOakster/data-quality-management
R
false
false
3,387
r
\name{clean_moves_matrix}
\alias{clean_moves_matrix}
\title{Clean up proposed moves matrix}
\description{Cleans up proposed move matrix}
\usage{clean_moves_matrix(theta, moves, n_zones)}
\arguments{
  \item{theta}{original configuration}
  \item{moves}{proposed moves}
  \item{n_zones}{total number of single zones in region}
}
\value{
A matrix with \code{kstar} rows, and each column being a proposed configuration, with the
component single zone indices being sorted (i.e. each column).
}
\author{Albert Y. Kim}
\keyword{internal}
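# --- Added call-shape sketch (hypothetical; this is an internal SpatialEpi helper).
# --- `current_config`, `proposed_moves` and `n_zones` are placeholders for the objects
# --- described in the arguments above.
new_configs <- clean_moves_matrix(theta = current_config, moves = proposed_moves, n_zones = n_zones)
# each column of new_configs is a proposed configuration with sorted single-zone indices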
/issuestests/SpatialEpi/man/clean_moves_matrix.Rd
no_license
akhikolla/RcppDeepStateTest
R
false
false
535
rd
library(lubridate)
library(dplyr)
library(forecast)
library(ggplot2)

data <- read.csv('data123.csv')
View(data)
str(data)

### feature tuning
data$Created.Date <- as.Date(data$Start.Date, format = '%m/%d/%Y')
data$Start.Date <- as.Date(data$Start.Date, format = '%m/%d/%Y')
data$Response.Day <- as.Date(data$Response.Day,format = '%m/%d/%Y')
data$Response.Month <- data$Response.Day
day(data$Response.Month) <- 1
str(data)

### imputation
data$bu_flag <- factor(if_else(
  year(data$Start.Date) < year(data$Response.Day) &
    month(data$Start.Date) == 12 &
    month(data$Response.Day) == 1,
  'Uplift',
  ifelse(
    year(data$Created.Date) < year(data$Start.Date) &
      year(data$Response.Day) < year(data$Start.Date) |
      year(data$Created.Date) == year(data$Start.Date) &
      year(data$Response.Day) == year(data$Start.Date) &
      month(data$Created.Date) <= month(data$Start.Date) &
      month(data$Response.Day) <= month(data$Start.Date) + 1,
    'Uplift','Baseline'
  )
))

### subsetting
# isolate trials
responses_notrials <- subset(data, Language != '', select = Campaign.ID:bu_flag)
responses_trials <- subset(data, Language == '', select = Campaign.ID:bu_flag)

# isolate local vs non-local responses
local_base <- subset(responses_notrials, Language != 'English' & bu_flag == 'Baseline', select = Campaign.ID:bu_flag)
local_up <- subset(responses_notrials, Language != 'English' & bu_flag == 'Uplift', select = Campaign.ID:bu_flag)
nonlocal_base <- subset(responses_notrials, Language == 'English' & bu_flag == 'Baseline', select = Campaign.ID:bu_flag)
nonlocal_up <- subset(responses_notrials, Language == 'English' & bu_flag == 'Uplift', select = Campaign.ID:bu_flag)

### rollup and convert to time series
lb_df <- as.data.frame(table(local_base$Response.Month, local_base$bu_flag))
lb_df <- subset(lb_df, Var2 =='Baseline', select = c('Var1','Freq'))
colnames(lb_df)[colnames(lb_df)=='Var1'] <- 'Month'
colnames(lb_df)[colnames(lb_df)=='Freq'] <- 'Baseline_Local'
lb_df$Month <- as.POSIXct(lb_df$Month, format = '%Y-%m-%d')

lu_df <- as.data.frame(table(local_up$Response.Month, local_up$bu_flag))
lu_df <- subset(lu_df, Var2 =='Uplift', select = c('Var1','Freq'))
colnames(lu_df)[colnames(lu_df)=='Var1'] <- 'Month'
colnames(lu_df)[colnames(lu_df)=='Freq'] <- 'Uplift_Local'
lu_df$Month <- as.POSIXct(lu_df$Month, format = '%Y-%m-%d')

nb_df <- as.data.frame(table(nonlocal_base$Response.Month, nonlocal_base$bu_flag))
nb_df <- subset(nb_df, Var2 =='Baseline', select = c('Var1','Freq'))
colnames(nb_df)[colnames(nb_df)=='Var1'] <- 'Month'
colnames(nb_df)[colnames(nb_df)=='Freq'] <- 'Baseline_English'
nb_df$Month <- as.POSIXct(nb_df$Month, format = '%Y-%m-%d')

nu_df <- as.data.frame(table(nonlocal_up$Response.Month, nonlocal_up$bu_flag))
nu_df <- subset(nu_df, Var2 =='Uplift', select = c('Var1','Freq'))
colnames(nu_df)[colnames(nu_df)=='Var1'] <- 'Month'
colnames(nu_df)[colnames(nu_df)=='Freq'] <- 'Uplift_English'
nu_df$Month <- as.POSIXct(nu_df$Month, format = '%Y-%m-%d')

### impute uplift with months where MR = 0
ts <- seq.POSIXt(as.POSIXct("2015-6-01 0:00",'%Y-%m-%d %H:%M'), as.POSIXct("2019-05-01 0:00",'%Y-%m-%d %H:%M'), by="month")
ts <- seq.POSIXt(as.POSIXlt("2015-6-01"), as.POSIXlt("2019-05-01"), by="month")
ts <- format.POSIXct(ts,'%Y-%m-%d')
df <- data.frame(timestamp=ts)
df$Month <- as.POSIXct(df$timestamp,format="%Y-%m-%d")

# impute local
lu_df <- full_join(df,lu_df)
lu_df$timestamp <- NULL
lu_df <- lu_df %>% group_by(Month) %>% mutate_each(funs(ifelse(is.na(.),0,.)))
lu_df <- as.data.frame(lu_df)

lb_df <- full_join(df,lb_df)
lb_df$timestamp <- NULL
lb_df <- lb_df %>% group_by(Month) %>% mutate_each(funs(ifelse(is.na(.),0,.)))
lb_df <- as.data.frame(lb_df)

#impute non-local
nu_df <- full_join(df, nu_df)
nu_df$timestamp <- NULL
nu_df <- nu_df %>% group_by(Month) %>% mutate_each(funs(ifelse(is.na(.),0,.)))
nu_df <- as.data.frame(nu_df)

# =================================
# Exploratory Time Series Analysis
# =================================

# overlap individual time series
lb_ts <- ts(lb_df$Baseline_Local, start = c(2015, 6), end = c(2019, 5), frequency = 12)
lu_ts <- ts(lu_df$Uplift_Local, start = c(2015, 6), end = c(2019, 5), frequency = 12)
nb_ts <- ts(nb_df$Baseline_English, start = c(2015, 6), end = c(2019, 5), frequency = 12)
nu_ts <- ts(nu_df$Uplift_English, start = c(2015, 6), end = c(2019, 5), frequency = 12)

# local <- full_join(lb_df,lu_df)
# english <- full_join(nb_df, nu_df)

ts.plot(lb_ts, lu_ts, nb_ts, nu_ts,
        col = c('red', 'pink', 'black', 'gray'),
        main = 'Local vs Non-local Language Response Trends',
        ylab = 'Responses',
        lwd=1, gpars=list(xaxt="n"))
axis(1, at=seq(2015-.25,2019,.25),labels=NA)
axis(1, at=seq(2015-.25,2019,.25),labels = seq(2015-.25,2019,.25),lwd.ticks=1.2)
points(lb_ts, pch = 1, col = 'red')
points(lu_ts, pch = 1, col = 'pink')
points(nb_ts, pch = 1, col = 'black')
points(nu_ts, pch = 1, col = 'gray')
legend("topleft",
       legend=c("English Baseline", "English Uplift", "Local Baseline","Local Uplift"),
       text.col=c('black','gray','red','pink'),
       ncol = 2, cex = 0.75)

# =================================
# Time Series Forecasting
# =================================

boxplot(lb_ts ~ cycle(lb_ts), col = 'mistyrose', main = "Local Language Baseline Seasonality",
        boxwex = 0.7, staplelty = 0)
boxplot(lu_ts ~ cycle(lu_ts), col = 'mistyrose', main = "Local Language Uplift Seasonality",
        boxwex = 0.7, staplelty = 0)
boxplot(nb_ts ~ cycle(nb_ts), col = 'mistyrose', main = "Non-local Language Baseline Seasonality",
        boxwex = 0.7, staplelty = 0)
boxplot(nu_ts ~ cycle(nu_ts), col = 'mistyrose', main = "Non-local Language Uplift Seasonality",
        boxwex = 0.7, staplelty = 0)

# ============================================================
# Create in/out sample & train model for non-local baseline
# ============================================================

# MR data for test
sample_in_nonlocal <- ts(nb_df$Baseline_English, start = c(2015, 6), end = c(2018, 11), frequency = 12)
sample_out_nonlocal <- window(nb_ts, start = c(2018, 12), end = c(2019, 5), frequency = 12)

fit <- HoltWinters(sample_in_nonlocal, alpha = 0.2, beta = 0.2, gamma = 0.7, seasonal = "multiplicative")
fit

forecast <- forecast(fit, h = 6, level = c(80,95))
#forecast$mean<-exp(forecast$mean)
#forecast$upper<-exp(forecast$upper)
#forecast$lower<-exp(forecast$lower)
#forecast$x<-exp(forecast$x)

plot(forecast, main = "Test of Non-local Baseline Forecast Model\nHolt-Winters")
grid (NULL,NULL, lty = 1, col = "light gray")
points(sample_in_nonlocal, pch = 1)
points(forecast$mean, pch = 1)
points(sample_out_nonlocal, pch = 3)
abline(reg = lm(nb_ts ~ time(nb_ts)), col = 'red')

out <- as.vector(sample_out_nonlocal)
pred <- as.vector(forecast$mean)
diff <- pred - out
percent.diff <- round((abs(diff)/out)*100,digits=1)
t1 <- data.frame(out,pred,diff,percent.diff)
t1

# =======================
# Forecast using Model
# =======================

fit <- HoltWinters(nb_ts, alpha = 0.4709647, beta = 0, gamma = 0.1511993)
summary(fit)

forecast <- forecast(fit, h = 8, level = c(80,95))
#forecast$mean<-exp(forecast$mean)
#forecast$upper<-exp(forecast$upper)
#forecast$lower<-exp(forecast$lower)
#forecast$x<-exp(forecast$x)

plot(forecast, main = "Non-local Baseline Forecast (c. 80/95)")
grid (NULL,NULL, lty = 1, col = "light gray")
points(nb_ts, pch = 1)
points(forecast$mean, pch = 1)
abline(reg = lm(nb_ts ~ time(nb_ts)), col = 'red')

t2 <- data.frame(forecast$lower, forecast$mean, forecast$upper)
t2

# =====================================================
# Derive uplift levels - avg uplift % by campaign type
# =====================================================

# group by campaign
View(nonlocal_up)

a <- nonlocal_up %>%
  group_by(Campaign.ID, Campaign.Type, Response.Month) %>%
  summarize(count = n()) %>%
  arrange(desc(count))
a_df <- as.data.frame(a)
colnames(a_df)[colnames(a_df)=='count'] <- 'Uplift'

# join with baseline to pull in monthly baseline
b <- nonlocal_base %>%
  group_by(Response.Month) %>%
  summarize(count = n()) %>%
  arrange(Response.Month)
b_df <- as.data.frame(b)
colnames(b_df)[colnames(b_df)=='count'] <- 'Baseline'

c <- full_join(a_df,b_df)

# calculate baseline contribution at the campaign level
c$Uplift.Ratio <- (c$Uplift / c$Baseline)*100

# visualize uplift ratio distribution
boxplot(Uplift.Ratio ~ Campaign.Type, data = c, col = 'mistyrose',
        main = "Non-local Uplift Contribution by Campaign Type",
        xlab = "Uplift Ration: % of Baseline",
        boxwex = 0.7, staplelty = 0, las = 2, horizontal = TRUE)

# calc and visualize average % uplift for each campaign type
mean_Uplift.Ratio <- c %>%
  group_by(Campaign.Type) %>%
  summarize(mean = mean(Uplift.Ratio)) %>%
  arrange(desc(mean))
View(mean_Uplift.Ratio)

d <- full_join(mean_Uplift.Ratio,c)
d <- as.data.frame(d)

ggplot(d, aes(x = Campaign.Type, y = Uplift.Ratio)) +
  geom_point(alpha = .2) +
  geom_point(aes(x = Campaign.Type, y = mean), color = 'red', shape = 4, stroke = 1.5) +
  coord_flip() +
  geom_jitter(alpha = .2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle('Non-local Uplift Contribution by Campaign Type') +
  ylab('Uplift Ratio: % of Baseline')

View(mean_Uplift.Ratio)

# ============================================================
# Create in/out sample & train model for local baseline
# ============================================================

# MR data for test
sample_in_local <- ts(lb_df$Baseline_Local, start = c(2015, 6), end = c(2018, 11), frequency = 12)
sample_out_local <- window(lb_ts, start = c(2018, 12), end = c(2019, 5), frequency = 12)

fit <- HoltWinters(sample_in_local, alpha = 0.7, beta = 0.003861955, gamma = 0.8)
fit

forecast <- forecast(fit, h = 6, level = c(80,95))
#forecast$mean<-exp(forecast$mean)
#forecast$upper<-exp(forecast$upper)
#forecast$lower<-exp(forecast$lower)
#forecast$x<-exp(forecast$x)

plot(forecast, main = "Test of Local Baseline Forecast Model\nHolt-Winters")
grid (NULL,NULL, lty = 1, col = "light gray")
points(sample_in_local, pch = 1)
points(forecast$mean, pch = 1)
points(sample_out_local, pch = 3)
abline(reg = lm(lb_ts ~ time(lb_ts)), col = 'red')

out <- as.vector(sample_out_local)
pred <- as.vector(forecast$mean)
diff <- pred - out
percent.diff <- round((abs(diff)/out)*100,digits=1)
t1 <- data.frame(out,pred,diff,percent.diff)
t1

# =======================
# Forecast using Model
# =======================

fit <- HoltWinters(lb_ts, alpha = 0.7, beta = 0.003861955, gamma = 0.8)
fit

forecast <- forecast(fit, h = 8, level = c(80,95))
#forecast$mean<-exp(forecast$mean)
#forecast$upper<-exp(forecast$upper)
#forecast$lower<-exp(forecast$lower)
#forecast$x<-exp(forecast$x)

plot(forecast, main = "Local Baseline Forecast (c. 80/95)")
grid (NULL,NULL, lty = 1, col = "light gray")
points(lb_ts, pch = 1)
points(forecast$mean, pch = 1)
abline(reg = lm(lb_ts ~ time(lb_ts)), col = 'red')

t2 <- data.frame(forecast$lower, forecast$mean, forecast$upper)
t2

# =====================================================
# Derive uplift levels - avg uplift % by campaign type
# =====================================================

View(local_up)

a <- local_up %>%
  group_by(Campaign.ID, Campaign.Type, Response.Month) %>%
  summarize(count = n()) %>%
  arrange(desc(count))
a_df <- as.data.frame(a)
colnames(a_df)[colnames(a_df)=='count'] <- 'Uplift'

# join with baseline table to bring in Baseline for time period under campaign uplift
b <- local_base %>%
  group_by(Response.Month) %>%
  summarize(count = n()) %>%
  arrange(Response.Month)
b_df <- as.data.frame(b)
colnames(b_df)[colnames(b_df)=='count'] <- 'Baseline'

c <- full_join(a_df,b_df)
c_noNA <- subset(c, !is.na(Baseline), select = Campaign.ID:Baseline)

# divide uplift in month by baseline in-month to derive % of baseline boosted by each campaign
c_noNA$Uplift.Ratio <- (c_noNA$Uplift / c_noNA$Baseline)*100

# visualize uplift ratio distribution
boxplot(Uplift.Ratio ~ Campaign.Type, data = c_noNA, col = 'mistyrose',
        main = "Local Uplift Contribution by Campaign Type",
        xlab = "Uplift Ration: % of Baseline",
        boxwex = 0.7, staplelty = 0, las = 2, horizontal = TRUE)

# calc and visualize average % uplift for each campaign type
mean_Uplift.Ratio <- c_noNA %>%
  group_by(Campaign.Type) %>%
  summarize(mean = mean(Uplift.Ratio)) %>%
  arrange(desc(mean))
mean_Uplift.Ratio

d <- full_join(mean_Uplift.Ratio,c_noNA)
d <- as.data.frame(d)

ggplot(d, aes(x = Campaign.Type, y = Uplift.Ratio)) +
  geom_point(alpha = .2) +
  geom_point(aes(x = Campaign.Type, y = mean), color = 'red', shape = 4, stroke = 1.5) +
  coord_flip() +
  geom_jitter(alpha = .2) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle('Local Uplift Contribution by Campaign Type') +
  ylab('Uplift Ratio: % of Baseline')

View(mean_Uplift.Ratio)
/BU Model.R
no_license
wesleylongsworth/r_projects
R
false
false
13,951
r
if (!require(bibliometrix)) {
  install.packages("bibliometrix")
}
if (!require(igraph)) {
  install.packages("igraph")
}
if (!require(tidyverse)) {
  install.packages("tidyverse")
}
if (!require(roadoi)) {
  install.packages("roadoi")
}
if (!require(fulltext)) {
  install.packages("fulltext")
}
if (!require(tm)) {
  install.packages("tm")
}
if (!require(SnowballC)) {
  install.packages("SnowballC")
}
if (!require(wordcloud)) {
  install.packages("wordcloud")
}
if (!require(cluster)) {
  install.packages("cluster")
}

library(bibliometrix)
library(igraph)
library(tidyverse)
library(roadoi)     # titles
library(fulltext)   # Abstract
library(tm)
library(SnowballC)
library(wordcloud)
library(cluster)

harvest <- function(seed,email){

  D <- readFiles(seed)
  M <- convert2df(D, dbsource="isi",format="plaintext")

  M$ID_WOS <- rownames(M)
  M$ID_WOS <- paste(M$ID_WOS,M$VL,sep = ", V")
  M$ID_WOS <- paste(M$ID_WOS,M$PG,sep = ", P")
  M$ID_WOS <- paste(M$ID_WOS,M$DI,sep = ", DOI ")

  enlaces <- data.frame(ID_WOS=character(),
                        CR=character(),
                        stringsAsFactors = FALSE)

  for (i in M$ID_WOS) {
    row1=M[M$ID_WOS==i,c("ID_WOS","CR")]
    df1=data.frame(ID_WOS=i,CR=strsplit(row1$CR,";"))
    colnames(df1)=c("ID_WOS","CR")
    enlaces=rbind(enlaces,df1)
  }

  grafo_1 <- graph.data.frame(enlaces,directed = TRUE)
  grafo_2 <- delete.vertices(grafo_1,which(degree(grafo_1, mode = "in")==1 & degree(grafo_1, mode = "out")==0))

  giant.component <- function(graph) {
    cl <- clusters(graph)
    induced.subgraph(graph, which(cl$membership == which.max(cl$csize)))}

  grafo_3 <- giant.component(grafo_2)

  clusters <- cluster_walktrap(grafo_3)
  clusters_mx <- cbind(clusters$names, clusters$membership)
  clusters_df <- data.frame(clusters_mx, stringsAsFactors = FALSE)
  names(clusters_df) <- c("ID_WOS", "cluster")
  clusters_3 <- head(clusters_df %>% count(cluster, sort = TRUE), 3)

  df_clusters <- clusters_df[clusters_df$cluster == clusters_3$cluster, ]
  df_clusters_1 <- clusters_df[clusters_df$cluster == clusters_3$cluster[1],]
  df_clusters_2 <- clusters_df[clusters_df$cluster == clusters_3$cluster[2],]
  df_clusters_3 <- clusters_df[clusters_df$cluster == clusters_3$cluster[3],]

  raw_data <- df_clusters_1 %>%
    rename(id = "ID_WOS") %>%
    mutate(id = str_to_lower(id))
  raw_data_1 <- raw_data %>%
    dplyr::filter(grepl(".*doi", id))
  raw_data_1$doi <- sub(".*doi", "", raw_data_1$id)
  raw_data_1 <- raw_data_1 %>%
    mutate(doi = str_trim(doi))

  df <- data.frame(titulo = as.character(), stringsAsFactors = FALSE)
  for (i in raw_data_1$doi) {
    row = try(oadoi_fetch(dois = i, email = email), TRUE)
    if(isTRUE(class(row)=="try-error")) {next}
    else {
      df_new = data.frame(titulo = row$title, stringsAsFactors = FALSE)
    }
    df = rbind(df_new, df)
  }

  jeopCorpus <- Corpus(VectorSource(df$titulo %>% na.omit()))
  paperCorp <- jeopCorpus
  paperCorp <- tm_map(paperCorp, removePunctuation)
  paperCorp <- tm_map(paperCorp, removeNumbers)
  # added tolower
  paperCorp <- tm_map(paperCorp, content_transformer(tolower))
  paperCorp <- tm_map(paperCorp, removeWords, stopwords("english"))
  # moved stripWhitespace
  paperCorp <- tm_map(paperCorp, stripWhitespace)
  paperCorp <- tm_map(paperCorp, stemDocument)
  paperCorp_1 <- tm_map(paperCorp, removeWords, c("the"))

  nube1 <- wordcloud(paperCorp_1, min.freq = 1, max.words=50,
                     random.order=FALSE, rot.per=0.35,
                     colors=brewer.pal(8, "Dark2"))

  raw_data_2 <- df_clusters_2 %>%
    rename(id = "ID_WOS") %>%
    mutate(id = str_to_lower(id))
  raw_data_1_2 <- raw_data_2 %>%
    dplyr::filter(grepl(".*doi", id))
  raw_data_1_2$doi <- sub(".*doi", "", raw_data_1_2$id)
  raw_data_1_2 <- raw_data_1_2 %>%
    mutate(doi = str_trim(doi))

  df_2 <- data.frame(titulo = as.character(), stringsAsFactors = FALSE)
  for (i in raw_data_1_2$doi) {
    row = try(oadoi_fetch(dois = i, email = email), TRUE)
    if(isTRUE(class(row)=="try-error")) {next}
    else {
      df_new = data.frame(titulo = row$title, stringsAsFactors = FALSE)
    }
    df_2 = rbind(df_new, df_2)
  }

  jeopCorpus_2 <- Corpus(VectorSource(df_2$titulo %>% na.omit()))
  paperCorp_2 <- jeopCorpus_2
  paperCorp_2 <- tm_map(paperCorp_2, removePunctuation)
  paperCorp_2 <- tm_map(paperCorp_2, removeNumbers)
  # added tolower
  paperCorp_2 <- tm_map(paperCorp_2, content_transformer(tolower))
  paperCorp_2 <- tm_map(paperCorp_2, removeWords, stopwords("english"))
  # moved stripWhitespace
  paperCorp_2 <- tm_map(paperCorp_2, stripWhitespace)
  paperCorp_2 <- tm_map(paperCorp_2, stemDocument)
  paperCorp_2 <- tm_map(paperCorp_2, removeWords, c("the"))

  nube2 <- wordcloud(paperCorp_2, min.freq = 1, max.words=50,
                     random.order=FALSE, rot.per=0.35,
                     colors=brewer.pal(8, "Dark2"))

  raw_data_3 <- df_clusters_3 %>%
    rename(id = "ID_WOS") %>%
    mutate(id = str_to_lower(id))
  raw_data_1_3 <- raw_data_3 %>%
    dplyr::filter(grepl(".*doi", id))
  raw_data_1_3$doi <- sub(".*doi", "", raw_data_1_3$id)
  raw_data_1_3 <- raw_data_1_3 %>%
    mutate(doi = str_trim(doi))

  df_3 <- data.frame(titulo = as.character(), stringsAsFactors = FALSE)
  for (i in raw_data_1_3$doi) {
    row = try(oadoi_fetch(dois = i, email = email), TRUE)
    if(isTRUE(class(row)=="try-error")) {next}
    else {
      df_new = data.frame(titulo = row$title, stringsAsFactors = FALSE)
    }
    df_3 = rbind(df_new, df_3)
  }

  jeopCorpus_3 <- Corpus(VectorSource(df_3$titulo %>% na.omit()))
  paperCorp_3 <- jeopCorpus_3
  paperCorp_3 <- tm_map(paperCorp_3, removePunctuation)
  paperCorp_3 <- tm_map(paperCorp_3, removeNumbers)
  # added tolower
  paperCorp_3 <- tm_map(paperCorp_3, content_transformer(tolower))
  paperCorp_3 <- tm_map(paperCorp_3, removeWords, stopwords("english"))
  # moved stripWhitespace
  paperCorp_3 <- tm_map(paperCorp_3, stripWhitespace)
  paperCorp_3 <- tm_map(paperCorp_3, stemDocument)
  paperCorp_3 <- tm_map(paperCorp_3, removeWords, c("the"))

  nube3 <- wordcloud(paperCorp_3, min.freq = 1, max.words=50,
                     random.order=FALSE, rot.per=0.35,
                     colors=brewer.pal(8, "Dark2"))

  list(df=M, grafo=grafo_3, cluster_1=paperCorp_1, cluster_2 = paperCorp_2, cluster_3 = paperCorp_3)
}

source("cluster.R")
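# --- Added usage sketch (hypothetical file name and email): `seed` is a Web of Science
# --- plain-text export readable by bibliometrix::readFiles().
res <- harvest(seed = "savedrecs.txt", email = "you@example.org")
str(res, max.level = 1)                               # df, grafo, cluster_1, cluster_2, cluster_3
plot(res$grafo, vertex.label = NA, vertex.size = 2)   # citation network of the giant component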
/harvest.R
permissive
coreofscience/r-harvest
R
false
false
6,387
r
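# A minimal usage sketch for the harvest() function defined above (not part of
# the original script). The seed file name and e-mail address are placeholders:
# the seed must be a plain-text Web of Science export readable by
# bibliometrix::readFiles(), and the e-mail is forwarded to roadoi::oadoi_fetch().
res <- harvest(seed = "savedrecs.txt", email = "someone@example.com")
res$df         # bibliometrix data frame built from the seed records
res$grafo      # giant component of the citation network
res$cluster_1  # term corpus of the largest walktrap cluster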
## makeCacheMatrix & cacheSolve functions are used to cache potentially time-consuming computations (inverting) of Matrices ## This function, makeCacheMatrix creates a special "Matrix" which can: ## Set the value of Matrix ## Get the value of Matrix ## Set the value of inversion ## Get the value of inversion makeCacheMatrix <- function(x = matrix()) { m <- matrix() set <- function(y) { x <<- y m <<- matrix() } get <- function() x setinverse <- function(inverse) m <<- inverse getinverse <- function() m list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ##cacheSolve:This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the ## inverse from the cache. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getinverse() if(!is.na(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data,...) x$setinverse(m) m }
/cachematrix.R
no_license
mazensibai/ProgrammingAssignment2
R
false
false
1,175
r
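# A short usage sketch for the caching functions above (not part of the original
# assignment file); the 2 x 2 matrix is an arbitrary invertible example.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  # computes the inverse with solve() and caches it
cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse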
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods-quantileHeatmap.R
\docType{methods}
\name{quantileHeatmap}
\alias{quantileHeatmap}
\alias{quantileHeatmap,dgCMatrix-method}
\alias{quantileHeatmap,matrix-method}
\alias{quantileHeatmap,seurat-method}
\title{Heatmap with Quantile Breaks}
\usage{
quantileHeatmap(object, ...)

\S4method{quantileHeatmap}{dgCMatrix}(object, ...)

\S4method{quantileHeatmap}{matrix}(object, annotation = NA, clusterRows = TRUE,
  clusterCols = TRUE)

\S4method{quantileHeatmap}{seurat}(object, annotation = NA, clusterRows = TRUE,
  clusterCols = TRUE)
}
\arguments{
\item{object}{Matrix of data.}

\item{...}{\emph{Additional arguments (for the S4 generic definition).}}

\item{annotation}{Column annotations.}

\item{clusterRows}{Perform row clustering.}

\item{clusterCols}{Perform column clustering.}
}
\value{
\code{\link[pheatmap:pheatmap]{pheatmap::pheatmap()}}.
}
\description{
Heatmap with Quantile Breaks
}
\details{
This is helpful for more usefully visualizing single cell data.

Ideas and code from: http://slowkow.com/notes/heatmap-tutorial/
}
\author{
Rory Kirchner
}
/man/quantileHeatmap.Rd
permissive
larryyang1980/bcbioSingleCell
R
false
true
1,164
rd
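# A hypothetical call sketched from the \usage{} section above (it is not taken
# from the package documentation itself); it assumes bcbioSingleCell is
# installed and loaded, and the count matrix is invented illustration data.
library(bcbioSingleCell)
counts <- matrix(rpois(200, lambda = 5), nrow = 20,
                 dimnames = list(paste0("gene", 1:20), paste0("cell", 1:10)))
quantileHeatmap(counts, clusterRows = TRUE, clusterCols = TRUE)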
#### Things I did not know from the "Functions" chapter ####
library(magrittr)
library(dplyr)

#### THREE MAIN PARTS!!!
# Three parts of a function can be extracted with
#   formals()      # arguments
#   body()
#   environment()  ### namespace!

# These functions allow you to tell if an object is a function:
#   is.function()
#   is.primitive() # Don't have a BODY! oooooooh!

# This code makes a list of all functions in the base package
objs <- mget(ls("package:base"), inherits = TRUE)
funs <- Filter(is.function, objs)

objects_in_dplyr <- mget(ls("package:dplyr"), inherits = TRUE)
funs_in_dplyr <- Filter(is.function, objects_in_dplyr)

# Find all primitive functions
primitive.funs <- Filter(is.primitive, objs)

# tangential discoveries
# Reduce uses a binary function to successively combine the elements of a given
# vector and a possibly given initial value.
# Filter extracts the elements of a vector for which a predicate (logical)
# function gives true.
# Find and Position give the first or last such element and its position in the
# vector, respectively.
# Map applies a function to the corresponding elements of given vectors.
# Negate creates the negation of a given function.

# Reduce(f, x, init, right = FALSE, accumulate = FALSE)
#   f is the predicate which gives a logical answer of true or false
#   x is the vector from which elements which are "true" are extracted
# Filter(f, x)
#   gets first or last element that meets the condition in f
# Find(f, x, right = FALSE, nomatch = NULL)
#   applies f to the corresponding elements of the given vectors
# Map(f, ...)
#   creates the negation of a given function
# Negate(f)
# Position(f, x, right = FALSE, nomatch = NA_integer_)

# This code makes a list of all functions in any R script
out <- (subset(getParseData(parse('~/repos/openR/openr_vocabulary.R')),
               token == "SYMBOL_FUNCTION_CALL")["text"] %>% unique)$text %>% sort

# Which base function has the most arguments:
which.max(lapply(funs, function(x) length(formals(x))))

# How many functions have no arguments?
length(which(unlist(lapply(funs, function(x) length(formals(x)))) < 1))
# 225 functions

#### Lexical Scoping ####
# R has two types of scoping: lexical scoping, implemented automatically at the
# language level, and dynamic scoping, used in select functions to save typing
# during interactive analysis.
#
# Principles behind lexical scoping:
# name masking
# I thought this was cool!
j <- function(x) {
  y <- 2
  function() {
    # even though y is not defined in the function or the function call,
    # it is in the environment.
    c(x, y)
  }
}
k <- j(1)
k()
# Environments contain variable definitions!
#
# functions vs. variables
# a fresh start
# dynamic lookup
#
# What is this doing?
f <- function() x + 1
# Ideally this should be:
l <- function() { x + 1 }
codetools::findGlobals(l)
#
# this is very handy! Empty the environment
environment(f) <- emptyenv()

### What are the four principles that govern how R looks for values?
# 1. Name masking. Variables are evaluated according to the highest-precedence
#    environment in which they are defined, starting from the local environment
#    and working upwards through each parent environment.
# 2. Functions vs. variables. For all intents and purposes, function names are
#    evaluated by the same rules as for variables. If it is implicit that a
#    function is being used, R will ignore objects with the same name that are
#    not functions.
# 3. Fresh starts. Functions do not have state (unless the environment of the
#    function is changed).
# 4. Dynamic lookup. Variables are evaluated when needed, and so variables may
#    be defined outside of the function's environment.

#### Every operation in R is a function call ####
# including
#   ( and )
#   infix operators: +
#   control flow operators: for if while
#   subsetting operators: [ ] and $
#   curly brace {}
# backtick lets you refer to functions or variables that have otherwise
# reserved or illegal names.
# Note the difference between `+` and "+".
# The first one is the value of the object called +, and the second is a string
# containing the character +.

## Nifty useful stuff ##
x <- list(1:3, 4:9, 10:12)
# Use the fact that `[` is a function to subset and get the second element from
# every set above
sapply(x, "[", 2)

## The ... argument
## If a function uses ..., you can only specify arguments listed after ... with
## their full name.

## A list of function arguments can be supplied to a function using do.call()
list_of_args <- list(1:10, na.rm = TRUE)
# supply to the function, mean()
do.call(mean, list_of_args)

## The default value of an argument can be defined in terms of other arguments.
## You can determine if an argument was supplied or not with the missing()
## function.

## How to add a non-trivial default value as an argument to a function
# Non-trivial default values might take several lines of code to compute.
# Instead of inserting that code in the function definition, you could use
# missing() to conditionally compute it if needed. However, this makes it
# hard to know which arguments are required and which are optional without
# carefully reading the documentation.
#
# An alternative is to set the default value to NULL and use is.null()
# to check if the argument was supplied, and then make the computations
# that generate the default value conditional on the is.null() call.
# (A short sketch of this pattern follows at the end of this file's listing.)

## If you want to ensure that an argument is evaluated you can use force():
f <- function(x) {
  10
}
f()

f <- function(x) {
  force(x)
  10
}
f() # Error in force(x) : argument "x" is missing, with no default

add <- function(x) {
  function(y) x + y
}
adders <- lapply(1:10, add)
adders[[1]](10)
adders[[10]](10)
/openr_functions.R
permissive
openpencil/openR
R
false
false
5,626
r
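# A small stand-alone sketch of the NULL-default pattern described in the notes
# above; the function, its argument `n`, and the default rule are invented for
# illustration.
sample_mean <- function(x, n = NULL) {
  # compute the non-trivial default only when the caller did not supply one
  if (is.null(n)) {
    n <- max(1, length(x) %/% 2)
  }
  mean(sample(x, n))
}
sample_mean(1:10)        # uses the computed default, n = 5
sample_mean(1:10, n = 3) # caller-supplied value wins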
/glycolysis.diff.R
no_license
a15274241283/-TCGA-
R
false
false
2,393
r
p <- measure_change_over_time_wide(ggplot2::economics, date, pop, unemploy)

test_that("Plot layers match expectations", {
  expect_is(p$layers[[1]], "ggproto")
})

test_that("Plot returns ggplot object", {
  expect_is(p, "ggplot")
})

test_that("Plot uses correct data", {
  expect_that(names(p$data), equals(c("date", "series_type", "value")))
})

test_that("x axis is labeled 'date'", {
  expect_identical(p$labels$x, "date")
})

test_that("y axis is labeled 'value'", {
  expect_identical(p$labels$y, "value")
})
/tests/testthat/test_measure_change_over_time_wide.R
permissive
romainfrancois/ezEDA-1
R
false
false
513
r
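# How these expectations are typically executed (not part of the test file
# itself); the path matches the file location recorded above and assumes the
# working directory is the package root.
testthat::test_file("tests/testthat/test_measure_change_over_time_wide.R")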
# Items are imported as Embedded data (ED) fields. One file per each condition
# (6 pres_format x 2 prob_context x 2 ppv_prob x 2 followUp_risk) is created.
# A total of 48 txt files are to be created. Each text file must contain the
# following ED fields:
# - presentation format field
# - 01 problem context field
# - 02 problem context field
# - 01 ppv prob field
# - 02 ppv prob field
# - 01 followup risk
# - 02 followup risk
# - 01 prevalence field
# - 01 item field
# - 02 prevalence field
# - 02 item field

# Resources
source("scripts/html_qualtrics_codes.R")
source("functions/items2qualtrics.R") # function to convert txt files to qualtrics txt format

# separated item folder
separated_item_dir <- "materials/qualtrics/output/separated_items/"
response_types_dir <- "materials/qualtrics/input/reponse_type/"
paired_items_dir <- "materials/qualtrics/output/paired_items/"

# All possible prevalences ------------------------------------------------
source("functions/get_prevalences.R")
all_prevalences <- get_prevalences()

# Convert items to qualtrics txt advanced format --------------------------
text_output_dir <- "materials/text_output/"

textual_items <- dir(text_output_dir, pattern = ".txt") %>%
  map(~readChar(paste0(text_output_dir, .x), file.size(paste0(text_output_dir, .x))))

conditions <- dir(text_output_dir, ".txt") %>% gsub("\\.txt", "", .)

get_pair <- function(item_01) {
  # item_01 <- conditions[1]
  if (gsub("([a-z]{2})_[a-z]{4}_ppv[a-z]{3,4}", "\\1", item_01) == "ca") {
    context <- "pr"
  } else if (gsub("([a-z]{2})_[a-z]{4}_ppv[a-z]{3,4}", "\\1", item_01) == "pr") {
    context <- "ca"
  }
  if (gsub("[a-z]{2}_[a-z]{4}_ppv([a-z]{3,4})", "\\1", item_01) == "high") {
    ppv_prob <- "low"
  } else if (gsub("[a-z]{2}_[a-z]{4}_ppv([a-z]{3,4})", "\\1", item_01) == "low") {
    ppv_prob <- "high"
  }
  item_02 <- paste0(context, "_",
                    gsub("[a-z]{2}_([a-z]{4})_ppv[a-z]{3,4}", "\\1", item_01),
                    "_", "ppv", ppv_prob)
  item_02
}

item_pairs <- conditions %>%
  as.tibble() %>%
  rename(item_01 = value) %>%
  mutate(item_02 = conditions %>% map(~get_pair(.x)) %>% unlist()) %>%
  filter(row_number() <= length(conditions) / 2)

item_pairs$item_01 %>%
  walk(~items2qualtrics(list_of_items = .x,
                        outputdir = "materials/text_output/item_blocks/",
                        removePlaceholders = TRUE))

# problems_numbered_ordered_responses %>%
#   walk(~items2qualtrics(list_of_items = .x, responsesdir = response_types_dir,
#                         outputdir = separated_item_dir, removePlaceholders = TRUE))

# pair items: different context, same presentation format, different ppv prob,
# same response type
# function to pair items
# source("functions/pair_items.R")
#
# items_txt <- dir(output_dir, pattern = ".txt")
# txt_files <- dir(separated_item_dir, pattern = ".txt")
# twins <- character(length(txt_files)/2)
#
# txt_files %>%
#   walk(~pair_items(txt_files = .x, separated_item_dir = separated_item_dir,
#                    twins = twins, outputdir = paired_items_dir))
/scripts/TODEL_bayes_item_export.R
no_license
gorkang/R_conditions_creation
R
false
false
3,067
r
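# Illustration of get_pair() defined above on a made-up condition label that
# matches the expected <context>_<format>_ppv<prob> pattern; the label is
# hypothetical, not one of the project's real condition names.
get_pair("ca_text_ppvhigh")
# [1] "pr_text_ppvlow"  (opposite problem context, opposite ppv probability)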
#' Bate and Jones (2002) cross-over design specification
#'
#' Specifies cross-over designs from Bate and Jones (2002).
#'
#' \code{seq_bate_jones()} supports the specification of cross-over designs from
#' Bate and Jones (2002). Designs for five and eight treatments (see \code{D})
#' are supported, for any chosen treatment labels (see \code{labels}). In
#' addition, the designs can be returned in \code{\link[base]{matrix}} or
#' \code{\link[tibble]{tibble}} form (see \code{as_matrix}).
#'
#' Precisely, the \ifelse{html}{\out{(<i>k</i>,<i>j</i>)}}{\eqn{(k,j)}}th
#' element of the cross-over design matrix corresponds to the treatment a
#' subject on the \ifelse{html}{\out{<i>k</i>}}{\eqn{k}}th sequence would
#' receive in the \ifelse{html}{\out{<i>j</i>}}{\eqn{j}}th period.
#'
#' @param D The number of treatments. Must be either five or eight. Defaults to
#' \code{5}.
#' @param labels A \code{\link[base]{vector}} of labels for the treatments.
#' Should be of \code{\link[base]{length}} \code{D}, containing unique elements.
#' Defaults to \code{0:(D - 1)}.
#' @param as_matrix A \code{\link[base]{logical}} variable indicating whether
#' the design should be returned as a \code{\link[base]{matrix}}, or a
#' \code{\link[tibble]{tibble}}. Defaults to \code{T}.
#' @param summary A \code{\link[base]{logical}} variable indicating whether a
#' summary of the function's progress should be printed to the console. Defaults
#' to \code{T}.
#' @return Either a \code{\link[base]{matrix}} if \code{as_matrix = T} (with
#' rows corresponding to sequences and columns to periods), or a
#' \code{\link[tibble]{tibble}} if \code{as_matrix = F} (with rows corresponding
#' to a particular period on a particular sequence). In either case, the
#' returned object will have class \code{xover_seq}.
#' @examples
#' # Bate and Jones (2002) design for five treatments
#' bate_jones <- seq_bate_jones()
#' # Using different labels
#' bate_jones_ABCDE <- seq_bate_jones(labels = LETTERS[1:5])
#' # Returning in tibble form
#' bate_jones_tibble <- seq_bate_jones(as_matrix = F)
#' @references Bate S, Jones B (2002) The construction of universally optimal
#' uniform cross-over designs. \emph{GlaxoSmithKline Biomedical Data Sciences
#' Technical Report}.
#' @author Based on data from the \code{\link[Crossover]{Crossover}} package by
#' Kornelius Rohmeyer.
#' @export
seq_bate_jones <- function(D = 5, labels = 0:(D - 1), as_matrix = T,
                           summary = T) {

  ##### Input checking #########################################################

  check_belong(D, "D", c(5, 8), 1)
  check_labels(labels, D)
  check_logical(as_matrix, "as_matrix")
  check_logical(summary, "summary")

  ##### Main computations ######################################################

  if (summary) {
    message(" Beginning the design specification...")
  }
  if (D == 5) {
    sequences <- matrix(c(1, 3, 2, 5, 4, 4, 5, 2, 3, 1,
                          2, 4, 3, 1, 5, 5, 1, 3, 4, 2,
                          3, 5, 4, 2, 1, 1, 2, 4, 5, 3,
                          4, 1, 5, 3, 2, 2, 3, 5, 1, 4,
                          5, 2, 1, 4, 3, 3, 4, 1, 2, 5,
                          3, 2, 5, 4, 4, 5, 2, 3, 1, 1,
                          4, 3, 1, 5, 5, 1, 3, 4, 2, 2,
                          5, 4, 2, 1, 1, 2, 4, 5, 3, 3,
                          1, 5, 3, 2, 2, 3, 5, 1, 4, 4,
                          2, 1, 4, 3, 3, 4, 1, 2, 5, 5,
                          2, 5, 4, 4, 5, 2, 3, 1, 1, 3,
                          3, 1, 5, 5, 1, 3, 4, 2, 2, 4,
                          4, 2, 1, 1, 2, 4, 5, 3, 3, 5,
                          5, 3, 2, 2, 3, 5, 1, 4, 4, 1,
                          1, 4, 3, 3, 4, 1, 2, 5, 5, 2),
                        15, 10, byrow = T)
  } else {
    sequences <- matrix(c(1, 2, 8, 3, 7, 4, 6, 5, 5, 6, 4, 7, 3, 8, 2, 1,
                          2, 3, 1, 4, 8, 5, 7, 6, 6, 7, 5, 8, 4, 1, 3, 2,
                          3, 4, 2, 5, 1, 6, 8, 7, 7, 8, 6, 1, 5, 2, 4, 3,
                          4, 5, 3, 6, 2, 7, 1, 8, 8, 1, 7, 2, 6, 3, 5, 4,
                          5, 6, 4, 7, 3, 8, 2, 1, 1, 2, 8, 3, 7, 4, 6, 5,
                          6, 7, 5, 8, 4, 1, 3, 2, 2, 3, 1, 4, 8, 5, 7, 6,
                          7, 8, 6, 1, 5, 2, 4, 3, 3, 4, 2, 5, 1, 6, 8, 7,
                          8, 1, 7, 2, 6, 3, 5, 4, 4, 5, 3, 6, 2, 7, 1, 8,
                          2, 8, 3, 7, 4, 6, 5, 5, 6, 4, 7, 3, 8, 2, 1, 1,
                          3, 1, 4, 8, 5, 7, 6, 6, 7, 5, 8, 4, 1, 3, 2, 2,
                          4, 2, 5, 1, 6, 8, 7, 7, 8, 6, 1, 5, 2, 4, 3, 3,
                          5, 3, 6, 2, 7, 1, 8, 8, 1, 7, 2, 6, 3, 5, 4, 4,
                          6, 4, 7, 3, 8, 2, 1, 1, 2, 8, 3, 7, 4, 6, 5, 5,
                          7, 5, 8, 4, 1, 3, 2, 2, 3, 1, 4, 8, 5, 7, 6, 6,
                          8, 6, 1, 5, 2, 4, 3, 3, 4, 2, 5, 1, 6, 8, 7, 7,
                          1, 7, 2, 6, 3, 5, 4, 4, 5, 3, 6, 2, 7, 1, 8, 8),
                        16, 16, byrow = T)
  }
  if (summary) {
    message("...completed the design specification. Preparing outputs...")
  }
  sequences <- convert_labels(sequences, D, labels, 1:D)
  sequences <- transform_to_xover(sequences, labels, as_matrix)

  ##### Outputting #############################################################

  if (summary) {
    message("...outputting.")
  }
  return(sequences)
}
/R/seq_bate_jones.R
no_license
mjg211/xover
R
false
false
5,154
r
library(MASS)

trainingSampleNormalization <- function(xl) {
  n <- dim(xl)[2] - 1
  for (i in 1:n) {
    xl[, i] <- (xl[, i] - mean(xl[, i])) / sd(xl[, i])
  }
  return (xl)
}

trainingSamplePrepare <- function(xl) {
  l <- dim(xl)[1]
  n <- dim(xl)[2] - 1
  xl <- cbind(xl[, 1:n], seq(from = -1, to = -1, length.out = l), xl[, n + 1])
}

## Quadratic loss function
lossQuad <- function(x) {
  return ((x - 1)^2)
}

# Stochastic gradient descent for ADALINE
sg.ADALINE <- function(xl, eta = 1, lambda = 1/6) {
  l <- dim(xl)[1]
  n <- dim(xl)[2] - 1
  w <- c(1/2, 1/2, 1/2)
  iterCount <- 0
  # initialise Q (the empirical risk)
  Q <- 0
  for (i in 1:l) {
    # dot product <w, x>
    wx <- sum(w * xl[i, 1:n])
    # margin
    margin <- wx * xl[i, n + 1]
    Q <- Q + lossQuad(margin)
  }
  repeat {
    # compute the margin for every object of the training sample
    margins <- array(dim = l)
    for (i in 1:l) {
      xi <- xl[i, 1:n]
      yi <- xl[i, n + 1]
      margins[i] <- crossprod(w, xi) * yi
    }
    # select the misclassified objects
    errorIndexes <- which(margins <= 0)
    if (length(errorIndexes) > 0) {
      # pick a random index among the errors
      i <- sample(errorIndexes, 1)
      iterCount <- iterCount + 1
      xi <- xl[i, 1:n]
      yi <- xl[i, n + 1]
      # dot product <w, xi>
      wx <- sum(w * xi)
      # gradient step
      margin <- wx * yi
      # loss on this object
      ex <- lossQuad(margin)
      eta <- 1 / sqrt(sum(xi * xi))
      w <- w - eta * (wx - yi) * xi
      # update Q
      Qprev <- Q
      Q <- (1 - lambda) * Q + lambda * ex
    } else {
      break
    }
  }
  return (w)
}

# Number of objects in each class
ObjectsCountOfEachClass <- 500

## training data
Sigma1 <- matrix(c(1, 0, 0, 10), 2, 2)
Sigma2 <- matrix(c(10, 0, 0, 1), 2, 2)
xy1 <- mvrnorm(n = ObjectsCountOfEachClass, c(0, 0), Sigma1)
xy2 <- mvrnorm(n = ObjectsCountOfEachClass, c(10, -10), Sigma2)
xl <- rbind(cbind(xy1, -1), cbind(xy2, +1))
colors <- c("red", "white", "blue")

## Normalise the data
xlNorm <- trainingSampleNormalization(xl)
xlNorm <- trainingSamplePrepare(xlNorm)

## Plot the data
## ADALINE
plot(xlNorm[, 1], xlNorm[, 2], pch = 21, bg = colors[xl[, 3] + 2], asp = 1,
     xlab = "x1", ylab = "x2", main = "ADALINE")
w <- sg.ADALINE(xlNorm)
abline(a = w[3] / w[2], b = -w[1] / w[2], lwd = 3, col = "red")
/Ada.R
no_license
blackberry26/SMPR
R
false
false
3,023
r
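# Classifying a new point with the fitted weights (this check is not in the
# original script; the point is arbitrary and must be normalised the same way
# as the training data). trainingSamplePrepare() appends a constant -1 feature,
# so the decision rule is sign(w1 * x1 + w2 * x2 - w3).
new_point <- c(0.5, -0.5, -1)
predicted_class <- sign(sum(w * new_point))  # -1 or +1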
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setariaviridis-package.R
\docType{package}
\name{setariaviridis-package}
\alias{setariaviridis}
\alias{setariaviridis-package}
\title{setariaviridis: Setaria Viridis Data}
\description{
\if{html}{\figure{logo.png}{options: align='right' alt='logo' width='120'}}

Setaria viridis is a familiar weed. This package provides data measured for
each branch of Setaria viridis, intended for practicing data analysis. The
data were collected from a wild Setaria viridis plant.
}
\author{
\strong{Maintainer}: Keisuke Ando \email{ando@maslab.aitech.ac.jp}
}
\keyword{internal}
/man/setariaviridis-package.Rd
permissive
NONONOexe/setariaviridis
R
false
true
634
rd
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

vapour_geom_name_cpp <- function(dsource, layer, sql, ex) {
    .Call('_vapour_vapour_geom_name_cpp', PACKAGE = 'vapour', dsource, layer, sql, ex)
}

geometry_cpp_limit_skip <- function(dsn, layer, sql, ex, format, limit_n, skip_n) {
    .Call('_vapour_geometry_cpp_limit_skip', PACKAGE = 'vapour', dsn, layer, sql, ex, format, limit_n, skip_n)
}

geometry_cpp <- function(dsn, layer, sql, ex, format, fid) {
    .Call('_vapour_geometry_cpp', PACKAGE = 'vapour', dsn, layer, sql, ex, format, fid)
}

register_gdal_cpp <- function() {
    .Call('_vapour_register_gdal_cpp', PACKAGE = 'vapour')
}

cleanup_gdal_cpp <- function() {
    .Call('_vapour_cleanup_gdal_cpp', PACKAGE = 'vapour')
}

version_gdal_cpp <- function() {
    .Call('_vapour_version_gdal_cpp', PACKAGE = 'vapour')
}

driver_id_gdal_cpp <- function(dsn) {
    .Call('_vapour_driver_id_gdal_cpp', PACKAGE = 'vapour', dsn)
}

drivers_list_gdal_cpp <- function() {
    .Call('_vapour_drivers_list_gdal_cpp', PACKAGE = 'vapour')
}

proj_to_wkt_gdal_cpp <- function(proj4string) {
    .Call('_vapour_proj_to_wkt_gdal_cpp', PACKAGE = 'vapour', proj4string)
}

driver_gdal_cpp <- function(dsn) {
    .Call('_vapour_driver_gdal_cpp', PACKAGE = 'vapour', dsn)
}

layer_names_gdal_cpp <- function(dsn) {
    .Call('_vapour_layer_names_gdal_cpp', PACKAGE = 'vapour', dsn)
}

feature_count_gdal_cpp <- function(dsn, layer, sql, ex) {
    .Call('_vapour_feature_count_gdal_cpp', PACKAGE = 'vapour', dsn, layer, sql, ex)
}

read_fields_gdal_cpp <- function(dsn, layer, sql, limit_n, skip_n, ex, fid_column_name) {
    .Call('_vapour_read_fields_gdal_cpp', PACKAGE = 'vapour', dsn, layer, sql, limit_n, skip_n, ex, fid_column_name)
}

read_geometry_gdal_cpp <- function(dsn, layer, sql, what, textformat, limit_n, skip_n, ex) {
    .Call('_vapour_read_geometry_gdal_cpp', PACKAGE = 'vapour', dsn, layer, sql, what, textformat, limit_n, skip_n, ex)
}

read_names_gdal_cpp <- function(dsn, layer, sql, limit_n, skip_n, ex) {
    .Call('_vapour_read_names_gdal_cpp', PACKAGE = 'vapour', dsn, layer, sql, limit_n, skip_n, ex)
}

projection_info_gdal_cpp <- function(dsn, layer, sql) {
    .Call('_vapour_projection_info_gdal_cpp', PACKAGE = 'vapour', dsn, layer, sql)
}

report_fields_gdal_cpp <- function(dsn, layer, sql) {
    .Call('_vapour_report_fields_gdal_cpp', PACKAGE = 'vapour', dsn, layer, sql)
}

vsi_list_gdal_cpp <- function(dsn) {
    .Call('_vapour_vsi_list_gdal_cpp', PACKAGE = 'vapour', dsn)
}

sds_list_gdal_cpp <- function(dsn) {
    .Call('_vapour_sds_list_gdal_cpp', PACKAGE = 'vapour', dsn)
}

warp_in_memory_gdal_cpp <- function(dsn, source_WKT, target_WKT, target_geotransform, target_dim, band) {
    .Call('_vapour_warp_in_memory_gdal_cpp', PACKAGE = 'vapour', dsn, source_WKT, target_WKT, target_geotransform, target_dim, band)
}

raster_info_gdal_cpp <- function(dsn, min_max) {
    .Call('_vapour_raster_info_gdal_cpp', PACKAGE = 'vapour', dsn, min_max)
}

raster_gcp_gdal_cpp <- function(dsn) {
    .Call('_vapour_raster_gcp_gdal_cpp', PACKAGE = 'vapour', dsn)
}

raster_io_gdal_cpp <- function(dsn, window, band, resample) {
    .Call('_vapour_raster_io_gdal_cpp', PACKAGE = 'vapour', dsn, window, band, resample)
}

gdal_read_fids_all <- function(dsn, layer, sql, ex) {
    .Call('_vapour_gdal_read_fids_all', PACKAGE = 'vapour', dsn, layer, sql, ex)
}

gdal_read_fids_ij <- function(dsn, layer, sql, ex, ij) {
    .Call('_vapour_gdal_read_fids_ij', PACKAGE = 'vapour', dsn, layer, sql, ex, ij)
}

gdal_read_fids_ia <- function(dsn, layer, sql, ex, ia) {
    .Call('_vapour_gdal_read_fids_ia', PACKAGE = 'vapour', dsn, layer, sql, ex, ia)
}

gdal_dsn_read_geom_all <- function(dsn, layer, sql, ex, format) {
    .Call('_vapour_gdal_dsn_read_geom_all', PACKAGE = 'vapour', dsn, layer, sql, ex, format)
}

gdal_dsn_read_geom_ij <- function(dsn, layer, sql, ex, format, ij) {
    .Call('_vapour_gdal_dsn_read_geom_ij', PACKAGE = 'vapour', dsn, layer, sql, ex, format, ij)
}

gdal_dsn_read_geom_ia <- function(dsn, layer, sql, ex, format, ia) {
    .Call('_vapour_gdal_dsn_read_geom_ia', PACKAGE = 'vapour', dsn, layer, sql, ex, format, ia)
}

gdal_dsn_read_geom_fa <- function(dsn, layer, sql, ex, format, fa) {
    .Call('_vapour_gdal_dsn_read_geom_fa', PACKAGE = 'vapour', dsn, layer, sql, ex, format, fa)
}

gdal_dsn_read_fields_all <- function(dsn, layer, sql, ex, fid_column_name) {
    .Call('_vapour_gdal_dsn_read_fields_all', PACKAGE = 'vapour', dsn, layer, sql, ex, fid_column_name)
}

gdal_dsn_read_fields_ij <- function(dsn, layer, sql, ex, fid_column_name, ij) {
    .Call('_vapour_gdal_dsn_read_fields_ij', PACKAGE = 'vapour', dsn, layer, sql, ex, fid_column_name, ij)
}

gdal_dsn_read_fields_ia <- function(dsn, layer, sql, ex, fid_column_name, ia) {
    .Call('_vapour_gdal_dsn_read_fields_ia', PACKAGE = 'vapour', dsn, layer, sql, ex, fid_column_name, ia)
}

gdal_dsn_read_fields_fa <- function(dsn, layer, sql, ex, fid_column_name, fa) {
    .Call('_vapour_gdal_dsn_read_fields_fa', PACKAGE = 'vapour', dsn, layer, sql, ex, fid_column_name, fa)
}
/R/RcppExports.R
no_license
jsta/vapour
R
false
false
5,169
r
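# The wrappers above are internal bindings to compiled C++ code; a quick sanity
# check (assuming the vapour package is installed, so that its shared library
# is loaded) is to call one of the argument-free wrappers defined in this file:
vapour:::version_gdal_cpp()       # GDAL version string reported by the bindings
vapour:::drivers_list_gdal_cpp()  # available GDAL drivers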
library(ggplot2)
library(reshape)

preprocess_dist_m <- function(dist_m) {
  data <- melt(dist_m)
  names(data) <- c('names1', 'names2', 'distance')
  data <- data[which(data$names1 != data$names2), ]
  return(data)
}

add_new_group <- function(data, group_frame) {
  A1 <- group_frame
  names(A1) <- paste(names(A1), 1, sep = '')
  A2 <- group_frame
  names(A2) <- paste(names(A2), 2, sep = '')
  dm <- merge(data, A1, by.x = c('names1'), by.y = c('names1'), all.x = T, all.y = F)
  dm <- merge(dm, A2, by.x = c('names2'), by.y = c('names2'), all.x = T, all.y = F)
  dm <- dm[dm$names1 != dm$names2, ]
  return(dm)
}

quantile_partition <- function(group_frame) {
  # drop_ind <- which(is.na(group_frame[,2]))
  # if (length(drop_ind)>0){
  #   group_frame <- group_frame[-drop_ind,]
  # }
  quantiles <- quantile(group_frame[, 2])
  quantiles <- cut(group_frame[, 2], quantiles,
                   labels = c('1stQ', '2ndQ', '3rdQ', '4thQ'))
  group_frame$quantiles <- quantiles
  return(group_frame)
}

get_distance_histograms <- function(dm) {
  agg <- means_compute(dm)
  pp <- ggplot(dm, aes(x = distance, color = country1)) +
    geom_histogram(binwidth = 0.001) +
    facet_grid(country1 ~ country2) +
    geom_vline(aes(xintercept = x), data = agg)
  return(pp)
}

wilcox_test <- function(dm) {
  countries <- levels(dm$country1)
  #print(countries)
  p_vals <- c()
  groups <- c()
  for (country1 in countries) {
    for (country2 in countries) {
      if (country2 != country1) {
        groups <- c(groups, paste(country1, '_vs_', country2, sep = ''))
        sub <- dm[dm$country1 == country1, ]
        p_val <- wilcox.test(sub$distance[sub$country2 == country1],
                             sub$distance[sub$country2 == country2],
                             alternative = 'less')
        p_vals <- c(p_vals, p_val$p.value)
      }
    }
  }
  res <- data.frame(groups = groups, p_values = p_vals)
  return(res)
}

distance_histogram <- function(setnum, suffix = 'final_2', treshold = 5000) {
  #distance_m_list <- get_distance_m_list(setnum)
  distance_m_list <- load_distances(setnum, suffix = suffix, treshold = treshold)
  distance_m <- distance_m_list[[3]]
  dm <- melt(distance_m)
  names(dm) <- c('names1', 'names2', 'distance')
  A1 <- data.frame(names1 = data.sample_names2$names,
                   country1 = data.sample_names2$country)
  A2 <- data.frame(names2 = data.sample_names2$names,
                   country2 = data.sample_names2$country)
  dm <- merge(dm, A1, by.x = c('names1'), by.y = c('names1'), all.x = F, all.y = F)
  dm <- merge(dm, A2, by.x = c('names2'), by.y = c('names2'), all.x = F, all.y = F)
  dm <- dm[dm$names1 != dm$names2, ]
  dm$country1 <- as.factor(as.character(dm$country1))
  print(dim(dm))
  p_vals <- wilcox_test(dm)
  agg <- means_compute(dm)
  dm$country <- NA
  inds <- which(dm$country1 == dm$country2)
  dm$country[inds] <- as.character(dm$country1[inds])
  # pal <- brewer.pal(4,"Set1")
  # pal <- pal[c(1,2,4)]
  # names(pal) <- levels(statdata$country)
  # pp <- ggplot(dm, aes(x = distance, color = country1)) +
  #   geom_histogram(binwidth = 0.001) +
  #   facet_grid(country1 ~ country2) +
  #   geom_vline(aes(xintercept = x), data = agg)
  ## How to mark the mean line?
  ## Also need to make the colors uniform
  return(list(dm, p_vals, agg))
}

get_p_vals <- function(setnums, suffix = 'final_2', treshold = 5000) {
  #len <- length(setnums)
  res <- data.frame(groups = c(), p_values = c(), set = c())
  for (setnum in setnums) {
    print(setnum)
    temp <- try(distance_histogram(setnum = setnum, suffix = suffix,
                                   treshold = treshold)[[2]])
    if (class(temp) != "try-error") {
      temp$set <- setnum
      res <- rbind(res, temp)
    }
  }
  return(res)
}

cross_naming <- function(names1, names2) {
  len <- length(names1)
  v <- rep(x = '', times = len)
  f1 <- function(el) {
    return(paste(el, '_vs_', el, sep = ''))
  }
  f2 <- function(el) {
    return(paste(el, '_vs_others', sep = ''))
  }
  v[which(names1 == names2)] <- sapply(names1[which(names1 == names2)], f1)
  v[which(names1 != names2)] <- sapply(names1[which(names1 != names2)], f2)
  return(v)
}

# dir=NA, contignum=NA, mean_CHN, mean_EUR, mean_RUS, mean_USA, CHN_p, EUR_p, RUS_p, USA_P
p_vals.data <- data.frame(matrix(ncol = 5, nrow = 0))
means.data <- data.frame(matrix(ncol = 18, nrow = 0))

means_compute <- function(dm) {
  agg <- aggregate(dm[, c('distance')], by = list(dm[, 4], dm[, 5]), FUN = mean)
  names(agg) <- c(names(dm[, c(4:5)]), 'x')
  # delete repeated els
  return(agg)
}

ref_mean_dist <- function(setnums, prefix, treshold, selector = 'REF') {
  i <- 0
  for (setnum in setnums) {
    data <- load_distances(setnum, prefix, treshold)
    dist_ref <- data.frame(names = names(data[[3]][selector, ]),
                           dist = data[[3]][selector, ])
    dist_cov <- data.frame(names = names(data[[1]][selector, ]),
                           cov = data[[1]][, selector],
                           diff = data[[2]][, selector])
    dist_ref <- merge(dist_ref, dist_cov, by.x = c("names"), by.y = c("names"),
                      all.x = F, all.y = F)
    dist_ref <- merge(dist_ref, data.sample_names2, by.x = c("names"),
                      by.y = c("names"), all.x = T, all.y = F)
    dist_ref$setnum <- setnum
    if (i == 0) {
      res <- dist_ref
    } else {
      res <- rbind(res, dist_ref)
    }
    i <- i + 1
  }
  res <- res[which(res$names != 'REF'), ]
  means <- aggregate(res$dist, by = list(res$country, res$setnum), mean)
  medians <- aggregate(res$dist, by = list(res$country, res$setnum), median)
  sds <- aggregate(res$dist, by = list(res$country, res$setnum), sd)
  agr_res <- data.frame(country = means[, 1], setnum = means[, 2],
                        mean = means[, 3], median = medians[, 3], sd = sds[, 3])
  return(list(res, agr_res))
}
/R/stat_tests.R
no_license
kovarsky/metagenome_old
R
false
false
5,244
r
#' Performs quality checks
#'
#' The function checks each fastq file specified in the "data file" for quality, and writes findings to a report.
#'
#' @param dataFile An R data frame with the data to be processed. The R object is a standard format, and must contain the following headings: File, PE, Sample, Replicate, FilteredFile. More information about the file is available at \code{\link{datafileTemplate}}.
#' @param preFilter A logical - if true (default), the function will select and analyse files which have not yet been processed for quality. If false, the function will select and analyse those files which have been processed for quality, i.e. the "filtered file" in the data file.
#' @return FastqQA object. Outputs quality results in the form of raw data (an R FastqQA object) and HTML format (saved to "QA" directory).
#' @details The function should be run in the working directory, where all fastq files are found.
#' @details \code{\link{runQA}} iterates over each file specified in the "datafile". It runs a quality assessment from the \code{\link{ShortRead}} package. The \code{\link{ShortRead}} package (\url{https://bioconductor.org/packages/release/bioc/html/ShortRead.html}) contains more information about this step. The quality assessment may be performed before and after the filtering step, by setting the "pre-filter" parameter to true or to false, respectively. All quality assessment data is output to the "QA" directory.
#' Quality reports are output to the working directory (under the QA directory). R objects of the raw data used to generate the reports are also saved to this directory.
#' @export
#' @import ShortRead

runQA <- function(dataFile, preFilter = TRUE){
  ##### PARALLELISATION NOT WORKING, DISABLED FOR NOW. IF RE-INTRODUCED, DON'T FORGET TO ADD MC.CORES PARAMETER TO RUNQA FUNCTION, AND ADD BPPARAM TO QA FUNCTION
  # if(is.null(mc.cores)){
  #   BPPARAM = registered()[1]
  # }
  # else{
  #   BPPARAM = MulticoreParam(workers = mc.cores)
  # }

  print("QA results will be output to the 'QA' folder")

  if (preFilter == TRUE){
    # pre-filter quality check
    print("QA on pre-filtered files")
    QASum_filter <- qa(dirPath = dataFile$FILE, type = "fastq", BPPARAM = bpparam())
    print("QA on pre-filtered files completed")
    QASum_prefilterRpt <- report(x = QASum_filter, dest = "QA/prefilter", type = "html")
    print("QA report and data are now available in the 'QA' folder")
    QASum_prefilter <- QASum_filter
    save(QASum_prefilter, file = "./QA/prefilter/QASum_prefilter.RData")
  } else if(preFilter == FALSE){
    # post-filter quality check
    print("QA on post-filtered files")
    QASum_filter <- qa(dirPath = unique(dataFile$FILTEREDFILE), type = "fastq", BPPARAM = bpparam())
    print("QA on post-filtered files completed")
    QASum_postfilterRpt <- report(x = QASum_filter, dest = "QA/postfilter/", type = "html")
    print("QA report and data are now available in the 'QA' folder")
    QASum_postfilter <- QASum_filter
    save(QASum_postfilter, file = "./QA/postfilter/QASum_postfilter.RData")
  }

  return(QASum_filter)
}
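A brief usage sketch of runQA() (file names below are hypothetical; the data frame needs the FILE and FILTEREDFILE columns the function body reads):

# library(ShortRead)
# datafile <- read.csv("datafile.csv", stringsAsFactors = FALSE)  # hypothetical path to the "data file"
# qa_pre  <- runQA(datafile, preFilter = TRUE)    # report and RData written under QA/prefilter
# qa_post <- runQA(datafile, preFilter = FALSE)   # report and RData written under QA/postfilter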
/R/qa.R
no_license
nixstix/RNASeqAnalysis
R
false
false
3,455
r
#' R6 Object for Feature Extraction.
#'
#' @description
#' \code{Xtractor} calculates features from raw data for each ID of a grouping variable individually. This process can be parallelized with the package future.
#'
#' @format \code{\link{R6Class}} object.
#' @name Xtractor
#'
#' @section Usage:
#' \preformatted{
#' xtractor = Xtractor$new("xtractor")
#' }
#'
#' @section Arguments:
#'
#' For Xtractor$new():
#' \describe{
#' \item{\code{name}: }{(`character(1)`): A user defined name of the Xtractor. All necessary data will be saved on the path: ./fxtract_files/name/}
#' \item{\code{load}: }{(`logical(1)`): If TRUE, an existing Xtractor will be loaded.}
#' \item{\code{file.dir}: }{(`character(1)`): Path where all files of the Xtractor are saved. Default is the current working directory.}
#' }
#' @section Details:
#' All datasets and feature functions are saved in this R6 object.
#' Datasets will be saved as single RDS files (for each ID) and feature functions are calculated on each single dataset.
#' A big advantage of this method is that it scales nicely for larger datasets. Data is only read into RAM, when needed.
#'
#' @section Fields:
#' \describe{
#' \item{\code{error_messages}: }{(`data.frame()`): Active binding. A dataframe with information about error messages.}
#' \item{\code{ids}: }{(`character()`): Active binding. A character vector with the IDs of the grouping variable.}
#' \item{\code{features}: }{(`character()`): Active binding. A character vector with the feature functions which were added.}
#' \item{\code{status}: }{(`data.frame()`): Active binding. A dataframe with an overview over which features are calculated on which datasets.}
#' \item{\code{results}: }{(`data.frame()`): Active binding. A dataframe with all calculated features of all IDs.}
#' }
#'
#' @section Methods:
#' \describe{
#' \item{\code{add_data(data, group_by)}}{[data: (`data.frame` | `data.table`)] A dataframe or data.table which shall be added to the R6 object. \cr
#' [group_by: (`character(1)`)] The grouping variable's name of the dataframe. \cr \cr
#' This method writes single RDS files for each group.}
#' \item{\code{preprocess_data(fun)}}{[fun: (`function`)] A function, which has a dataframe as input and a dataframe as output. \cr \cr
#' This method loads the RDS files and applies this function on them. The old RDS files are overwritten.}
#' \item{\code{remove_data(ids)}}{[ids: (`character()`)] One or many IDs of the grouping variable. \cr \cr
#' This method deletes the RDS files of the given IDs.}
#' \item{\code{get_data(ids)}}{[ids: (`character()`)] One or many IDs of the grouping variable. \cr \cr
#' This method returns one dataframe with the chosen IDs.}
#' \item{\code{add_feature(fun, check_fun)}}{[fun: (`function`)] A function, which has a dataframe as input and a named vector or list as output. \cr
#' [check_fun: (`logical(1)`)] The function will be checked if it returns a vector or a list. Defaults to \code{TRUE}. Disable, if calculation takes too long. \cr \cr
#' This method adds the feature function to the R6 object. It writes an RDS file of the function which can be retrieved later.}
#' \item{\code{remove_feature(fun)}}{[fun: (`function | character(1)`)] A function (or the name of the function as character) which shall be removed. \cr \cr
#' This method removes the function from the object and deletes all corresponding files and results.}
#' \item{\code{get_feature(fun)}}{[fun: (`character(1)`)] The name of a function as character. \cr \cr
#' This method reads the RDS file of the function. Useful for debugging after loading an Xtractor.}
#' \item{\code{calc_features(features, ids)}}{[features: (`character()`)] A character vector of the names of the features which shall be calculated. Defaults to all features. \cr
#' [ids: (`character()`)] One or many IDs of the grouping variable. Defaults to all IDs. \cr \cr
#' This method calculates all features on the chosen IDs.}
#' \item{\code{retry_failed_features(features)}}{[features: (`character()`)] A character vector of the names of the features which shall be calculated. Defaults to all features. \cr \cr
#' This method retries calculation of failed features. Useful if calculation failed because of memory problems.}
#' \item{\code{plot()}}{[internal] method to print the R6 object.}
#' \item{\code{clone()}}{[internal] method to clone the R6 object.}
#' \item{\code{initialize()}}{[internal] method to initialize the R6 object.}
#' }
#'
#' @examples
#' # one feature function
#' dir = tempdir()
#' xtractor = Xtractor$new("xtractor", file.dir = dir)
#' xtractor$add_data(iris, group_by = "Species")
#' xtractor$ids
#' fun = function(data) {
#'   c(mean_sepal_length = mean(data$Sepal.Length))
#' }
#' xtractor$add_feature(fun)
#' xtractor$features
#' xtractor$calc_features()
#' xtractor$results
#' xtractor$status
#' xtractor
#'
#' # failing function on only one ID
#' fun2 = function(data) {
#'   if ("setosa" %in% data$Species) stop("my error")
#'   c(sd_sepal_length = sd(data$Sepal.Length))
#' }
#' xtractor$add_feature(fun2)
#' xtractor$calc_features()
#' xtractor$results
#' xtractor$error_messages
#' xtractor
#'
#' # remove feature function
#' xtractor$remove_feature("fun2")
#' xtractor$results
#' xtractor
#'
#' # remove ID
#' xtractor$remove_data("setosa")
#' xtractor$results
#' xtractor$ids
#' xtractor
#'
#' # get datasets and functions
#' fun3 = xtractor$get_feature("fun")
#' df = xtractor$get_data()
#' dplyr_wrapper(data = df, group_by = "Species", fun = fun3)
#'
#' @import R6
#' @import dplyr
#' @import future.apply
#' @import fs
NULL

#' @export
Xtractor = R6Class("Xtractor",
  public = list(
    initialize = function(name, file.dir = ".", load = FALSE) {
      private$name = checkmate::assert_character(name)
      newDirPath = file.path(file.dir, "fxtract_files", name)
      private$dir = newDirPath
      if (!load) {
        if (!fs::dir_exists(file.path(file.dir, "fxtract_files"))) fs::dir_create(file.path(file.dir, "fxtract_files"))
        if (fs::dir_exists(newDirPath)) stop("The Xtractor name already exists. Please choose another name, delete the existing Xtractor, or set load = TRUE, if you want to load the old Xtractor.")
        fs::dir_create(newDirPath)
        fs::dir_create(file.path(newDirPath, "rds_files"))
        fs::dir_create(file.path(newDirPath, "rds_files", "data"))
        fs::dir_create(file.path(newDirPath, "rds_files", "features"))
        fs::dir_create(file.path(newDirPath, "rds_files", "results"))
        fs::dir_create(file.path(newDirPath, "rds_files", "results", "done"))
        fs::dir_create(file.path(newDirPath, "rds_files", "results", "failed"))
        saveRDS(NULL, file = file.path(private$dir, "rds_files", "group_by.RDS"))
      } else {
        checkmate::assert_subset(name, list.files(file.path(file.dir, "fxtract_files")))
        private$group_by = readRDS(file.path(newDirPath, "rds_files", "group_by.RDS"))
      }
    },
    print = function() {
      ids = self$ids
      feats = self$features
      cat("R6 Object: Xtractor\n")
      cat(paste0("Name: ", private$name, "\n"))
      cat(paste0("Grouping variable: ", private$group_by, "\n"))
      if (length(ids) <= 10) {
        cat(paste0("IDs: ", paste0(ids, collapse = ", "), "\n"))
      } else {
        cat(paste0("Number IDs: ", length(ids), ". See $ids for all ids.\n"))
      }
      if (length(feats) <= 10) {
        cat(paste0("Feature functions: ", paste0(feats, collapse = ", "), "\n"))
      } else {
        cat(paste0("Number feature functions: ", length(feats), ". See $features for all feature functions.\n"))
      }
      if (ncol(self$status[, -1, drop = FALSE]) >= 1) cat(paste0("Extraction done: ", mean(as.matrix(self$status[, -1]) == "done") * 100, "%\n"))
      cat(paste0("Errors during calculation: ", nrow(self$error_messages), " \n"))
      invisible(self)
    },
    add_data = function(data, group_by) {
      checkmate::assert_subset(group_by, colnames(data))
      checkmate::assert(
        checkmate::checkClass(data, "data.frame"),
        checkmate::checkClass(data, "data.table")
      )
      checkmate::assert_subset(group_by, colnames(data))
      checkmate::assert_character(group_by, len = 1)
      if (is.null(private$group_by)) {
        private$group_by = group_by
        saveRDS(group_by, file = file.path(private$dir, "rds_files", "group_by.RDS"))
      }
      if (group_by != private$group_by) stop(paste0("The group_by variable was set to ", private$group_by, ". Only one group_by variable is allowed per Xtractor!"))
      gb = data %>% dplyr::distinct_(.dots = group_by) %>% data.frame() %>% unlist()
      if (any(gb %in% self$ids)) stop(paste0("Adding data multiple times is not allowed! Following ID(s) are already added to the R6 object: ", paste0(gb[which(gb %in% self$ids)], collapse = ", ")))
      #save rds files
      message("Saving raw RDS files.")
      pb = utils::txtProgressBar(min = 0, max = length(gb), style = 3)
      for (i in seq_along(gb)) {
        data_i = data %>% dplyr::filter(!!as.name(group_by) == gb[i]) %>% data.frame()
        saveRDS(data_i, file = file.path(private$dir, "rds_files", "data", paste0(gb[i], ".RDS")))
        utils::setTxtProgressBar(pb, i)
      }
      close(pb)
      return(invisible(self))
    },
    preprocess_data = function(fun) {
      message("Updating raw RDS files.")
      future.apply::future_lapply(self$ids, function(i) {
        data_i = readRDS(file.path(private$dir, "rds_files", "data", paste0(i, ".RDS")))
        data_preproc = fun(data_i)
        saveRDS(data_preproc, file = file.path(private$dir, "rds_files", "data", paste0(i, ".RDS")))
      }, future.seed = TRUE)
      return(invisible(self))
    },
    remove_data = function(ids) {
      checkmate::assert_character(ids, min.len = 1L)
      checkmate::assert_subset(ids, self$ids)
      for (id in ids) {
        message("Deleting RDS file ", id, ".RDS")
        fs::file_delete(file.path(file.path(private$dir, "rds_files", "data", paste0(id, ".RDS"))))
      }
      #delete done
      done_dir = file.path(private$dir, "rds_files", "results", "done")
      done_features = list.files(done_dir)
      for (feature in done_features) {
        for (id in ids) {
          done_feat_path = file.path(private$dir, "rds_files", "results", "done", feature, paste0(id, ".RDS"))
          if (file.exists(done_feat_path)) {
            message(paste0("Deleting results from id: ", id))
            fs::file_delete(done_feat_path)
          }
        }
      }
      #delete error
      failed_dir = file.path(private$dir, "rds_files", "results", "failed")
      failed_features = list.files(failed_dir)
      for (feature in failed_features) {
        for (id in ids) {
          failed_feat_path = file.path(private$dir, "rds_files", "results", "failed", feature, paste0(id, ".RDS"))
          if (file.exists(failed_feat_path)) {
            message(paste0("Deleting error messages from id: ", id))
            fs::file_delete(failed_feat_path)
          }
        }
      }
      return(invisible(self))
    },
    get_data = function(ids) {
      if (missing(ids)) ids = self$ids
      checkmate::assert_character(ids, min.len = 1L)
      checkmate::assert_subset(ids, self$ids)
      data = future.apply::future_lapply(ids, function(i) {
        readRDS(file.path(private$dir, "rds_files", "data", paste0(i, ".RDS")))
      })
      if (length(unique(lapply(data, function(df) typeof(df[, private$group_by])))) > 1) {
        message("Different vector types detected. Converting group column to character.")
        for (i in seq_along(data)) {
          data[[i]][, private$group_by] = as.character(data[[i]][, private$group_by])
        }
      }
      dplyr::bind_rows(data)
    },
    add_feature = function(fun, check_fun = TRUE) {
      checkmate::assert_logical(check_fun)
      checkmate::assert_function(fun)
      if (deparse(substitute(fun)) %in% self$features) stop(paste0("Feature function '", deparse(substitute(fun)), "' was already added."))
      saveRDS(list(fun = fun, check_fun = check_fun), file = file.path(private$dir, "rds_files", "features", paste0(deparse(substitute(fun)), ".RDS")))
      fs::dir_create(file.path(private$dir, "rds_files", "results", "done", deparse(substitute(fun))))
      fs::dir_create(file.path(private$dir, "rds_files", "results", "failed", deparse(substitute(fun))))
      return(invisible(self))
    },
    remove_feature = function(fun) {
      if (is.function(fun)) fun = as.character(substitute(fun))
      checkmate::assert_character(fun, min.len = 1L)
      checkmate::assert_subset(fun, self$features)
      for (f in fun) {
        fs::file_delete(file.path(private$dir, "rds_files", "features", paste0(f, ".RDS")))
        fs::dir_delete(file.path(private$dir, "rds_files", "results", "done", f))
        fs::dir_delete(file.path(private$dir, "rds_files", "results", "failed", f))
      }
      return(invisible(self))
    },
    get_feature = function(fun) {
      checkmate::assert_character(fun, len = 1L)
      checkmate::assert_subset(fun, self$features)
      readRDS(file.path(private$dir, "rds_files", "features", paste0(fun, ".RDS")))$fun
    },
    calc_features = function(features, ids, retry_failed = TRUE) {
      if (missing(features)) features = self$features
      checkmate::assert_character(features)
      checkmate::assert_subset(features, self$features)
      checkmate::assert_logical(retry_failed)
      if (!missing(ids)) checkmate::assert_character(ids)
      if (!missing(ids)) checkmate::assert_subset(ids, self$ids)
      if (length(self$ids) == 0) stop("Please add datasets with method $add_data().")
      if (length(self$features) == 0) stop("Please add feature functions with method $add_feature().")
      features_new = features
      status = self$status
      for (feature in features) {
        if (all(self$status[[feature]] == "done")) {
          features_new = setdiff(features_new, feature)
          message(paste0("Feature function '", feature, "' was already applied on every ID and will be skipped."))
        }
      }
      #calculating features using future.apply
      message(paste0("Calculating features on ", future::nbrOfWorkers(), " core(s)."))
      for (feature in features_new) {
        idm = ifelse(missing(ids), "", paste0(" on IDs: ", paste0(ids, collapse = ", ")))
        message(paste0("Calculating feature function: ", feature, idm))
        feat_fun = self$get_feature(feature)
        if (missing(ids)) {
          ids_calc = status[which(status[, feature] == "not_done"), private$group_by]
        } else {
          ids_calc = ids
        }
        if (retry_failed) {
          ids_failed = status[which(status[, feature] == "failed"), private$group_by]
          if (length(ids_failed) > 0) message("Failed features will be calculated again. For stochastical features set a seed inside your function!")
          ids_calc = unique(c(ids_calc, ids_failed))
        }
        if (length(ids_calc) == 0) message("Nothing to calculate.")
        future.apply::future_lapply(ids_calc, function(x) {
          data = self$get_data(x)
          group_by = private$group_by
          res_id = tryCatch(fxtract::dplyr_wrapper(data, group_by, feat_fun, check_fun = private$get_check_fun(feature)), error = function(e) e$message)
          #if error, save as error, else save result
          feat_fail_path = file.path(private$dir, "rds_files", "results", "failed", feature, paste0(x, ".RDS"))
          if (is.character(res_id)) {
            saveRDS(res_id, file = feat_fail_path)
          } else {
            if (fs::file_exists(feat_fail_path)) fs::file_delete(feat_fail_path)
            saveRDS(res_id, file = file.path(private$dir, "rds_files", "results", "done", feature, paste0(x, ".RDS")))
          }
        }, future.seed = TRUE)
      }
      return(invisible(self))
    }
  ),
  private = list(
    name = NULL,
    group_by = NULL,
    dir = NULL,
    get_check_fun = function(fun) {
      checkmate::assert_character(fun, len = 1L)
      checkmate::assert_subset(fun, self$features)
      readRDS(file.path(private$dir, "rds_files", "features", paste0(fun, ".RDS")))$check_fun
    }
  ),
  active = list(
    error_messages = function() {
      error_df = setNames(data.frame(matrix(ncol = 3, nrow = 0), stringsAsFactors = FALSE), c("feature_function", "id", "error_message"))
      for (feat in self$features) {
        error_feats = list.files(file.path(private$dir, "rds_files", "results", "failed", feat))
        if (length(error_feats) == 0) next
        for (file in error_feats) {
          error_message = readRDS(file.path(private$dir, "rds_files", "results", "failed", feat, file))
          error_df = dplyr::bind_rows(error_df, data.frame(feature_function = feat, id = gsub(".RDS", "", file), error_message = error_message, stringsAsFactors = FALSE))
        }
      }
      data.table::setDF(error_df)
    },
    ids = function() {
      gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "data")))
    },
    features = function() {
      gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "features")))
    },
    results = function() {
      todo_data = gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "data")))
      final_result = setNames(data.frame(todo_data, stringsAsFactors = FALSE), private$group_by)
      if (nrow(final_result) == 0) return(final_result)
      for (feat in self$features) {
        results_feat = future.apply::future_lapply(list.files(file.path(private$dir, "rds_files", "results", "done", feat), full.names = TRUE), readRDS)
        results_feat = data.table::rbindlist(results_feat, fill = TRUE) %>% data.frame()
        if (nrow(results_feat) == 0) next
        results_feat[, private$group_by] = as.character(results_feat[, private$group_by])
        final_result = dplyr::full_join(final_result, results_feat, by = private$group_by)
      }
      data.table::setDF(final_result)
    },
    status = function() {
      todo_data = gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "data")))
      todo_feats = gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "features")))
      status = setNames(data.frame(todo_data, stringsAsFactors = FALSE), private$group_by)
      if (nrow(status) == 0) return(status)
      for (feat in todo_feats) {
        #make done df
        done_feat = gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "results", "done", feat)))
        if (length(done_feat) >= 1) {
          done_df = setNames(data.frame(done_feat, "done", stringsAsFactors = FALSE), c(private$group_by, feat))
        } else {
          done_df = setNames(data.frame(matrix(ncol = 2, nrow = 0), stringsAsFactors = FALSE), c(private$group_by, feat))
        }
        #make error df
        error_feat = gsub(".RDS", "", list.files(file.path(private$dir, "rds_files", "results", "failed", feat)))
        if (length(error_feat) >= 1) {
          error_df = setNames(data.frame(error_feat, "failed", stringsAsFactors = FALSE), c(private$group_by, feat))
        } else {
          error_df = setNames(data.frame(matrix(ncol = 2, nrow = 0), stringsAsFactors = FALSE), c(private$group_by, feat))
        }
        #make not done df
        not_done_feat = setdiff(todo_data, c(done_feat, error_feat))
        if (length(not_done_feat) >= 1) {
          not_done_df = setNames(data.frame(not_done_feat, "not_done", stringsAsFactors = FALSE), c(private$group_by, feat))
        } else {
          not_done_df = setNames(data.frame(matrix(ncol = 2, nrow = 0), stringsAsFactors = FALSE), c(private$group_by, feat))
        }
        status_feat = dplyr::bind_rows(done_df, error_df, not_done_df)
        status = dplyr::left_join(status, status_feat, by = private$group_by)
      }
      status
    }
  )
)
/R/xtractor.R
no_license
frycast/fxtract
R
false
false
19,881
r
library(FinCal)

get.ohlc.google(symbol="AAPL")
get.ohlc.google(symbol="AAPL")
get.ohlc.google(symbol="AAPL",start="2013-08-01")
get.ohlc.google(symbol="AAPL",start="2013-07-01",end="2013-08-01")
apple <- get.ohlc.yahoo(symbol = "AAPL", start = "2013-07-01", end = "2013-08-01")

#Example 25 Download historical financial data from Yahoo finance and Google Finance
#Download historical financial data from Yahoo finance
get.ohlc.yahoo(symbol = "AAPL", start = "firstDay", end = "today", freq = "d")
apple <- get.ohlc.yahoo(symbol = "AAPL", start = "2013-07-01", end = "2013-08-01")
head(apple)
all <- get.ohlcs.google(symbols = c("YHOO", "SPY", "SINA"), start = "2013-01-01", end = "2013-07-31")
head(all$YHOO)

#Example 26 Plots open-high-low-close chart of (financial) time series. Line chart
apple <- get.ohlc.yahoo(symbol = "AAPL", start = "2013-07-01", end = "2013-08-01")
google <- get.ohlc.yahoo("GOOG", start = "2013-07-01", end = "2013-08-01")
candlestickChart(google)
apple <- get.ohlc.google("AAPL")
volumeChart(apple)
lineChart(apple)

#Not Working
library(quantmod)
getSymbols("GOOG", start = "2013-07-01", end = "2013-08-01")
head(GOOG)
volumeChart(GOOG$GOOG.Volume)
candlestickChart(GOOG)
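One likely reason the "#Not Working" block above fails is that quantmod's getSymbols() takes from/to rather than start/end, and FinCal's charting helpers expect the data frames returned by get.ohlc.*, not xts objects; a hedged alternative using quantmod's own plotting:

# library(quantmod)
# getSymbols("GOOG", from = "2013-07-01", to = "2013-08-01")
# chartSeries(GOOG, type = "candlesticks")  # quantmod chart, includes a volume panel by default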
/74-finCal/11-finacal-stocks-NW.R
no_license
dupadhyaya/rfats
R
false
false
1,232
r
## This file contains functions for creating and solving a special matrix
## object that is able to cache said matrix's inverse

## makeCacheMatrix creates a special matrix object capable of remembering its
## inverse. This function takes a matrix as an argument and returns a list of
## functions for accessing the special matrix object
makeCacheMatrix <- function(mat = matrix()) {
  inverse <- NULL
  set <- function(newmat) {
    # use superassignment so the matrix and cached inverse in the enclosing
    # environment are actually updated (a plain <- would only change local copies)
    mat <<- newmat
    inverse <<- NULL
  }
  get <- function() mat
  setinverse <- function(newinverse) inverse <<- newinverse
  getinverse <- function() inverse
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}

## cacheSolve takes in a matrix created via makeCacheMatrix, solves and assigns
## the inverse of that matrix. This function takes in a list of functions for
## accessing a special cached matrix and returns the calculated inverse
cacheSolve <- function(mat, ...) {
  inverse <- mat$getinverse()
  if(!is.null(inverse)) {
    message("Getting cached inverse")
    return(inverse)
  }
  data <- mat$get()
  inverse <- solve(data, ...)
  mat$setinverse(inverse)
  inverse
}
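A short usage sketch of the two functions above:

# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)   # computes the inverse with solve() and stores it via setinverse()
# cacheSolve(m)   # prints "Getting cached inverse" and returns the stored value without recomputing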
/cachematrix.R
no_license
SingingTree/ProgrammingAssignment2
R
false
false
1,210
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{get.mce}
\alias{get.mce}
\title{Extracting Monte Carlo error}
\usage{
get.mce(fit, estimate)
}
\arguments{
\item{fit}{A fitted model from \link{fit.ascr}.}

\item{estimate}{A character string, either \code{"bias"} or \code{"se"}, which determines whether Monte Carlo errors for bias estimates or standard errors are reported.}
}
\description{
Extracts calculated Monte Carlo errors from a bootstrap procedure carried out by \link{boot.ascr}.
}
\seealso{
\link{boot.ascr} for the bootstrap procedure.

\link{stdEr.ascr.boot} for standard errors.

\link{get.bias} for estimated biases.
}
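The Rd above has no \examples section; a hedged sketch of how the function would typically be called (object and argument names beyond fit and estimate are illustrative, and the boot.ascr() call signature is not shown in this file):

# fit.boot <- boot.ascr(fit, N = 500)      # bootstrap a model previously fitted with fit.ascr()
# get.mce(fit.boot, estimate = "se")       # Monte Carlo error of the standard errors
# get.mce(fit.boot, estimate = "bias")     # Monte Carlo error of the bias estimates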
/man/get.mce.Rd
no_license
dill/ascr
R
false
true
681
rd
# library(qvalue)
#
# DF <- as.data.frame(read.table("a"))
#
# qvalue(DF$V2, fdr.level=0.01)

Args <- commandArgs()

library(Exact)
library(qvalue)
# library(Barnard)

th <- as.double(Args[6])

DFall <- as.data.frame(read.table(Args[5]))
# DF <- as.data.frame(DFall[which((DFall$V2/DFall$V3 >= th) | (DFall$V4/DFall$V5 >= th)),])
DF <- as.data.frame(DFall[which(DFall$V2/DFall$V3 >= th),])

DF$V3 <- DF$V3-DF$V2;
DF$V5 <- DF$V5-DF$V4;
# DF$V2 <- DF$V2
# DF$V4 <- DF$V4

Pval <- vector(length=dim(DF)[1])
for( i in seq(1,dim(DF)[1],1) ){
  m <- matrix(c(DF[i,]$V2, DF[i,]$V3, DF[i,]$V4, DF[i,]$V5), 2, 2)
  # if( (DF[i,]$V3 == 0) & (DF[i,]$V5 == 0) ){
  #   Pval[i] <- 0.0000000000000001
  # }else{
  Pval[i] <- fisher.test(m, alternative="greater")$p.value
  # }
  # if( (DF[i,]$V3 == 0) & (DF[i,]$V5 == 0) ){
  #   Pval[i] <- 0.0000000000000001
  # }else{
  #   Pval[i] <- exact.test(m, alternative="greater", method="Boschloo", to.plot = FALSE)$p.value
  # }
  # Pval[i] <- barnardw.test(DF[i,]$V2, DF[i,]$V3, DF[i,]$V4, DF[i,]$V5)$p.value[1]
}
DF$Pval <- Pval

DF <- DF[which(DF$Pval <= quantile(qvalue(p.adjust(Pval, method = "holm", n = length(Pval)), fdr.level=0.01)$qvalue, 0.05)),]

# print.data.frame(DF, row.names=FALSE, col.names=FALSE)
# write.table(DF[which(DF$Pval < 0.05),], Args[5], row.names=FALSE, col.names=FALSE, quote = FALSE)
# write.table(DF[which(DF$Pval < 0.05),], "tmp/last.txt", row.names=FALSE, col.names=FALSE, quote = FALSE)
write.table(DF, Args[5], row.names=FALSE, col.names=FALSE, quote = FALSE)
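For reference, the core of the loop above is a one-sided Fisher exact test on a 2x2 count matrix built column-wise (one column per condition, rows being hits and non-hits after the V3/V5 subtraction); a standalone illustration with made-up counts:

# m <- matrix(c(30, 70, 10, 90), nrow = 2)   # condition 1: 30/100 hits; condition 2: 10/100 hits
# fisher.test(m, alternative = "greater")$p.value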
/pvalues.R
no_license
fagostini/SeAMotE
R
false
false
1,522
r
# make sure WT and KO results share the same rows (interacting pairs)

library(stringr)

# input arguments:
# 1. path to the CPDB result directory
# 2. character 'WT' or 'KO'
args = commandArgs(trailingOnly=T)
res_path = args[1]
prefix = args[2]

PN = c('L2_3_CPN_1','L2_3_CPN_2','L2_3_CPN_3','L2_3_CPN_4','L4_Stellate','L5_CPN_1','L5_CPN_2','L5_CStrPN','L5_NP','L5_PT','L6_CPN_1','L6_CPN_2','L6_CThPN_1','L6_CThPN_2','L6b_Subplate')
if (prefix == 'KO') {
  PN = c('L2_3_CPN_1','L2_3_CPN_2','L2_3_CPN_3','L2_3_CPN_4','L4_Stellate','L5_CPN_1','L6_CPN_1','L6_CPN_2','KO_Mismatch_1','KO_Mismatch_2','KO_Mismatch_3','KO_Mismatch_4')
}
MG = c('Homeostatic1', 'Homeostatic2')

pval_file = file.path(res_path, "adjust_pvalues.txt")
means_file = file.path(res_path, "means.txt")

if (!all(sapply(c(pval_file, means_file), FUN=file.exists))) {
  print("check input file names.")
  q()
}

pval = read.table(pval_file, sep='\t', header=T)
means = read.table(means_file, sep='\t', header=T)

tab.pairs = expand.grid(MG, PN)
pn.mg.pairs = c(paste0(tab.pairs[,1], '.', tab.pairs[,2]), paste0(tab.pairs[,2], '.', tab.pairs[,1]))
stopifnot(all(sapply(pn.mg.pairs, FUN=function(x){x %in% colnames(pval)})))

info.cols = colnames(pval)[1:11]
pval = pval[,c(info.cols, pn.mg.pairs)]
means = means[,c(info.cols, pn.mg.pairs)]

comb.pair = readRDS("combined_sig_pairs.rds")
rows.use = match(comb.pair, pval$interacting_pair)

# resulting matrices
pval = pval[rows.use,]
means = means[rows.use,]

# save!
write.table(pval, paste0(prefix, "_pvalues_for_comb.txt"), sep='\t', quote=F, row.names=F, col.names=T)
write.table(means, paste0(prefix, "_means_for_comb.txt"), sep='\t', quote=F, row.names=F, col.names=T)

print("DONE!")
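To make the column-matching step above concrete, expand.grid() crosses the microglia and neuron labels and paste0() builds the "A.B" column names in both orders; a tiny standalone illustration with one label per group:

# tab <- expand.grid(c("Homeostatic1"), c("L5_PT"))
# c(paste0(tab[, 1], ".", tab[, 2]), paste0(tab[, 2], ".", tab[, 1]))
# [1] "Homeostatic1.L5_PT" "L5_PT.Homeostatic1"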
/CellPhoneDB/match_interacting_pairs_for_comparison.R
no_license
kimkh415/MicrogliaLayers
R
false
false
1,701
r
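Two idioms do most of the work in the record above: expand.grid() plus paste0() to enumerate the MG-by-PN column names in both orders, and match() to force the WT and KO tables onto the same, identically ordered set of interacting pairs. A small sketch with shortened cluster vectors and hypothetical pair names:

MG <- c("Homeostatic1", "Homeostatic2")
PN <- c("L5_CPN_1", "L6_CPN_1")

tab.pairs   <- expand.grid(MG, PN)
pn.mg.pairs <- c(paste0(tab.pairs[, 1], ".", tab.pairs[, 2]),
                 paste0(tab.pairs[, 2], ".", tab.pairs[, 1]))
# "Homeostatic1.L5_CPN_1" ... "L6_CPN_1.Homeostatic2", i.e. both orderings of every pair

toy       <- data.frame(interacting_pair = c("CX3CL1_CX3CR1", "CSF1_CSF1R", "IL34_CSF1R"))
comb.pair <- c("CSF1_CSF1R", "CX3CL1_CX3CR1")                 # hypothetical shared significant pairs
toy[match(comb.pair, toy$interacting_pair), , drop = FALSE]   # rows reordered to follow comb.pair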
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReturnUniqueBases.R
\name{ReturnUniqueBases}
\alias{ReturnUniqueBases}
\title{Return Unique Bases}
\usage{
ReturnUniqueBases(SNP)
}
\arguments{
\item{SNP}{A single SNP}
}
\value{
Returns a character vector with base possibilities.
}
\description{
This function will return the unique nucleotide bases of an SNP accounting for an ambiguity code
}
\examples{
ReturnUniqueBases(c("A", "A", "R"))
data(fakeData)
fakeData <- SplitSNP(fakeData)
ReturnUniqueBases(fakeData[,1])
ReturnUniqueBases(fakeData[,12])
}
\seealso{
\link{ReadSNP} \link{WriteSNP} \link{ReturnNucs}
}
/man/ReturnUniqueBases.Rd
no_license
bbanbury/phrynomics
R
false
true
646
rd
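The Rd entry above documents ReturnUniqueBases(), which collapses an SNP column to its distinct bases while accounting for IUPAC ambiguity codes. The phrynomics implementation is not shown here; the generic sketch below only illustrates the idea that, for example, "R" stands for A or G:

iupac <- list(R = c("A", "G"), Y = c("C", "T"), S = c("G", "C"),
              W = c("A", "T"), K = c("G", "T"), M = c("A", "C"))

expand_bases <- function(snp) {
    unique(unlist(lapply(snp, function(b) if (b %in% names(iupac)) iupac[[b]] else b)))
}

expand_bases(c("A", "A", "R"))   # "A" "G"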
# script to try and untangle how each vital rate affects R0
library(dplyr)
library(tidyr)
library(rstan)
library(arrayhelpers)
library(grid)
library(gridExtra)
library(ggplot2)
library(cowplot)
library(lme4)
library(lmtest)
library(optimx)
library(MuMIn)
library(here) # used to construct a path to your local version of the scripts

# build the file path from the local root, may need to modify if repo is saved in a different directory
# This will build /home/some_user_account/spatial_dem_perf
dir_path = here('spatial_dem_perf')
setwd(dir_path)

vr_loc_gen = read.csv('vr_loc_gen_postburn.csv', header = TRUE, stringsAsFactors = FALSE)

source('dist_neigh_setup.R')
source('model_perf_output_helper.R')
source('demographic_perf.R')

# First step is to see how the spatial effect for each vital rate correlates with the others to get
# a sense of what is going on.
# This will take some data massaging to work out which knot point is in which gap, and match up knot
# locations between the vital rates, since each vital rate has a slightly different data set, and so
# different knot locations

# get the data sets used to fit the model
sur_dat = read.csv('sur_loc_gen_postburn.csv', header = TRUE, stringsAsFactors = FALSE)
rep_dat = read.csv('rep_loc_gen_postburn.csv', header = TRUE, stringsAsFactors = FALSE)
gr_dat = read.csv('gr_loc_gen_postburn.csv', header = TRUE, stringsAsFactors = FALSE)

ob_name = load('R_E_samp_df.Rdata')

# survival
ob_name = load('SPP_NK_sur_stan.Rdata')
SPP_sur = SPP_NK_sur_stan
# reproduction
ob_name = load('SPP_NK_rep_stan_ap.Rdata')
SPP_rep = SPP_NK_rep_stan
# growth
ob_name = load('SPP_NK_gr_stan.Rdata')
SPP_gr = SPP_gr_stan
# fruit production
ob_name = load('fruit_num_stan.Rdata')
fruit_stan = fruit_num_stan

# pull the spp from the models
sur_spp = apply(extract_flat(SPP_sur, pars = c('spp')), MARGIN = 2, FUN = quantile, probs = c(0.025, 0.5, 0.975))
sur_kl = knot_coords2(dat_x = sur_dat$X, dat_y = sur_dat$Y, min_dist = 0.5)

rep_spp = apply(extract_flat(SPP_rep, pars = c('spp')), MARGIN = 2, FUN = quantile, probs = c(0.025, 0.5, 0.975))
rep_kl = knot_coords2(dat_x = rep_dat$X, dat_y = rep_dat$Y, min_dist = 0.5)

gr_spp = apply(extract_flat(SPP_gr, pars = c('spp')), MARGIN = 2, FUN = quantile, probs = c(0.025, 0.5, 0.975))
gr_kl = knot_coords2(dat_x = gr_dat$X, dat_y = gr_dat$Y, min_dist = 0.5)

# find the knots common to all vital rates, and ensure they get the right knot index
sur_kl_df = data.frame(X = round(sur_kl[, 1], 3), Y = round(sur_kl[, 2], 3))
sur_kl_df = mutate(sur_kl_df, locID = paste0(X, ':', Y))
sur_kl_df$rowID = 1:length(sur_kl_df$X)

rep_kl_df = data.frame(X = rep_kl[, 1], Y = rep_kl[, 2])
rep_kl_df = mutate(rep_kl_df, locID = paste0(X, ':', Y))
rep_kl_df$rowID = 1:length(rep_kl_df$X)

gr_kl_df = data.frame(X = gr_kl[, 1], Y = gr_kl[, 2])
gr_kl_df = mutate(gr_kl_df, locID = paste0(X, ':', Y))
gr_kl_df$rowID = 1:length(gr_kl_df$X)

# associate a gap ID to each knot location
sur_dat$gapID = sapply(sur_dat$uID, FUN = function(x) strsplit(x, split = ':')[[1]][1])
rep_dat$gapID = sapply(rep_dat$uID, FUN = function(x) strsplit(x, split = ':')[[1]][1])
gr_dat$gapID = sapply(gr_dat$uID, FUN = function(x) strsplit(x, split = ':')[[1]][1])

sur_kl_df$gapID = NA
for(i in 1:length(sur_kl_df$X)){
    gap = unique(sur_dat$gapID[find_near(sur_kl_df$X[i], sur_kl_df$Y[i], sur_dat$X, sur_dat$Y, dist = 0.25)])
    sur_kl_df$gapID[i] = ifelse(length(gap) > 0, gap, NA)
}

rep_kl_df$gapID = NA
for(i in 1:length(rep_kl_df$X)){
    gap = unique(rep_dat$gapID[find_near(rep_kl_df$X[i], rep_kl_df$Y[i], rep_dat$X, rep_dat$Y, dist = 0.25)])
    rep_kl_df$gapID[i] = ifelse(length(gap) > 0, gap, NA)
}

gr_kl_df$gapID = NA
for(i in 1:length(gr_kl_df$X)){
    gap = unique(gr_dat$gapID[find_near(gr_kl_df$X[i], gr_kl_df$Y[i], gr_dat$X, gr_dat$Y, dist = 0.25)])
    gr_kl_df$gapID[i] = ifelse(length(gap) > 0, gap, NA)
}

# build a common set of knot locations by taking the vital rate with the least number of
# knots (gr_kl_df), then finding all the knot locations in the other 2 that are within
# 25 cm of the knot locations in gr_kl, and add those indexes to a key
kl_common = list()
count = 1
for(i in 1:length(gr_kl_df$X)){
    sur_inds = find_near(gr_kl_df$X[i], gr_kl_df$Y[i], sur_kl_df$X, sur_kl_df$Y, 0.25)
    rep_inds = find_near(gr_kl_df$X[i], gr_kl_df$Y[i], rep_kl_df$X, rep_kl_df$Y, 0.25)
    if(length(sur_inds) > 0 & length(rep_inds) > 0){
        kl_common[[count]] = list(gr_ind = i, sur_ind = sur_inds, rep_ind = rep_inds)
        count = count + 1
    }
}

# make pairwise knot location index key for survival and reproduction
kl_sur_rep_key = list()
count = 1
for(i in 1:length(sur_kl_df$X)){
    rep_inds = find_near(sur_kl_df$X[i], sur_kl_df$Y[i], rep_kl_df$X, rep_kl_df$Y, 0.25)
    if(length(rep_inds) > 0){
        kl_sur_rep_key[[count]] = list(sur_ind = i, rep_ind = rep_inds)
        count = count + 1
    }
}

# make data frame with knot locations, gap ID and spatial effect for survival and reproduction
sur_rep_kl_df = sur_kl_df
sur_rep_kl_df$sur_medGPP = NA
sur_rep_kl_df$sur_lqGPP = NA
sur_rep_kl_df$sur_uqGPP = NA
sur_rep_kl_df$rep_medGPP = NA
sur_rep_kl_df$rep_lqGPP = NA
sur_rep_kl_df$rep_uqGPP = NA

for(i in 1:length(kl_sur_rep_key)){
    sur_rep_kl_df[i, c('sur_lqGPP', 'sur_medGPP', 'sur_uqGPP')] = sur_spp[, kl_sur_rep_key[[i]]$sur_ind]
    sur_rep_kl_df[i, c('rep_lqGPP', 'rep_medGPP', 'rep_uqGPP')] = rep_spp[, kl_sur_rep_key[[i]]$rep_ind]
}

# make data frame with knot locations, gap ID and spatial effect for growth, survival and reproduction,
# and estimate of fruit_prod
vr_kl_df = data.frame(row_ID = 1:length(kl_common), gapID = NA, X = NA, Y = NA,
    sur_medGPP = NA, sur_lqGPP = NA, sur_uqGPP = NA,
    rep_medGPP = NA, rep_lqGPP = NA, rep_uqGPP = NA,
    gr_medGPP = NA, gr_lqGPP = NA, gr_uqGPP = NA,
    med_E_Rf = NA, lq_E_Rf = NA, uq_E_Rf = NA)

for(i in 1:length(kl_common)){
    vr_kl_df$gapID[i] = gr_kl_df$gapID[kl_common[[i]]$gr_ind]
    vr_kl_df$X[i] = gr_kl_df$X[kl_common[[i]]$gr_ind]
    vr_kl_df$Y[i] = gr_kl_df$Y[kl_common[[i]]$gr_ind]
    vr_kl_df[i, c('sur_lqGPP', 'sur_medGPP', 'sur_uqGPP')] = sur_spp[, kl_common[[i]]$sur_ind]
    vr_kl_df[i, c('rep_lqGPP', 'rep_medGPP', 'rep_uqGPP')] = rep_spp[, kl_common[[i]]$rep_ind]
    vr_kl_df[i, c('gr_lqGPP', 'gr_medGPP', 'gr_uqGPP')] = gr_spp[, kl_common[[i]]$gr_ind]
    vr_kl_df[i, c('med_E_Rf', 'lq_E_Rf', 'uq_E_Rf')] = fn_df[i, c('median', 'lq', 'uq')]
}

####################################################################################################
# now I have the data all set up I can start plotting
plot(vr_kl_df[, c('sur_medGPP', 'rep_medGPP', 'gr_medGPP', 'med_E_Rf')])

# look at each vital rate correlated with each other, also look within each patch
ggplot(vr_kl_df, aes(sur_medGPP, rep_medGPP, colour = gapID)) + geom_point() + facet_wrap(~ gapID, nrow = 6)

# make a reduced data set with only those data points in patches shared by three or more locations
gap_groups = group_by(vr_kl_df, gapID)
gap_counts = summarize(gap_groups, count = n(),
    cor_sur_rep = cor(sur_medGPP, rep_medGPP),
    cor_sur_gr = cor(sur_medGPP, gr_medGPP),
    cor_rep_gr = cor(rep_medGPP, gr_medGPP))

gaps_n4 = gap_counts$gapID[gap_counts$count > 2]
vr_kl_redu = filter(vr_kl_df, gapID %in% gaps_n4)
vr_kl_n4 = filter(gap_counts, gapID %in% gaps_n4)

pdf(file = 'gap_vr_correlation.pdf', width = 15, height = 15)
ggplot(vr_kl_redu, aes(sur_medGPP, rep_medGPP, colour = gapID)) + geom_point() +
    annotate('text', label = round(cor(vr_kl_redu$sur_medGPP, vr_kl_redu$rep_medGPP), 2), x = -2.5, y = -2.5)
ggplot(vr_kl_redu, aes(sur_medGPP, rep_medGPP)) + geom_point() +
    geom_text(data = vr_kl_n4, aes(label = round(cor_sur_rep, 2), x = 1, y = 1)) + facet_wrap(~ gapID, nrow = 6)
ggplot(vr_kl_redu, aes(sur_medGPP, gr_medGPP, colour = gapID)) + geom_point() +
    annotate('text', label = round(cor(vr_kl_redu$sur_medGPP, vr_kl_redu$gr_medGPP), 2), x = -2.5, y = -2.5)
ggplot(vr_kl_redu, aes(sur_medGPP, gr_medGPP)) + geom_point() +
    geom_text(data = vr_kl_n4, aes(label = round(cor_sur_gr, 2), x = 1, y = 1)) + facet_wrap(~ gapID, nrow = 6)
ggplot(vr_kl_redu, aes(rep_medGPP, gr_medGPP, colour = gapID)) + geom_point() +
    annotate('text', label = round(cor(vr_kl_redu$rep_medGPP, vr_kl_redu$gr_medGPP), 2), x = -2.5, y = -2.5)
ggplot(vr_kl_redu, aes(rep_medGPP, gr_medGPP)) + geom_point() +
    geom_text(data = vr_kl_n4, aes(label = round(cor_rep_gr, 2), x = 1, y = 1)) + facet_wrap(~ gapID, nrow = 6)
dev.off()

# plot these same vital rates against fruit production
ggplot(vr_kl_redu, aes(sur_medGPP, med_E_Rf)) + geom_point() + facet_wrap(~ gapID, ncol = 4)
ggplot(vr_kl_redu, aes(rep_medGPP, med_E_Rf)) + geom_point() + facet_wrap(~ gapID, ncol = 4)
ggplot(vr_kl_redu, aes(gr_medGPP, med_E_Rf)) + geom_point() + facet_wrap(~ gapID, ncol = 4)

# try and see how much variance in R0 is explained by the spatial effect of each vital rate
# (only part of the R0 equation that can change).
Rf_full = lmer(med_E_Rf ~ sur_medGPP + rep_medGPP + gr_medGPP + (sur_medGPP + rep_medGPP + gr_medGPP | gapID),
    data = vr_kl_redu, REML = TRUE)

# think about how to work out how much variance explained with just pairs, might not be doable
# try taking patches with 3 locations to see if that helps

# try taking out the random effects
Rf_rand_int = lmer(med_E_Rf ~ sur_medGPP + rep_medGPP + gr_medGPP + (1 | gapID),
    data = vr_kl_redu, REML = TRUE)

lrtest(Rf_full, Rf_rand_int)
# cannot take out random effect

summary(Rf_full)
ranef(Rf_full)

# take out each vr to see how much R squared
## two tasks, first calculate R^2 for full model and then models without each vital rate to assess how much spatial effect of each vr
## has on R0. Second redo but with data generated under the null model that in each location vital rates are independent (that is no trade-offs).
## If a vital rate explains very little of the variation in the observed, and more under the null, then it is evidence that in the observed
## data there are spatial trade-offs, because if all vr were independent they would be less correlated

# I need to re-implement a bit of stuff so a lot of data preparation first to get the right data and some constants from the data
burn_dat = read.csv('hcdem_ABS_2015_checked.csv')

fruit_ht_dat = select(burn_dat, year, burn_yr, site, gap, tag, ht94, ht95, ht96, ht97, ht98, ht99, ht00, ht01, ht02, ht03)
fruit_rep_dat = select(burn_dat, year, site, gap, tag, rep94, rep95, rep96, rep97, rep98, rep99, rep00, rep01, rep02, rep03)

ht_long = gather(fruit_ht_dat, ht_lab, height, ht94:ht03)
rep_long = gather(fruit_rep_dat, rep_lab, fruit_num, rep94:rep03)

# drop NA's
ht_long = ht_long[ht_long$height != '#NULL!', ]
rep_long = rep_long[rep_long$fruit_num != '#NULL!', ]

# make the year labels numeric
ht_long$m_year = sapply(ht_long$ht_lab, FUN = function(x){
    a = as.numeric(strsplit(x, split = 'ht')[[1]][2])
    if(a >= 50) return(1900 + a) else return(2000 + a)
})
rep_long$m_year = sapply(rep_long$rep_lab, FUN = function(x){
    a = as.numeric(strsplit(x, split = 'rep')[[1]][2])
    if(a >= 50) return(1900 + a) else return(2000 + a)
})

# add some columns for ID
ht_long = mutate(ht_long, ID = paste0(site, ':', gap, ':', tag),
    join_ID = paste0(m_year, ':', site, ':', gap, ':', tag),
    height = as.numeric(height))
rep_long = mutate(rep_long, ID = paste0(site, ':', gap, ':', tag),
    join_ID = paste0(m_year, ':', site, ':', gap, ':', tag),
    fruit_num = as.numeric(fruit_num))

# merge the data frames
fruit_dat = inner_join(ht_long, rep_long, by = 'join_ID')
fruit_dat = select(fruit_dat, burn_yr = burn_yr, site = site.x, ID = ID.x, join_ID = join_ID,
    m_year = m_year.x, height = height, fruit_num = fruit_num)

# only take the rows with fruit number greater than 1, as these are counts of fruits
fruit_dat = filter(fruit_dat, fruit_num >= 2)

# get time since fire for each observation
fruit_dat = mutate(fruit_dat, time_since_fire = m_year - burn_yr, site = as.character(site))

mean_height = mean(fruit_dat$height)
site_num = 2

# get the rep data to get mean height from this data for the centering
# get rep data
vr_loc_gen = read.csv('vr_loc_gen_postburn.csv', header = TRUE, stringsAsFactors = FALSE)
vr_loc_gen = vr_loc_gen[!is.na(vr_loc_gen$X), ]

# find the mean height for centering
rep_dat = vr_loc_gen[!is.na(vr_loc_gen$rep), c('uID', 'uLoc', 'rep', 'year', 'height', 'X', 'Y')]
rep_dat = rep_dat[!is.na(rep_dat$height),]
rep_dat = rep_dat[rep_dat$rep <= 1, ]
rep_mean_height = mean(rep_dat$height)

# find the mean height for centering survival
sur_dat = vr_loc_gen[!is.na(vr_loc_gen$sur), c('uID', 'uLoc', 'year','sur', 'height', 'height_prev', 'X', 'Y')]

# neighbour data: we can use all data, even the first year's observed height
neigh_dat = sur_dat

# take out data from the first year observed since nothing can be observed dead in the first year
first_year = sapply(seq_along(sur_dat$year), FUN = function(x){
    min_year_group = min(sur_dat$year[sur_dat$uID == sur_dat$uID[x]])
    return(ifelse(sur_dat$year[x] == min_year_group, FALSE, TRUE))
})

sur_dat = sur_dat[first_year, ]
sur_dat = sur_dat[!is.na(sur_dat$height_prev), ]
sur_mean_height = mean(sur_dat$height_prev)

# first step create matrix of observed R0 from the stan objects
# I will have to re-implement some of these calculations to do this
dz = 1
Z = seq(0, max(rep_dat$height) * 1.3, dz)
num_samps = 500

# get initial distribution
gr_dat_firt = filter(gr_dat, year == 2003)
z0_den = density(gr_dat_firt$height_prev)

# look at log normal distribution
z0_mean = mean(log(gr_dat_firt$height_prev))
z0_sd = sd(log(gr_dat_firt$height_prev))

RJJ_space = matrix(NA, nrow = length(kl_common), ncol = num_samps)
sur_space = matrix(NA, nrow = length(kl_common), ncol = num_samps)
rep_space = matrix(NA, nrow = length(kl_common), ncol = num_samps)
gr_space = matrix(NA, nrow = length(kl_common), ncol = num_samps)

for(i in 1:length(kl_common)){
    print(i)
    out_ob = R_E_samp(SPP_sur, SPP_gr, SPP_rep, fruit_stan, Z, dz, z0_mean, z0_sd, mean_height,
        rep_mean_height, sur_mean_height, i, kl_common, num_samps)
    RJJ_space[i, ] = out_ob$RJJ
    sur_space[i, ] = out_ob$sur_spp
    rep_space[i, ] = out_ob$rep_spp
    gr_space[i, ] = out_ob$gr_spp
}

RJJ_space_list = list(RJJ = RJJ_space, sur_spp = sur_space, rep_spp = rep_space, gr_spp = gr_space)

# this takes ages to produce so save for future use
save(RJJ_space_list, file = 'R0_space_dist_list.Rdata')
#space_ob_name = load('R0_space_dist_list.Rdata')

# randomization over parameter uncertainty under the null distribution where vital rates are not associated with a location
num_rands = num_samps * length(kl_common)

RJJ_null = R_E_null(SPP_sur, SPP_gr, SPP_rep, fruit_stan, Z, dz, z0_mean, z0_sd, mean_height,
    rep_mean_height, sur_mean_height, kl_common, num_rands, num_samps)

# this takes ages to produce so save for future use
save(RJJ_null, file = 'R0_null_dist_list.Rdata')
#null_ob_name = load('R0_null_dist_list.Rdata')

# create a realization of a null distribution using the same shuffled knot for all re-samples
# within a location
RJJ_null_struct = R_E_null_struct(SPP_sur, SPP_gr, SPP_rep, fruit_stan, Z, dz, z0_mean, z0_sd, mean_height,
    rep_mean_height, sur_mean_height, kl_common, num_samps)

# this takes ages to produce so save for future use
save(RJJ_null_struct, file = 'R0_null_stru_dist_list.Rdata')
#null_ob_name = load('R0_null_stru_dist_list.Rdata')

# Find the shift in the spatial distribution between observed and the structured null
# first make a simple histogram to just see how the two distributions look overall
# turn the dists of Rjj over all space and under the null dist into a data frame
hist_df = data.frame(model = rep(c('spatially structured', 'null model'), each = length(RJJ_space_list$RJJ)),
    RJJ = c(as.numeric(RJJ_space_list$RJJ), as.numeric(RJJ_null$RJJ)))

pdf(file = 'RJJ_hist_space_V_null.pdf')
ggplot(hist_df, aes(RJJ, color = model, fill = model)) + geom_density(alpha = 0.2) + xlim(0, 750) +
    theme(legend.position = c(0.8, 0.5))
dev.off()

# also make a plot where we take the median for each location, to see spread over space
med_RJJ_df = data.frame(model = rep(c('spatially structured', 'null'), each = dim(RJJ_space_list$RJJ)[1]),
    med_RJJ = c(apply(RJJ_space_list$RJJ, MARGIN = 1, FUN = median), apply(RJJ_null$RJJ, MARGIN = 1, FUN = median)))

pdf(file = 'RJJ_den_med_loc_space_v_null.pdf')
ggplot(med_RJJ_df, aes(med_RJJ, color = model, fill = model)) + geom_density(alpha = 0.2) + xlim(0, 500) +
    theme(legend.position = c(0.8, 0.5))
dev.off()

# now make a plot for the different R^2
# first set up the matrices so the rows only refer to locations in gaps that have 3 or more locations
gaps_n3 = gap_counts$gapID[gap_counts$count > 2]
gaps_n3_test = vr_kl_df$gapID %in% gaps_n3

RJJ_space_n3 = list(RJJ = RJJ_space_list$RJJ[gaps_n3_test, ], sur_spp = RJJ_space_list$sur_spp[gaps_n3_test, ],
    rep_spp = RJJ_space_list$rep_spp[gaps_n3_test, ], gr_spp = RJJ_space_list$gr_spp[gaps_n3_test, ])

RJJ_null_n3 = list(RJJ = RJJ_null2$RJJ[gaps_n3_test, ], sur_spp = RJJ_null2$sur_spp[gaps_n3_test, ],
    rep_spp = RJJ_null2$rep_spp[gaps_n3_test, ], gr_spp = RJJ_null2$gr_spp[gaps_n3_test, ])

# set up the different models
form_full = 'RJJ ~ sur_spp + rep_spp + gr_spp + (sur_spp + rep_spp + gr_spp | gapID)'
form_sur_rep = 'RJJ ~ sur_spp + rep_spp + (sur_spp + rep_spp | gapID)'
form_sur_gr = 'RJJ ~ sur_spp + gr_spp + (sur_spp + gr_spp | gapID)'
form_rep_gr = 'RJJ ~ rep_spp + gr_spp + (rep_spp + gr_spp | gapID)'

# fit model to each sample in posterior and put in list
full_mod_list = lmer_fitter(form_full, RJJ_space_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(full_mod_list, file = 'RJJ_model_list_space.Rdata')

full_mod_list_null = lmer_fitter(form_full, RJJ_null_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(full_mod_list_null, file = 'RJJ_model_list_null.Rdata')

sur_rep_mod_list = lmer_fitter(form_sur_rep, RJJ_space_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(sur_rep_mod_list, file = 'RJJ_sur_rep_model_list_space.Rdata')

sur_rep_mod_list_null = lmer_fitter(form_sur_rep, RJJ_null_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(sur_rep_mod_list_null, file = 'RJJ_sur_rep_model_list_null.Rdata')

sur_gr_mod_list = lmer_fitter(form_sur_gr, RJJ_space_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(sur_gr_mod_list, file = 'RJJ_sur_gr_model_list_space.Rdata')

sur_gr_mod_list_null = lmer_fitter(form_sur_gr, RJJ_null_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(sur_gr_mod_list_null, file = 'RJJ_sur_gr_model_list_null.Rdata')

rep_gr_mod_list = lmer_fitter(form_rep_gr, RJJ_space_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(rep_gr_mod_list, file = 'RJJ_rep_gr_model_list_space.Rdata')

rep_gr_mod_list_null = lmer_fitter(form_rep_gr, RJJ_null_n3, vr_kl_redu$gapID,
    control = lmerControl(optimizer = 'optimx', optCtrl = list(method = 'nlminb', maxit = 1000)))
save(rep_gr_mod_list_null, file = 'RJJ_rep_gr_model_list_null.Rdata')

num_samps = 500

# make a violin plot of the R^2 for each model to see how much each of the fixed effects is explaining
Rsq_full_space = sapply(full_mod_list, FUN = r.squaredGLMM)
Rsq_full_null = sapply(full_mod_list_null, FUN = r.squaredGLMM)
Rsq_sur_rep_space = sapply(sur_rep_mod_list, FUN = r.squaredGLMM)
Rsq_sur_rep_null = sapply(sur_rep_mod_list_null, FUN = r.squaredGLMM)
Rsq_sur_gr_space = sapply(sur_gr_mod_list, FUN = r.squaredGLMM)
Rsq_sur_gr_null = sapply(sur_gr_mod_list_null, FUN = r.squaredGLMM)
Rsq_rep_gr_space = sapply(rep_gr_mod_list, FUN = r.squaredGLMM)
Rsq_rep_gr_null = sapply(rep_gr_mod_list_null, FUN = r.squaredGLMM)

# row 1 of each result is the marginal R^2, row 2 the conditional R^2
Rsq_mar_df = data.frame(struct = rep(c('spatial', 'null'), each = num_samps * 4),
    model = rep(rep(c('sur + rep + gro', 'sur + rep', 'sur + gro', 'rep + gro'), each = num_samps), times = 2),
    Rsq = c(Rsq_full_space[1, ], Rsq_sur_rep_space[1, ], Rsq_sur_gr_space[1, ], Rsq_rep_gr_space[1, ],
        Rsq_full_null[1, ], Rsq_sur_rep_null[1, ], Rsq_sur_gr_null[1, ], Rsq_rep_gr_null[1, ]))

Rsq_con_df = data.frame(struct = rep(c('spatial', 'null'), each = num_samps * 4),
    model = rep(rep(c('full model', 'growth', 'flowering prob.', 'survival'), each = num_samps), times = 2),
    Rsq = c(Rsq_full_space[2, ], Rsq_sur_rep_space[2, ], Rsq_sur_gr_space[2, ], Rsq_rep_gr_space[2, ],
        Rsq_full_null[2, ], Rsq_sur_rep_null[2, ], Rsq_sur_gr_null[2, ], Rsq_rep_gr_null[2, ]))

Rsq_con_groups = group_by(Rsq_con_df, struct, model)
Rsq_con_shift = summarize(Rsq_con_groups, count = n(), max_Rsq = max(Rsq), min_Rsq = min(Rsq))

# calculate the shifts
dx = 0.02
X = seq(0, 1, dx)

drop_rep_space_shift = dist_shift(Rsq_full_space[2, ], Rsq_sur_gr_space[2, ], X, dx)
drop_sur_space_shift = dist_shift(Rsq_full_space[2, ], Rsq_rep_gr_space[2, ], X, dx)
drop_gr_space_shift = dist_shift(Rsq_full_space[2, ], Rsq_sur_rep_space[2, ], X, dx)

drop_rep_null_shift = dist_shift(Rsq_full_null[2, ], Rsq_sur_gr_null[2, ], X, dx)
drop_sur_null_shift = dist_shift(Rsq_full_null[2, ], Rsq_rep_gr_null[2, ], X, dx)
drop_gr_null_shift = dist_shift(Rsq_full_null[2, ], Rsq_sur_rep_null[2, ], X, dx)

Rsq_con_shift$shift = c(drop_rep_null_shift, NA, drop_gr_null_shift, drop_sur_null_shift,
    drop_rep_space_shift, NA, drop_gr_space_shift, drop_sur_space_shift)

# set up dummy data frame for annotation
ann_df = data.frame(struct = factor(c('null', 'null'), levels = c('spatial', 'null')),
    model = c(1.75), Rsq = c(0.865), ann_text = c("''*R[shift]^2*' :'"))

# make the violin plots
pdf(file = 'var_Rf_explained.pdf', width = 12, height = 10)
ggplot(Rsq_con_df, aes(model, Rsq, colour = struct, fill = struct)) + geom_violin(alpha = 0.3) + ylim(0, 1) +
    scale_x_discrete(limits = c('full model', 'flowering prob.', 'survival', 'growth'), labels = c('', '', '', '')) +
    theme(legend.position = 'none', axis.title = element_text(size = 20), axis.text = element_text(size = 15),
        strip.text = element_text(size = 20)) +
    labs(x = 'vital rate dropped', y = bquote(''*R^2*'')) +
    geom_text(data = Rsq_con_shift, aes(x = model, y = max_Rsq + 0.015, label = round(shift, 3)), inherit.aes = FALSE, size = 5) +
    geom_text(data = Rsq_con_shift, aes(x = model, y = min_Rsq - 0.02, label = model), inherit.aes = FALSE, size = 5) +
    facet_wrap(~ struct) +
    geom_text(data = ann_df, aes(x = model, y = Rsq, label = ann_text), inherit.aes = FALSE, size = 5, parse = TRUE, hjust = 1)
dev.off()
/vr_R0_relationship.R
permissive
ShaunCoutts/spatial_demogrpahy
R
false
false
22,944
r
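The last record repeatedly fits nested lmer() models and pulls marginal and conditional R-squared out of each fit with MuMIn::r.squaredGLMM(), treating the first value as the marginal and the second as the conditional R-squared. A self-contained toy version of that step, using lme4's bundled sleepstudy data rather than the study's posterior samples:

library(lme4)
library(MuMIn)

# full model and a reduced model with the fixed effect of interest dropped
full_fit <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy, REML = TRUE)
redu_fit <- lmer(Reaction ~ 1 + (Days | Subject), data = sleepstudy, REML = TRUE)

r.squaredGLMM(full_fit)   # R2m = fixed effects only, R2c = fixed + random effects
r.squaredGLMM(redu_fit)   # the drop in R2m shows how much 'Days' explained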