| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0–6.46M | large_string, lengths 3–331 | large_string, 2 classes | large_string, lengths 5–125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4–6.46M | large_string, 75 classes | string, lengths 0–6.46M |
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include marketplacemetering_service.R
NULL
#' BatchMeterUsage is called from a SaaS application listed on the AWS
#' Marketplace to post metering records for a set of customers
#'
#' @description
#' BatchMeterUsage is called from a SaaS application listed on the AWS
#' Marketplace to post metering records for a set of customers.
#'
#' For identical requests, the API is idempotent; requests can be retried
#' with the same records or a subset of the input records.
#'
#' Every request to BatchMeterUsage is for one product. If you need to
#' meter usage for multiple products, you must make multiple calls to
#' BatchMeterUsage.
#'
#' BatchMeterUsage can process up to 25 UsageRecords at a time.
#'
#' A UsageRecord can optionally include multiple usage allocations, to
#' provide customers with usage data split into buckets by tags that you
#' define (or allow the customer to define).
#'
#' BatchMeterUsage requests must be less than 1MB in size.
#'
#' @usage
#' marketplacemetering_batch_meter_usage(UsageRecords, ProductCode)
#'
#' @param UsageRecords [required] The set of UsageRecords to submit. BatchMeterUsage accepts up to 25
#' UsageRecords at a time.
#' @param ProductCode [required] Product code is used to uniquely identify a product in AWS Marketplace.
#' The product code should be the same as the one used during the
#' publishing of a new product.
#'
#' @section Request syntax:
#' ```
#' svc$batch_meter_usage(
#' UsageRecords = list(
#' list(
#' Timestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' CustomerIdentifier = "string",
#' Dimension = "string",
#' Quantity = 123,
#' UsageAllocations = list(
#' list(
#' AllocatedUsageQuantity = 123,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' ProductCode = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname marketplacemetering_batch_meter_usage
marketplacemetering_batch_meter_usage <- function(UsageRecords, ProductCode) {
op <- new_operation(
name = "BatchMeterUsage",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .marketplacemetering$batch_meter_usage_input(UsageRecords = UsageRecords, ProductCode = ProductCode)
output <- .marketplacemetering$batch_meter_usage_output()
config <- get_config()
svc <- .marketplacemetering$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.marketplacemetering$operations$batch_meter_usage <- marketplacemetering_batch_meter_usage
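# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated paws
# code). It wraps a call to the function defined above in an inert helper so
# nothing runs at load time. The product code, customer identifier, dimension,
# quantity, and timestamp are placeholder values.
# ---------------------------------------------------------------------------
example_batch_meter_usage <- function() {
  marketplacemetering_batch_meter_usage(
    UsageRecords = list(
      list(
        Timestamp = as.POSIXct("2015-01-01", tz = "UTC"),  # placeholder
        CustomerIdentifier = "example-customer-id",        # placeholder
        Dimension = "example-dimension",                   # placeholder
        Quantity = 1
      )
    ),
    ProductCode = "example-product-code"                   # placeholder
  )
}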
#' API to emit metering records
#'
#' @description
#' API to emit metering records. For identical requests, the API is
#' idempotent. It simply returns the metering record ID.
#'
#' MeterUsage is authenticated on the buyer's AWS account using credentials
#' from the EC2 instance, ECS task, or EKS pod.
#'
#' MeterUsage can optionally include multiple usage allocations, to provide
#' customers with usage data split into buckets by tags that you define (or
#' allow the customer to define).
#'
#' @usage
#' marketplacemetering_meter_usage(ProductCode, Timestamp, UsageDimension,
#' UsageQuantity, DryRun, UsageAllocations)
#'
#' @param ProductCode [required] Product code is used to uniquely identify a product in AWS Marketplace.
#' The product code should be the same as the one used during the
#' publishing of a new product.
#' @param Timestamp [required] Timestamp, in UTC, for which the usage is being reported. Your
#' application can meter usage for up to one hour in the past. Make sure
#' the timestamp value is not before the start of the software usage.
#' @param UsageDimension [required] It will be one of the fcp dimension names provided during the publishing
#' of the product.
#' @param UsageQuantity Consumption value for the hour. Defaults to `0` if not specified.
#' @param DryRun Checks whether you have the permissions required for the action, but
#' does not make the request. If you have the permissions, the request
#' returns DryRunOperation; otherwise, it returns UnauthorizedException.
#' Defaults to `false` if not specified.
#' @param UsageAllocations The set of UsageAllocations to submit.
#'
#' The sum of all UsageAllocation quantities must equal the UsageQuantity
#' of the MeterUsage request, and each UsageAllocation must have a unique
#' set of tags (including no tags).
#'
#' @section Request syntax:
#' ```
#' svc$meter_usage(
#' ProductCode = "string",
#' Timestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' UsageDimension = "string",
#' UsageQuantity = 123,
#' DryRun = TRUE|FALSE,
#' UsageAllocations = list(
#' list(
#' AllocatedUsageQuantity = 123,
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname marketplacemetering_meter_usage
marketplacemetering_meter_usage <- function(ProductCode, Timestamp, UsageDimension, UsageQuantity = NULL, DryRun = NULL, UsageAllocations = NULL) {
op <- new_operation(
name = "MeterUsage",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .marketplacemetering$meter_usage_input(ProductCode = ProductCode, Timestamp = Timestamp, UsageDimension = UsageDimension, UsageQuantity = UsageQuantity, DryRun = DryRun, UsageAllocations = UsageAllocations)
output <- .marketplacemetering$meter_usage_output()
config <- get_config()
svc <- .marketplacemetering$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.marketplacemetering$operations$meter_usage <- marketplacemetering_meter_usage
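# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not generated by make.paws): a
# minimal hourly metering call using the function defined above. Product
# code, dimension, quantity, and timestamp are placeholders; per the
# parameter docs the timestamp may be up to one hour in the past.
# ---------------------------------------------------------------------------
example_meter_usage <- function() {
  marketplacemetering_meter_usage(
    ProductCode = "example-product-code",              # placeholder
    Timestamp = as.POSIXct("2015-01-01", tz = "UTC"),  # placeholder
    UsageDimension = "example-dimension",              # placeholder
    UsageQuantity = 1,
    DryRun = TRUE  # validate permissions without posting a record
  )
}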
#' Paid container software products sold through AWS Marketplace must
#' integrate with the AWS Marketplace Metering Service and call the
#' RegisterUsage operation for software entitlement and metering
#'
#' @description
#' Paid container software products sold through AWS Marketplace must
#' integrate with the AWS Marketplace Metering Service and call the
#' RegisterUsage operation for software entitlement and metering. Free and
#' BYOL products for Amazon ECS or Amazon EKS aren't required to call
#' RegisterUsage, but you may choose to do so if you would like to receive
#' usage data in your seller reports. The sections below explain the
#' behavior of RegisterUsage. RegisterUsage performs two primary functions:
#' metering and entitlement.
#'
#' - *Entitlement*: RegisterUsage allows you to verify that the customer
#' running your paid software is subscribed to your product on AWS
#' Marketplace, enabling you to guard against unauthorized use. Your
#' container image that integrates with RegisterUsage is only required
#' to guard against unauthorized use at container startup; as such, a
#' CustomerNotSubscribedException/PlatformNotSupportedException will
#' only be thrown on the initial call to RegisterUsage. Subsequent
#' calls from the same Amazon ECS task instance (e.g. task-id) or
#' Amazon EKS pod will not throw a CustomerNotSubscribedException, even
#' if the customer unsubscribes while the Amazon ECS task or Amazon EKS
#' pod is still running.
#'
#' - *Metering*: RegisterUsage meters software use per ECS task, per
#' hour, or per pod for Amazon EKS with usage prorated to the second. A
#' minimum of 1 minute of usage applies to tasks that are short lived.
#' For example, if a customer has a 10 node Amazon ECS or Amazon EKS
#' cluster and a service configured as a Daemon Set, then Amazon ECS or
#' Amazon EKS will launch a task on all 10 cluster nodes and the
#' customer will be charged: (10 * hourly\\_rate). Metering for
#' software use is automatically handled by the AWS Marketplace
#' Metering Control Plane -- your software is not required to perform
#' any metering specific actions, other than call RegisterUsage once
#' for metering of software use to commence. The AWS Marketplace
#' Metering Control Plane will also continue to bill customers for
#' running ECS tasks and Amazon EKS pods, regardless of the customer's
#' subscription state, removing the need for your software to perform
#' entitlement checks at runtime.
#'
#' @usage
#' marketplacemetering_register_usage(ProductCode, PublicKeyVersion, Nonce)
#'
#' @param ProductCode [required] Product code is used to uniquely identify a product in AWS Marketplace.
#' The product code should be the same as the one used during the
#' publishing of a new product.
#' @param PublicKeyVersion [required] Public Key Version provided by AWS Marketplace
#' @param Nonce (Optional) To scope down the registration to a specific running software
#' instance and guard against replay attacks.
#'
#' @section Request syntax:
#' ```
#' svc$register_usage(
#' ProductCode = "string",
#' PublicKeyVersion = 123,
#' Nonce = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname marketplacemetering_register_usage
marketplacemetering_register_usage <- function(ProductCode, PublicKeyVersion, Nonce = NULL) {
op <- new_operation(
name = "RegisterUsage",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .marketplacemetering$register_usage_input(ProductCode = ProductCode, PublicKeyVersion = PublicKeyVersion, Nonce = Nonce)
output <- .marketplacemetering$register_usage_output()
config <- get_config()
svc <- .marketplacemetering$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.marketplacemetering$operations$register_usage <- marketplacemetering_register_usage
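# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): how a container product might call
# the entitlement/metering registration defined above at startup. The product
# code and nonce are placeholders; PublicKeyVersion is simply an example
# integer value.
# ---------------------------------------------------------------------------
example_register_usage <- function() {
  marketplacemetering_register_usage(
    ProductCode = "example-product-code",  # placeholder
    PublicKeyVersion = 1,                  # example value
    Nonce = "example-nonce"                # placeholder, optional
  )
}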
#' ResolveCustomer is called by a SaaS application during the registration
#' process
#'
#' @description
#' ResolveCustomer is called by a SaaS application during the registration
#' process. When a buyer visits your website during the registration
#' process, the buyer submits a registration token through their browser.
#' The registration token is resolved through this API to obtain a
#' CustomerIdentifier and product code.
#'
#' @usage
#' marketplacemetering_resolve_customer(RegistrationToken)
#'
#' @param RegistrationToken [required] When a buyer visits your website during the registration process, the
#' buyer submits a registration token through the browser. The registration
#' token is resolved to obtain a CustomerIdentifier and product code.
#'
#' @section Request syntax:
#' ```
#' svc$resolve_customer(
#' RegistrationToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname marketplacemetering_resolve_customer
marketplacemetering_resolve_customer <- function(RegistrationToken) {
op <- new_operation(
name = "ResolveCustomer",
http_method = "POST",
http_path = "/",
paginator = list()
)
input <- .marketplacemetering$resolve_customer_input(RegistrationToken = RegistrationToken)
output <- .marketplacemetering$resolve_customer_output()
config <- get_config()
svc <- .marketplacemetering$service(config)
request <- new_request(svc, op, input, output)
response <- send_request(request)
return(response)
}
.marketplacemetering$operations$resolve_customer <- marketplacemetering_resolve_customer
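# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): resolving the registration token a
# buyer submits through the browser, as described above. The token value is a
# placeholder.
# ---------------------------------------------------------------------------
example_resolve_customer <- function(token = "example-registration-token") {
  marketplacemetering_resolve_customer(RegistrationToken = token)
}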
path: /cran/paws.cost.management/R/marketplacemetering_operations.R | license_type: permissive | repo_name: sanchezvivi/paws | language: R | is_vendor: false | is_generated: false | length_bytes: 11,583 | extension: r
#' Value matching
#'
#' @name notin
#' @rdname notin
#'
#' @param x Vector with the values to be matched.
#' @param table Vector with the values to be matched against.
#'
#' @return A logical vector indicating which values are not in \code{table}.
#'
#' @export
#'
#' @seealso \code{\link[base:match]{match()}}.
#'
#' @examples
#' x <- 8:12
#' x %!in% 1:10
'%!in%' <- function(x, table) !(x %in% table)
path: /R/operators.R | license_type: no_license | repo_name: cran/lvmisc | language: R | is_vendor: false | is_generated: false | length_bytes: 403 | extension: r
#' Get WFS available layer information
#'
#' @param wfs A `WFSClient` R6 object with methods for interfacing an OGC Web Feature Service.
#' @inheritParams emodnet_init_wfs_client
#' @importFrom rlang .data
#' @return A tibble containing metadata on each layer available from the service.
#' @export
#' @describeIn emodnet_get_wfs_info Get info on all layers from an EMODnet WFS service.
#' @examples
#' # Query the default service
#' emodnet_get_wfs_info()
#' # Query a service
#' emodnet_get_wfs_info(service = "bathymetry")
#' # Query a wfs object
#' wfs_cml <- emodnet_init_wfs_client("chemistry_marine_litter")
#' emodnet_get_wfs_info(wfs_cml)
#' # Get info for specific layers from wfs object
#' layers <- c("bl_fishing_monitoring",
#' "bl_beacheslocations_monitoring")
#' emodnet_get_layer_info(wfs = wfs_cml, layers = layers)
emodnet_get_wfs_info <- function(wfs = NULL,
service = "seabed_habitats_individual_habitat_map_and_model_datasets",
service_version = "2.0.0") {
if(is.null(wfs)){
wfs <- emodnet_init_wfs_client(service, service_version)
}else{check_wfs(wfs)}
caps <- wfs$getCapabilities()
tibble::tibble(
data_source = "emodnet_wfs",
service_name = service,
service_url = get_service_url(service),
layer_name = purrr::map_chr(caps$getFeatureTypes(), ~.x$getName()),
title = purrr::map_chr(caps$getFeatureTypes(), ~.x$getTitle()),
abstract = purrr::map_chr(caps$getFeatureTypes(), ~getAbstractNull(.x)),
class = purrr::map_chr(caps$getFeatureTypes(), ~.x$getClassName()),
format = "sf"
) %>%
tidyr::separate(.data$layer_name, into = c("layer_namespace", "layer_name"),
sep = ":")
}
#' @describeIn emodnet_get_wfs_info Get metadata for specific layers. Requires a
#' `wfs` object as input.
#' @inheritParams emodnet_get_layers
#' @export
emodnet_get_layer_info <- function(wfs, layers) {
check_wfs(wfs)
layers <- match.arg(layers, choices = emodnet_get_wfs_info(wfs)$layer_name,
several.ok = TRUE)
caps <- wfs$getCapabilities()
wfs_layers <- purrr::map(layers,
~caps$findFeatureTypeByName(.x))
tibble::tibble(
data_source = "emodnet_wfs",
service_name = wfs$getUrl(),
service_url = get_service_name(wfs$getUrl()),
layer_name = purrr::map_chr(wfs_layers, ~.x$getName()),
title = purrr::map_chr(wfs_layers, ~.x$getTitle()),
abstract = purrr::map_chr(wfs_layers, ~getAbstractNull(.x)),
class = purrr::map_chr(wfs_layers, ~.x$getClassName()),
format = "sf"
) %>%
tidyr::separate(.data$layer_name,
into = c("layer_namespace", "layer_name"),
sep = ":")
}
#' @describeIn emodnet_get_wfs_info Get metadata on all layers and all available
#' services from server.
#' @export
emodnet_get_all_wfs_info <- function() {
purrr::map_df(emodnet_wfs$service_name,
~suppressMessages(emodnet_get_wfs_info(service = .x)))
}
getAbstractNull <- function(x){
abstract <- x$getAbstract()
ifelse(is.null(abstract), "", abstract)
}
path: /R/info.R | license_type: permissive | repo_name: ldbk/EMODnetWFS | language: R | is_vendor: false | is_generated: false | length_bytes: 3,255 | extension: r
#' module_aglu_L2041.resbio_input_irr
#'
#' Briefly describe what this chunk does.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L2041.AgResBio_For}, \code{L2041.GlobalResBio_Mill}, \code{L2041.AgResBio_ag_irr}, \code{L2041.AgResBioCurve_For}, \code{L2041.StubResBioCurve_Mill}, \code{L2041.AgResBioCurve_ag_irr}. The corresponding file in the
#' original data system was \code{L2041.resbio_input_irr.R} (aglu level2).
#' @details Describe in detail what this chunk does.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread
#' @author YourInitials CurrentMonthName 2017
#' @export
module_aglu_L2041.resbio_input_irr_DISABLED <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c("L204.AgResBio_For",
"L204.GlobalResBio_Mill",
"L204.AgResBio_ag",
"L204.AgResBioCurve_For",
"L204.StubResBioCurve_Mill",
"L204.AgResBioCurve_ag"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L2041.AgResBio_For",
"L2041.GlobalResBio_Mill",
"L2041.AgResBio_ag_irr",
"L2041.AgResBioCurve_For",
"L2041.StubResBioCurve_Mill",
"L2041.AgResBioCurve_ag_irr"))
} else if(command == driver.MAKE) {
all_data <- list(...)[[1]]
# Load required inputs
L204.AgResBio_For <- get_data(all_data, "L204.AgResBio_For")
L204.GlobalResBio_Mill <- get_data(all_data, "L204.GlobalResBio_Mill")
L204.AgResBio_ag <- get_data(all_data, "L204.AgResBio_ag")
L204.AgResBioCurve_For <- get_data(all_data, "L204.AgResBioCurve_For")
L204.StubResBioCurve_Mill <- get_data(all_data, "L204.StubResBioCurve_Mill")
L204.AgResBioCurve_ag <- get_data(all_data, "L204.AgResBioCurve_ag")
# ===================================================
# TRANSLATED PROCESSING CODE GOES HERE...
#
# If you find a mistake/thing to update in the old code and
# fixing it will change the output data, causing the tests to fail,
# (i) open an issue on GitHub, (ii) consult with colleagues, and
# then (iii) code a fix:
#
# if(OLD_DATA_SYSTEM_BEHAVIOR) {
# ... code that replicates old, incorrect behavior
# } else {
# ... new code with a fix
# }
#
#
# NOTE: This code uses repeat_and_add_vector
# This function can be removed; see https://github.com/JGCRI/gcamdata/wiki/Name-That-Function
# ===================================================
# Produce outputs
# Temporary code below sends back empty data frames marked "don't test"
# Note that all precursor names (in `add_precursor`) must be in this chunk's inputs
# There's also a `same_precursors_as(x)` you can use
# If no precursors (very rare) don't call `add_precursor` at all
tibble() %>%
add_title("descriptive title of data") %>%
add_units("units") %>%
add_comments("comments describing how data generated") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L2041.AgResBio_For") %>%
add_precursors("precursor1", "precursor2", "etc") %>%
# typical flags, but there are others--see `constants.R`
add_flags(FLAG_LONG_YEAR_FORM, FLAG_NO_XYEAR) ->
L2041.AgResBio_For
tibble() %>%
add_title("descriptive title of data") %>%
add_units("units") %>%
add_comments("comments describing how data generated") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L2041.GlobalResBio_Mill") %>%
add_precursors("precursor1", "precursor2", "etc") %>%
# typical flags, but there are others--see `constants.R`
add_flags(FLAG_LONG_YEAR_FORM, FLAG_NO_XYEAR) ->
L2041.GlobalResBio_Mill
tibble() %>%
add_title("descriptive title of data") %>%
add_units("units") %>%
add_comments("comments describing how data generated") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L2041.AgResBio_ag_irr") %>%
add_precursors("precursor1", "precursor2", "etc") %>%
# typical flags, but there are others--see `constants.R`
add_flags(FLAG_LONG_YEAR_FORM, FLAG_NO_XYEAR) ->
L2041.AgResBio_ag_irr
tibble() %>%
add_title("descriptive title of data") %>%
add_units("units") %>%
add_comments("comments describing how data generated") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L2041.AgResBioCurve_For") %>%
add_precursors("precursor1", "precursor2", "etc") %>%
# typical flags, but there are others--see `constants.R`
add_flags(FLAG_LONG_YEAR_FORM, FLAG_NO_XYEAR) ->
L2041.AgResBioCurve_For
tibble() %>%
add_title("descriptive title of data") %>%
add_units("units") %>%
add_comments("comments describing how data generated") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L2041.StubResBioCurve_Mill") %>%
add_precursors("precursor1", "precursor2", "etc") %>%
# typical flags, but there are others--see `constants.R`
add_flags(FLAG_LONG_YEAR_FORM, FLAG_NO_XYEAR) ->
L2041.StubResBioCurve_Mill
tibble() %>%
add_title("descriptive title of data") %>%
add_units("units") %>%
add_comments("comments describing how data generated") %>%
add_comments("can be multiple lines") %>%
add_legacy_name("L2041.AgResBioCurve_ag_irr") %>%
add_precursors("precursor1", "precursor2", "etc") %>%
# typical flags, but there are others--see `constants.R`
add_flags(FLAG_LONG_YEAR_FORM, FLAG_NO_XYEAR) ->
L2041.AgResBioCurve_ag_irr
return_data(L2041.AgResBio_For, L2041.GlobalResBio_Mill, L2041.AgResBio_ag_irr, L2041.AgResBioCurve_For, L2041.StubResBioCurve_Mill, L2041.AgResBioCurve_ag_irr)
} else {
stop("Unknown command")
}
}
path: /R/zchunk_L2041.resbio_input_irr.R | license_type: no_license | repo_name: shaohuizhang/gcamdata | language: R | is_vendor: false | is_generated: false | length_bytes: 6,092 | extension: r
library('ggplot2')
library('reshape2')
library('ggpubr')
library(glmnet)
library(doMC)
library(survival)
library(data.table)
library(mltools)
library(CoxBoost)
library(randomForestSRC)
library(CoxHD)
library(Hmisc)
library(gridExtra)
library("survminer")
library(dplyr)
library(broom)
library(parallel) # attached explicitly for mclapply(), used in bootstrapping() below
library(tidyr)
library(tidyverse)
source("../../../../src/tools.R")
source('../run_prognosis.R')
df_final <- read.table("../prognosis_comp_final.tsv",sep='\t',header=T)
### Features that we can use
###-----------------------------------------------------------------------------
all_features <-c(1:180) #not used
clin_demo_comp <-c(155:180) #not used
clin_demo_cyto_gen_comp <- c(2:180) #not used
comp <- c(164:180) #not used
cyto_comp <-c(86:154,164:180) #not used
cyto_gen_comp <- c(2:154,164:180) #not used
eln_clin_demo_comp <- c(1,155:180) #not used
eln_cyto_comp <- c(1,86:154,164:180) #not used
eln_cyto_gen_comp <- c(1:154,164:180) #not used
eln_gen_comp <- c(1:85,164:180) #not used
gen_comp <- c(2:85,164:180) #not used
clin_comp <- c(155:161,164:180) #not used
clin_cyto_comp <- c(86:161,164:180) #not used
clin_gen_comp <- c(2:85,155:161,164:180) #not used
eln_clin_comp <- c(1,155:161,164:180) #not used
age <- c(163)
gen_age <- c(2:85,163)
eln_clin_gen <- c(1:85,155:161)
eln_demo_gen <- c(1:85,162:163)
eln_clin_demo_cyto_gen <- c(1:163)
eln_clin_demo_cyto <- c(1,86:163)
eln_clin_demo_gen <- c(1:85,155:163) ##START HERE
eln_clin_demo <- c(1,155:163)
eln_clin <- c(1,155:161)
eln_cyto_gen <- c(1:154)
clin_demo_cyto_gen <- c(2:163)
clin_demo_cyto <- c(86:163)
clin_demo_gen <- c(2:85,155:163)
clin_demo <- c(155:163)
cyto_gen <- c(2:154)
cyto <- c(86:154)
gen <- c(2:85)
clin_gen <- c(2:85,155:161)
clin_cyto <- c(86:161)
demo_gen <- c(2:85,162:163)
demo_cyto <- c(86:154,162:163)
###Without age:
all_features_without_age <-c(1:162,164:180) #not used
clin_demo_comp_without_age <-c(155:162,164:180) #not used
clin_demo_cyto_gen_comp_without_age <- c(2:162,164:180) #not used
eln_clin_demo_comp_without_age <- c(1,155:162,164:180) #not used
eln_demo_gen_without_age <- c(1:85,162)
eln_clin_demo_cyto_gen_without_age <- c(1:162)
eln_clin_demo_cyto_without_age <- c(1,86:162)
eln_clin_demo_gen_without_age <- c(1:85,155:162)
eln_clin_demo_without_age <- c(1,155:162)
clin_demo_cyto_gen_without_age <- c(2:162)
clin_demo_cyto_without_age <- c(86:162)
clin_demo_gen_without_age <- c(2:85,155:162)
clin_demo_without_age <- c(155:162)
demo_gen_without_age <- c(2:85,162)
demo_cyto_without_age <- c(86:154,162)
bootstrapping <- function(features=all_features,x,y,n_exp=100,alpha=0.7,mc.cores=50,model="glm"){
set.seed(17)
res_bootstrap <- data.frame('feature' = character(),
'coef' = numeric())
design=x[,features]
n = nrow(design)
folds <- list()
for (i in seq(n_exp)) {
folds[[i]] <- sample(1:n, 0.8 * n, replace = TRUE)
}
nexp = length(folds)
print("Start Bootstrapping")
rescv = mclapply(seq(nexp),
FUN=function(iexp) {
set.seed(17)
cat(".")
x_sampling = design[folds[[iexp]],]
y_sampling = y[folds[[iexp]],]
if (model=="glm"){
cvfit <- cv.glmnet(x_sampling, y_sampling, family = 'cox', alpha=alpha, nfolds = 20, grouped = TRUE)
tmp <- as.data.frame(as.matrix(coef(cvfit, s = "lambda.min")))
} else if (model=="boost"){
cvfit<-CoxBoost(time=y_sampling[,1],status=y_sampling[,2],x=x_sampling)
tmp <- as.data.frame(as.matrix(coefficients(cvfit)))
} else if (model=="rfx"){
cvfit<-CoxRFX(data.frame(x_sampling),Surv(time=y_sampling[,1],event=y_sampling[,2]) , max.iter =50,tol=1e-3)
tmp <- as.data.frame(as.matrix(coef(cvfit)))
} else if (model=="rfs"){
cvfit <- rfsrc(Surv(time, status) ~ ., data=data.frame(x_sampling,y_sampling), ntree=1050, importance="TRUE",nodesize=20)
tmp <- as.data.frame(as.matrix(cvfit$importance))
}
colnames(tmp) <- 'coef'
tmp <- rownames_to_column(tmp, var = 'feature')
},
                 mc.cores=mc.cores  # use the function argument rather than a hard-coded 50
)
for(i in 1:length(rescv)){
res_bootstrap <- rbind(res_bootstrap,rescv[[i]])
}
res_bootstrap <- res_bootstrap[res_bootstrap$coef != 0,]
return (res_bootstrap)
}
x <- data.matrix(df_final)
y <- data.matrix(df_final[,c("os","os_status")])
colnames(y) = c("time","status")
response=y
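### Hedged sketch (not executed as part of this analysis): how a single small
### bootstrap run would be invoked on the gene-only feature set with the
### elastic-net Cox model. The n_exp, alpha and mc.cores values below are
### illustrative placeholders, not the settings used further down.
example_small_bootstrap <- function() {
  bootstrapping(features = gen, x = x, y = y, n_exp = 10,
                alpha = 0.7, mc.cores = 4, model = "glm")
}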
prognosis_features<- list(clin_gen_comp=clin_gen_comp)
algos <-c("glm","rfs","boost","rfx")
alphas=c(0,0.7,1)
for (i in 1:length(prognosis_features)){
for (algo in algos){
if (algo=="glm"){
for (alpha in alphas){
print(alpha)
print(algo)
bootstrap <- bootstrapping(prognosis_features[[i]],x,y,100,alpha,8,algo)
tmp_1 <- bootstrap %>% group_by(feature) %>% summarise_all(sum)
tmp_2 <- bootstrap %>% group_by(feature) %>% count(feature)
print(paste(paste(names(prognosis_features)[i],paste(algo,alpha,sep="_"),sep="_bootstrap_"),".tsv",sep=""))
write.table(data.frame(merge(tmp_1,tmp_2,by='feature')),paste(paste(names(prognosis_features)[i],paste(algo,alpha,sep="_"),sep="_bootstrap_"),".tsv",sep=""),quote=F,sep='\t')
if (alpha==0.7){
tmp_1_pos <- tmp_1[tmp_1$coef>0,]
tmp_1_neg <- tmp_1[tmp_1$coef<0,]
features_reduced <- union(union(tmp_1_pos[tmp_1_pos$coef > quantile(tmp_1_pos$coef,0.90),]$feature,tmp_1_neg[tmp_1_neg$coef < quantile(tmp_1_neg$coef,0.15),]$feature),tmp_2[tmp_2$n > quantile(tmp_2$n,0.85),]$feature)
if (length(features_reduced)<2){features_reduced <- union(union(tmp_1_pos[tmp_1_pos$coef > quantile(tmp_1_pos$coef,0.90),]$feature,tmp_1_neg[tmp_1_neg$coef < quantile(tmp_1_neg$coef,0.15),]$feature),tmp_2[tmp_2$n > 0,]$feature)}
print(features_reduced)
predictors <- c(rep(list(predictorGLM),11),rep(list(predictorRF),1),predictorBoost,predictorRFX)
str_predictors <-c(rep("CoxGLM",11),"RFS","CoxBoost","RFX")
l_alpha <-seq(0,1,0.1)
l_ntree <- c(1050)
mc.cores <- 50
nodesize <- c(20)
print("DONE")
write.table(launch_prognosis(data.matrix(df_final[,features_reduced]),y=y,predictors=predictors,str_predictors=str_predictors,l_alpha=l_alpha,nrepeats=2,l_ntree=l_ntree,nodesize=nodesize,
mc.cores=mc.cores),paste(names(prognosis_features)[i],"_reduced.tsv",sep=""),quote=F,sep='\t')
print("DONE")
}
}
} else {
print(algo)
if(algo=="rfs"){
bootstrap <- bootstrapping(prognosis_features[[i]],x,y,10,0.7,8,algo)
}else {
bootstrap <- bootstrapping(prognosis_features[[i]],x,y,100,0.7,8,algo)
tmp_1 <- bootstrap %>% group_by(feature) %>% summarise_all(sum)
tmp_2 <- bootstrap %>% group_by(feature) %>% count(feature)
}
write.table(data.frame(merge(tmp_1,tmp_2,by='feature')),paste(paste(names(prognosis_features)[i],algo,sep="_bootstrap_"),".tsv",sep=""),quote=F,sep='\t')
print ('next')
}
}
}
path: /analysis/prognosis/InitialPrognosis/comparison_dataframes/untitled1.R | license_type: no_license | repo_name: reyear/AML_Analysis | language: R | is_vendor: false | is_generated: false | length_bytes: 7,758 | extension: r
fit_g1_values_and_plot <- function(inDF) {
  # inDF <- myDF  # development leftover: this overwrote the function argument with the global `myDF`; disabled so the `inDF` argument is actually used
inDF <- inDF[inDF$Dataset!="DeAngelis_Macchia",]
inDF<- inDF[!(inDF$Species =="Acer campestre" & inDF$Dataset == "L_SCC"),]
inDF<- inDF[!(inDF$Species =="Prunus avium" & inDF$Dataset == "L_SCC"),]
### split by dataset
inDF$fits <- paste0(inDF$Dataset, "-", inDF$Species, "-", inDF$Treatment)
list <- split(inDF, inDF$fits)
# Not sure how to apply the code to the data.
getr2 <- function(x){
lmfit <- lm(x$data$Cond ~ fitted(x$fit))
summary(lmfit)$r.squared
}
### fit g1 values
fit <- lapply(list,fitBB,gsmodel="BBOpti",
varnames=list(VPD="VPD",ALEAF="Photo",GS="Cond",Ca="CO2S"))
lapply(fit,coef)
g1pars <- sapply(fit,function(x)x$coef[[2]])
g1cilows <- lapply(fit,function(x)confint(x$fit)[1])
g1cihighs <- lapply(fit,function(x)confint(x$fit)[2])
ret <- data.frame(stack(g1pars),stack(g1cilows),stack(g1cihighs))
g1pars <- ret[,c(2,1,3,5)]
names(g1pars) <- c("fitgroup","g1","lowCI","highCI")
out <- strsplit(as.character(g1pars$fitgroup),'-')
out2<- do.call(rbind, out)
out3<- data.frame(g1pars$g1, do.call(rbind, out))
out3 <- renameCol(out3, c("g1pars.g1","X1","X2","X3"),
c("g1","Dataset","Species","Treatment"))
g1DF<- merge(g1pars, out3, by="g1")
g1DF<- subset(g1DF, select = -c(fitgroup))
test <- sapply(fit,getr2)
### make plot
p1 <- ggplot(inDF) +
geom_point(aes(Photo/sqrt(VPD)/CO2S,Cond, fill=Dataset, group=Dataset), pch=21)+
geom_smooth(aes(Photo/sqrt(VPD)/CO2S,Cond, color=Dataset, group=Dataset), se=F)+
theme_linedraw() +
theme(panel.grid.minor=element_blank(),
axis.title.x = element_text(size=12),
axis.text.x = element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=12),
legend.text=element_text(size=10),
legend.title=element_text(size=12),
panel.grid.major=element_blank(),
legend.position="right",
legend.text.align=0)+
#xlab("VPD (kPa)")+
ylab("Conductance");p1
pdf("output/fit_g1_plot_dataset.pdf")
plot(p1)
dev.off()
### make plot
p1 <- ggplot(inDF) +
geom_point(aes(Photo/sqrt(VPD)/CO2S,Cond, fill=Treatment, group=Treatment), pch=21)+
geom_smooth(aes(Photo/sqrt(VPD)/CO2S,Cond, color=Treatment, group=Treatment), se=F)+
theme_linedraw() +
theme(panel.grid.minor=element_blank(),
axis.title.x = element_text(size=12),
axis.text.x = element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=12),
legend.text=element_text(size=10),
legend.title=element_text(size=12),
panel.grid.major=element_blank(),
legend.position="right",
legend.text.align=0)+
#xlab("VPD (kPa)")+
ylab("Conductance");p1
pdf("output/fit_g1_plot_all.pdf")
plot(p1)
dev.off()
#rsq <- function (x, y) cor(x, y) ^ 2
## Supplemental figure to show the data distribution with CO2 treatment.
p2<- ggplot(inDF, aes(x=Photo/sqrt(VPD)/CO2S, y = Cond,
shape = Treatment, fill = Treatment))+
theme_bw()+
geom_point(colour="black")+
facet_wrap(~ Dataset, nrow = 5)+
scale_shape_manual(values=c(21,24)) +
scale_fill_manual(values=c("blue","red"))+
scale_y_continuous(name="Stomatal Conductance",expand = c(0, 0),limits=c(0,1.5), breaks=seq(0,1.5,0.5)) +
scale_x_continuous(expand = c(0, 0),limits=c(0,0.1), breaks=seq(0,0.1,0.05)) +
theme(legend.box = 'horizontal', legend.justification=c(1,1),
legend.position=c(1,1), legend.title = element_blank(),
legend.text = element_text(size = 11), legend.key = element_blank(),
legend.background = element_blank(),legend.spacing.x = unit(0.25, "cm"),
legend.key.height = unit(0.55, "cm"),legend.key.width = unit(0.2, "cm"));p2
pdf("output/g1_scatterplots_by_dataset_R2.pdf", width=10, height=10)
plot(p2)
dev.off()
# Add in PFT for facet
ENF <- g1DF$Dataset %in% c("Flakaliden", "Flakaliden_2","B_Glencorse", "Duke FACE", "B_SCC")
EBF <- g1DF$Dataset %in% c("EucFACE", "Richmond_WTC1", "Richmond_WTC2")
DBF <- g1DF$Dataset %in% c("BIFOR", "Rhinelander", "R_Glencorse", "Gribskov", "ORNL", "POPFACE","L_SCC")
g1DF$PFT[ENF] <- "Evergreen Gymnosperm"
g1DF$PFT[EBF] <- "Evergreen Angiosperm"
g1DF$PFT[DBF] <- "Deciduous Angiosperm"
g1DF$PFT<- as.factor(g1DF$PFT)
g1DF$Dataset<-as.factor(g1DF$Dataset)
g1DF$Species<-as.factor(g1DF$Species)
g1DF$Treatment<-as.factor(g1DF$Treatment)
# Assign an age
mature <- g1DF$Dataset %in% c("EucFACE", "BIFOR","L_SCC", "B_SCC")
young <- g1DF$Dataset %in% c("Duke FACE", "ORNL", "B_Glencorse","Flakaliden", "Flakaliden_2","Gribskov","R_Glencorse")
sapling <- g1DF$Dataset %in% c("Richmond_WTC1", "Richmond_WTC2", "Rhinelander", "POPFACE")
g1DF$Age[mature] <- "Mature"
g1DF$Age[young] <- "Young"
g1DF$Age[sapling] <- "Sapling"
g1DF$Age = factor(g1DF$Age, levels=c('Mature','Young','Sapling'))
### plot g1_Dataset, Species by PFT
dodge <- position_dodge2(width = 0.5)
p3 <- g1DF %>%
mutate(name = fct_reorder(Dataset,g1)) %>%
ggplot(aes(x=interaction(Species,name), g1,
group=interaction(Species,Treatment),
shape= PFT, fill=Treatment)) +
theme_bw()+
geom_errorbar(aes(ymin = lowCI, ymax = highCI), position=dodge, width=0.5, size=0.2)+
geom_point(size=2, position=dodge) +
scale_y_continuous(name="g1",expand = c(0, 0),limits=c(0,10), breaks=seq(0,10,2)) +
scale_x_discrete(name="Dataset") +
scale_shape_manual(values=c(21,22,23)) +
scale_fill_manual(values=c("blue","red")) +
facet_grid(PFT~. , scales="free", space = "free")+
theme(legend.position="none") +
coord_flip()+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank());p3
pdf("output/g1_DatasetSp_PFT_TreeNum.pdf")
plot(p3)
dev.off()
### plot g1 by Dataset, Species by Age
dodge <- position_dodge2(width = 0.5)
p4 <- g1DF %>%
mutate(name = fct_reorder(Dataset, g1)) %>%
ggplot(aes(x=interaction(Species,name), g1,
group=interaction(Species,Treatment),
shape= PFT, fill=Treatment)) +
theme_bw()+
geom_errorbar(aes(ymin = lowCI, ymax = highCI), position=dodge, width=0.5, size=0.2)+
geom_point(size=2, position=dodge) +
scale_y_continuous(name="g1",expand = c(0, 0),limits=c(0,10), breaks=seq(0,10,2)) +
scale_x_discrete(name="Dataset") +
scale_shape_manual(values=c(21,22,23)) +
scale_fill_manual(values=c("blue","red")) +
facet_grid(Age~. , scales="free", space = "free")+
theme(legend.position="none") +
coord_flip()+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank());p4
pdf("output/g1_DatasetSp_Age.pdf")
plot(p4)
dev.off()
### plot g1 by Dataset, Species by Watered/ not watered ### NOT DONE YET
Watered <- g1DF$Dataset %in% c("POPFACE","Richmond_WTC1", "Richmond_WTC2")
NotWatered <- g1DF$Dataset %in% c("BIFOR", "Rhinelander", "R_Glencorse", "Gribskov", "ORNL", "L_SCC","EucFACE", "Flakaliden", "Flakaliden_2","B_Glencorse", "Duke FACE", "B_SCC")
g1DF$Water[Watered] <- "Watered"
g1DF$Water[NotWatered] <- "NotWatered"
g1DF$Water<- as.factor(g1DF$Water)
dodge <- position_dodge2(width = 0.5)
p5 <- g1DF %>%
mutate(name = fct_reorder(Dataset, g1)) %>%
ggplot(aes(x=interaction(Species,name), g1,
group=interaction(Species,Treatment),
shape= PFT, fill=Treatment)) +
theme_bw()+
geom_errorbar(aes(ymin = lowCI, ymax = highCI), position=dodge, width=0.5, size=0.2)+
geom_point(size=2, position=dodge) +
scale_y_continuous(name="g1",expand = c(0, 0),limits=c(0,10), breaks=seq(0,10,2)) +
scale_x_discrete(name="Dataset") +
scale_shape_manual(values=c(21,22,23)) +
scale_fill_manual(values=c("blue","red")) +
facet_grid(Water~. , scales="free", space = "free")+
theme(legend.position="none") +
coord_flip()+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank());p5
pdf("output/g1_DatasetSp_Water.pdf")
plot(p5)
dev.off()
}
# r2 values for each graph
#rsq <- function (x, y) cor(x, y) ^ 2
my.formula <- y ~ x
p1 <- ggplot(inDF, aes(Photo/sqrt(VPD)/CO2S,Cond, group=Dataset)) +
geom_point(aes(fill=Dataset, group=Dataset), pch=21)+
geom_smooth(aes(color=Dataset), formula = my.formula, se=F)+
theme_linedraw() +
theme(panel.grid.minor=element_blank(),
axis.title.x = element_text(size=12),
axis.text.x = element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=12),
legend.text=element_text(size=10),
legend.title=element_text(size=12),
panel.grid.major=element_blank(),
legend.position="right",
legend.text.align=0)+
#xlab("VPD (kPa)")+
ylab("Stomatal Conductance");p1
pdf("output/fit_g1_plot_dataset_R2.pdf")
plot(p1)
dev.off()
##------------------------------------ ## Start here
## Forest plot for g1 values
g1_forest <- function(g1DF) {
### now separate by CO2 treatment
g1DF1 <- g1DF[g1DF$Treatment == "Ambient CO2",]
g1DF2 <- g1DF[g1DF$Treatment == "Elevated CO2",]
### merge the two
g1DF <- merge(g1DF1, g1DF2, by=c("Dataset", "Species","PFT","Age"))
### re-label all columns
colnames(g1DF) <- c("Dataset","Species","PFT","Age",
"g1_aCO2","lowCI_aCO2", "highCI_aCO2", "Treatment_aCO2", "Water_aCO2",
"g1_eCO2","lowCI_eCO2", "highCI_eCO2", "Treatment_eCO2","Water_eCO2")
### obtain sample size for each CO2 treatment of each dataset
inDF$count_variable <- 1.0
tmpDF <- summaryBy(count_variable~Dataset+Species+Treatment, FUN=sum,
data=inDF, keep.names=T, na.rm=T)
outDF <- merge(g1DF, tmpDF, by.x=c("Dataset", "Species", "Treatment_aCO2"),
by.y=c("Dataset", "Species", "Treatment"))
names(outDF)[names(outDF)=="count_variable"] <- "g1_aCO2_n"
outDF <- merge(outDF, tmpDF, by.x=c("Dataset", "Species", "Treatment_eCO2"),
by.y=c("Dataset", "Species", "Treatment"))
names(outDF)[names(outDF)=="count_variable"] <- "g1_eCO2_n"
g1DF <- outDF
### calculate response ratios
g1DF$g1_resp <- with(g1DF, g1_eCO2/g1_aCO2)
### convert from CI to standard deviation
g1DF$g1_aCO2_sd <- sqrt(g1DF$g1_aCO2_n) * (g1DF$highCI_aCO2 - g1DF$lowCI_aCO2) / 3.92
g1DF$g1_eCO2_sd <- sqrt(g1DF$g1_eCO2_n) * (g1DF$highCI_eCO2 - g1DF$lowCI_eCO2) / 3.92
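### (Note added:) the factor 3.92 is 2 * 1.96, i.e. the full width of a 95%
### confidence interval expressed in standard errors; dividing the CI width by
### it gives the standard error, and multiplying by sqrt(n) converts that
### standard error back into a standard deviation.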
### calculate variance
g1DF$g1_var <- with(g1DF, g1_resp*sqrt(((g1_aCO2_sd/g1_aCO2)^2 + (g1_eCO2_sd/g1_eCO2)^2)/2))
#### Make simplified forest plot
g1DF <- g1DF [complete.cases(g1DF$g1_resp),]
g1DF <- g1DF [order(g1DF$PFT, g1DF$Species, g1DF$Dataset),]
l1 <- length(g1DF$Dataset)
ns1 <- length(unique(g1DF$Dataset))
#--------------------------------
# Simple plot - one data entry per species per dataset, no VPD effect.
pdf(paste0(getwd(), "/output/forest_g1.pdf"),width=10, height=10)
## WUE --------
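### (Note added:) the rma.mv() call below comes from the metafor package;
### random = ~1|Dataset fits a random intercept per dataset, so species-level
### rows from the same dataset share that dataset effect.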
res_g1 <- rma.mv(g1_resp, g1_var, random = ~1|Dataset, data = g1DF)
forest(res_g1, slab=paste(g1DF$Dataset, g1DF$Species, sep=", "),
xlim=c(-10, 10),
ylim=c(0, 26),
rows=c(22:11,9:7,5:1),
at=c(-1,-0.5,0,0.5,1,2,3),
refline=1,
mlab="", psize=1,
cex=0.6,
order=order(g1DF$PFT,g1DF$Dataset,g1DF$Species),
header="g1 response to eCO2")
text(2, 25, "Relative Response [95% CI]", pos = 2, cex = 0.7)
text(-3.5, c(23,10,6), c("Deciduous Broadleaf Forest", "Evergreen Broadleaf Forest", "Evergreen Needle Forest"), pos=2, font=4, cex=0.7)
dev.off()
print(res_g1)
}
### LOOKING AT L_SCC data
LCC<- filter(inDF, Dataset == "L_SCC")
p1 <- ggplot(LCC, aes(Photo/sqrt(VPD)/CO2S,Cond, group=Species)) +
geom_point(aes(fill=Species, group=Species, shape=Treatment))+
geom_smooth(aes(color=Species), formula = my.formula, se=F)+
theme_linedraw() +
scale_shape_manual(values=c(21,23)) +
stat_poly_eq(formula = my.formula,
aes(color=Species,label = paste(..rr.label..)),
parse = TRUE, size= 4, vstep = 0.02) +
theme(panel.grid.minor=element_blank(),
axis.title.x = element_text(size=12),
axis.text.x = element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=12),
legend.text=element_text(size=10),
legend.title=element_text(size=12),
panel.grid.major=element_blank(),
legend.position="right",
legend.text.align=0)+
#xlab("VPD (kPa)")+
ylab("Stomatal Conductance");p1
pdf("output/fit_g1_Just L_CC_R2.pdf")
plot(p1)
dev.off()
LCC_A<- filter(inDF, Dataset == "L_SCC" & Species =="Acer campestre")
LCC_F<- filter(inDF, Dataset == "L_SCC" & Species =="Fagus sylvatica")
LCC_P<- filter(inDF, Dataset == "L_SCC" & Species =="Prunus avium")
LCC_Q<- filter(inDF, Dataset == "L_SCC" & Species =="Quercus petraea")
LCC_T<- filter(inDF, Dataset == "L_SCC" & Species =="Tilia platyphyllos")
p <- ggplot(LCC_A, aes(Photo/sqrt(VPD)/CO2S,Cond, group=Treatment)) +
geom_point(aes(fill=Treatment, shape=Treatment))+
theme_linedraw() +
scale_shape_manual(values=c(21,23)) +
theme(panel.grid.minor=element_blank(),
axis.title.x = element_text(size=12),
axis.text.x = element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.y=element_text(size=12),
legend.text=element_text(size=10),
legend.title=element_text(size=12),
panel.grid.major=element_blank(),
legend.position="right",
legend.text.align=0)+
ylab("Stomatal Conductance");p
LCC$fits <- paste0(LCC$Species, "-", LCC$Treatment)
list <- split(LCC, LCC$fits)
### fit g1 values
fit <- lapply(list,fitBB,gsmodel="BBOpti",
varnames=list(VPD="VPD",ALEAF="Photo",GS="Cond",Ca="CO2S"))
lapply(fit,coef)
g1pars <- sapply(fit,function(x)x$coef[[2]])
g1cilows <- lapply(fit,function(x)confint(x$fit)[1])
g1cihighs <- lapply(fit,function(x)confint(x$fit)[2])
ret <- data.frame(stack(g1pars),stack(g1cilows),stack(g1cihighs))
g1pars <- ret[,c(2,1,3,5)]
names(g1pars) <- c("fitgroup","g1","lowCI","highCI")
|
/fit_g1_values_and_plot_new.R
|
no_license
|
mingkaijiang/FACE_WUE
|
R
| false | false | 15,442 |
r
|
### -----------------------------------------------------------------
### normalisation of the mass spectrum
### Exported!
normaliseSpectrum <- function(x, method=c("sum", "max", "unit")){
if(any(x < 0)){
stop("The spectrum intensity values must be non-negative.")
}
method <- match.arg(method)
if(method == "sum"){
x <- x / sum(x)
}else if(method == "max"){
x <- x / max(x)
}else{
x <- x / sqrt(sum(x^2))
}
return(x)
}
### -----------------------------------------------------------------
### methods for comparing two spectra
### Exported!
### Euclidean geometric distance matching factor
geometricMF <- function(x, y){
if(length(x) != length(y)){
stop("The length of two spectra must be same!")
}
x <- normaliseSpectrum(x, method="unit")
y <- normaliseSpectrum(y, method="unit")
ans <- 1 + sum((x-y)^2)
ans <- 1 / ans
return(ans)
}
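### (Added usage sketch, not part of the original file:) two toy spectra to
### illustrate the matching factor; identical spectra give exactly 1 and
### similar spectra give values close to 1. Kept commented so nothing runs
### when the file is sourced.
# s1 <- c(10, 50, 100, 30)
# s2 <- c(12, 48, 95, 33)
# geometricMF(s1, s1)   # exactly 1
# geometricMF(s1, s2)   # slightly below 1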
|
/R/spectrum-utils.R
|
no_license
|
Yang0014/MassSpectrometry
|
R
| false | false | 889 |
r
|
##Read the data
dat <- read.table("household_power_consumption.txt",header = TRUE, sep = ";", nrows = 1000000, na.strings = "?", stringsAsFactors = FALSE)
##Convert Date
dat$Date <- as.Date(dat$Date, format = "%d/%m/%Y")
##Load dplyr library
library(dplyr)
##Filter data by date
datsub <- filter(dat, dat$Date >= "2007-02-01" & dat$Date < "2007-02-03")
##Remove old data
rm(dat)
##Convert Date and Time
datsub$DateTime <- as.POSIXct(paste(as.Date(datsub$Date), datsub$Time))
##Construct histogram as PNG file
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "white")
hist(datsub$Global_active_power, xlab = "Global Active Power (kilowatts)", col = "red", main = "Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
tangoh8088/ExData_Plotting1
|
R
| false | false | 727 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/central_nervous_system.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=TRUE)
sink('./Model/EN/Lasso/central_nervous_system/central_nervous_system_021.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
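# (Added note:) the fitted cv.glmnet object also exposes the cross-validated
# optimum directly; kept commented so the original sink output is unchanged.
# print(glm$lambda.min)
# print(coef(glm, s = "lambda.min"))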
|
/Model/EN/Lasso/central_nervous_system/central_nervous_system_021.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 399 |
r
|
library(shinystan)
# (Added:) the calls below also rely on packages the original script never
# loads: data.table for fread(), loo for extract_log_lik()/relative_eff()/
# waic()/loo(), rstan for extract(), and lubridate so that months() can be
# added to a Date. build_vix9_rv_subset() is assumed to be defined elsewhere
# in the project.
library(data.table)
library(loo)
library(rstan)
library(lubridate)
setwd('/Users/AM/Documents/_CU Masters/2020 fall Bayesian_7393/Final_Project/data')
SP_500_1 = fread("SANDP-500_201006_201204.csv")
temp = readRDS("RealGARCH11 2020-11-28 for 2017-05-22 2017-11-22 .rda")
sso <- launch_shinystan(temp)
log_lik = extract_log_lik(temp, merge_chains = FALSE)
r_eff = exp(relative_eff(log_lik))
waic = waic(log_lik)
waic$waic
looic = loo(log_lik, r_eff = r_eff)
looic$looic
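# (Note added:) for both criteria, lower values indicate better estimated
# out-of-sample predictive performance; waic() and loo() above come from the
# loo package and operate on the extracted pointwise log-likelihood.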
i=1
start_date = "2017-05-22"
step_size = 2
subset_duration = 6
t_0 = as.Date(start_date) + months((i-1) * step_size)
t_1 = as.Date(t_0) + months(subset_duration + 1) #interim end_date, for subset_long
subset_long = build_vix9_rv_subset(t_0, t_1)
t_1 = as.Date(t_0) + months(subset_duration) #stored end_date, for subset
subset = build_vix9_rv_subset(t_0, t_1)
y = subset$vix_lin_ret
temp_name = gsub(" ", "", paste("r_out[", as.character(i),"]", ""))
r_out=extract(temp, pars=temp_name)[[1]]
|
/model_shiny_test.R
|
no_license
|
Andrey776/Bayesian-FP-R
|
R
| false | false | 918 |
r
|
library(DBI)
library(yaml)
# (Added:) flog.info() below comes from futile.logger, which the original
# script never loads; the retrieve_*/describe_*/report* helpers are assumed to
# be sourced from the project's own utility scripts.
library(futile.logger)
flog.info(Sys.time())
big_data_flag<-TRUE
config = yaml.load_file(g_config_path)
#establish connection to database
con <- establish_database_connection_OHDSI(config)
table_name<-"observation"
#df_table <- retrieve_dataframe_OHDSI(con,config,table_name)
# flog.info(nrow(df_table))
#writing to the final DQA Report
fileConn<-file(paste(normalize_directory_path(config$reporting$site_directory),"./reports/",table_name,"_Report_Automatic.md",sep=""))
fileContent <-get_report_header(table_name,config)
test <-1
#PRIMARY FIELD
field_name<-"observation_id"
fileContent<-c(fileContent,paste("The total number of",field_name,"is: ", retrieve_dataframe_count(con, config,table_name,field_name),"\n"))
#NOMINAL Fields
field_name<-"person_id" #
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeForeignKeyIdentifiers(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name),paste_image_name_sorted(table_name,field_name),message);
# flog.info(fileContent)
field_name<-"associated_provider_id" #
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeForeignKeyIdentifiers(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name),paste_image_name_sorted(table_name,field_name),message);
# flog.info(fileContent)
field_name<-"visit_occurrence_id" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeForeignKeyIdentifiers(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,field_name),paste_image_name_sorted(table_name,field_name));
# flog.info(fileContent)
flog.info(Sys.time())
# ORDINAL Fields
flog.info(Sys.time())
field_name<-"observation_date" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeDateField(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,field_name));
message<-describeTimeField(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,paste(field_name,"_time",sep="")));
#print (fileContent)
#print (fileContent)
#print (fileContent)
# not plotting the value_as_string column as it's a free text field
field_name<-"unit_concept_id" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
describeNominalField_basic(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name));
#print (fileContent)
field_name<-"units_source_value" # 3 minutes
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
describeNominalField_basic(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name));
#print (fileContent)
field_name<-"observation_type_concept_id" # 3 minutes
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
describeNominalField_basic(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name));
#print (fileContent)
field_name<-"relevant_condition_concept_id" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeOrdinalField_large(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,field_name));
#print (fileContent)
#ordinal field
field_name="observation_source_value"
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
describeNominalField_basic(df_table, table_name, field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name));
# this is a nominal field - work on it
field_name<-"observation_concept_id" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name, big_data_flag))
describeNominalField_basic(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,paste_image_name(table_name,field_name));
# flog.info(fileContent)
# get a list of all observation_concept_ids
concept_id_list <- unique(df_table[,1])
#generating concept wise graphs for numerical readings
field_name<-"value_as_number"
#column_index <- which(colnames(df_table)==field_name)
for (i in 1:length(concept_id_list))
{
df_table_subset<-retrieve_dataframe_group_clause(con,config,table_name,field_name,paste(" observation_concept_id=",concept_id_list[i]))
field_name_subset<-paste(field_name,concept_id_list[i],sep="_")
colnames(df_table_subset)[1] <- field_name_subset
fileContent <-c(fileContent,paste("## Barplot for",field_name_subset,"(",get_concept_name(concept_id_list[i],con, g_config),")","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table_subset,table_name,field_name_subset,big_data_flag))
message<-describeRatioField(df_table_subset, table_name,field_name_subset,"",big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,field_name_subset));
#print (fileContent)
}
flog.info(Sys.time())
field_name<-"range_high" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeNominalField_basic(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,field_name));
#print (fileContent)
field_name<-"range_low" #
df_table<-retrieve_dataframe_group(con,config,table_name,field_name)
fileContent <-c(fileContent,paste("## Barplot for",field_name,"","\n"))
fileContent<-c(fileContent,reportMissingCount(df_table,table_name,field_name,big_data_flag))
message<-describeNominalField_basic(df_table, table_name,field_name,big_data_flag)
fileContent<-c(fileContent,message,paste_image_name(table_name,field_name));
#write all contents to the report file and close it.
writeLines(fileContent, fileConn)
close(fileConn)
#close the connection
close_database_connection_OHDSI(con,config)
|
/Main/Level1/v1/scripts/GenerateObservationReport_QueryWise.R
|
permissive
|
rtmill/Data-Quality-Analysis
|
R
| false | false | 7,811 |
r
|
#install.packages("sqldf")
library(sqldf)
# (Added:) dbConnect(), dbWriteTable() and dbGetQuery() are DBI generics, so
# DBI is loaded explicitly rather than relying on sqldf's dependencies.
library(DBI)
##Store the data in a SQL table (on disk)
##Create/connect to a database named "data_db.sqlite"
con <- dbConnect(RSQLite::SQLite(), dbname = "data_db.sqlite")
#Write txt file into the database
dbWriteTable(con, name = "data_table", value = "household_power_consumption.txt",
row.names = FALSE, header = TRUE, sep = ";")
#select required dataset
df<- dbGetQuery(con, "SELECT * FROM data_table WHERE Date in ('1/2/2007', '2/2/2007')")
#do some basic exploration on Global_active_power
head(df$Global_active_power)
summary(df$Global_active_power)
#determine the values for the hist
GlobalActivePowerKW <- as.numeric(as.character(df$Global_active_power))
png("plot1.png")
#plot
hist(GlobalActivePowerKW, col = "red", main = "Global Active Power",
xlab = "Global Active Power(kilowatts)",
cex.axis=0.70)
dev.off()
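# (Added:) close the on-disk SQLite connection once the plot has been written;
# dbDisconnect() is the standard DBI call for this.
dbDisconnect(con)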
|
/plot1.R
|
no_license
|
rashed2014/ExData_Plotting1
|
R
| false | false | 887 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation.R
\name{get_ids}
\alias{get_ids}
\title{Get unique ids of the dataset}
\usage{
get_ids(d)
}
\arguments{
\item{d}{a dataset}
}
\description{
It returns a vector with the unique participant ids in the dataset.
}
|
/man/get_ids.Rd
|
no_license
|
zsigmas/rtsimpack
|
R
| false | true | 301 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog4ga3.R
\docType{data}
\name{trips_by_mode}
\alias{trips_by_mode}
\title{trips_by_mode}
\format{An excel file with 270 rows and 20 variables}
\source{
\url{http://www.transportationtomorrow.on.ca/}
\url{http://www12.statcan.gc.ca/census-recensement/index-eng.cfm}
}
\usage{
data(trips_by_mode)
}
\description{
A data frame with the number of trips by mode of transportation by Traffic Analysis Zone (TAZ), and other useful information from the 2011 census for the Hamilton CMA, Canada.
}
\details{
\itemize{
\item GTA06. identifier used for spatial joins (4050--6020)
\item Cycle. number of Hamiltonians that cycle to work (0--623)
\item Auto_driver. number of Hamiltonians that drive to work (0--17743)
\item Auto_passenger. number of Hamiltonians that get a ride to work (0--4321)
\item Walk. number of Hamiltonians that walk to work (0--1599)
\item Population. population based on a unique spatial polygon (38.88097--12770.552)
\item Worked_in_2010_Full-time. number of Hamiltonians that worked full-time in 2010 (0--5925.9434)
\item Worked_in_2010_Part-time. number of Hamiltonians that worked part-time in 2010 (0--1661.16313)
\item Worked_at_home. number of Hamiltonians that worked from home (0--559.97542)
\item Pop_Density. population density based on a unique spatial polygon (26.20745--14232.5677)
\item Median_Age. median age of Hamiltonians based on a unique spatial polygon (3.845238--56.85006)
\item Family_Size_2. size of family based on a unique spatial polygon (7.250167--1489.0255)
\item Family_Size_3. size of family based on a unique spatial polygon (3.237384--859.09030)
\item Family_Size_4. size of family based on a unique spatial polygon (1.619751--1281.18323)
\item Family_Size_5_more. size of family based on a unique spatial polygon (1.617209--387.37487)
\item Median_income. median income based on unique spatial polygon (9.496379--52496.09)
\item Average_income. average income based on unique spatial polygon (11.44593--81235.73)
\item Employment_rate. average employment rate based on a unique spatial polygon (32.74746--76.69758)
\item Unemployment_rate. average unemployment rate based on a unique polygon (0.001258--23.200001)
\item Median_commuting_duration. median commuting duration based on a unique polygon (15.41049--30.59950)
}
}
\keyword{datasets}
|
/geog4ga3/man/trips_by_mode.Rd
|
no_license
|
snowdj/Spatial-Statistics-Course
|
R
| false | true | 2,426 |
rd
|
#
# (c) 2012 -- 2014 Georgios Gousios <gousiosg@gmail.com>
#
# BSD licensed, see LICENSE in top level dir
#
rm(list = ls(all = TRUE))
source(file = "R/packages.R")
source(file = "R/utils.R")
source(file = "R/cmdline.R")
library(ggplot2)
merge.time.data = read.csv("merge-time-cv-10k.csv")
merge.decision.data = read.csv("merge-decision-cv-10k.csv")
aggregate(auc ~ classifier, merge.time.data, mean)
aggregate(acc ~ classifier, merge.time.data, mean)
aggregate(prec ~ classifier, merge.time.data, mean)
aggregate(rec ~ classifier, merge.time.data, mean)
aggregate(auc ~ classifier, merge.decision.data, mean)
aggregate(acc ~ classifier, merge.decision.data, mean)
aggregate(prec ~ classifier, merge.decision.data, mean)
aggregate(rec ~ classifier, merge.decision.data, mean)
rf.merge.decision <- subset(merge.decision.data, classifier == "randomforest")
rf.merge.time <- subset(merge.time.data, classifier == "randomforest")
print(sprintf("std dev auc merge time: %f", sd(rf.merge.time$auc)))
print(sprintf("std dev auc merge decision: %f", sd(rf.merge.decision$auc)))
|
/R/classification-analysis.R
|
permissive
|
igorsteinmacher/pullreqs
|
R
| false | false | 1,078 |
r
|
## Coursera - Exploratory Data Analysis
## Course Project 1
#=================================================================================================
## Store the source data "household_power_consumption.txt" in R working directory. If the source data file
## does not exist in R working directory, the script will unzip/download from the source url.
## The script (1) downloads/imports source data;
## (2) construct plot1 and send to plot1.png in R working directory (no plot appears on screen)
## Warning: output "plot1.png" will replace existing file with the same file name.
#=================================================================================================
#1.Import source data
url<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip<- "exdata-data-household_power_consumption.zip"
fid<- "household_power_consumption.txt"
#1.1 check if source data file already exists. If not, download from url and/or unzip
if (!file.exists(fid)) {
if (!file.exists(zip)) {
download.file(url,destfile=zip)
}
unzip(zip)
}
#1.2 load only first 100 rows to get column classes
initial<- read.table(fid,header=TRUE,sep=";",nrow=100)
classes<- sapply(initial,class)
#1.3 read all data as text lines and keep only data from Date 1/2/2007 and 2/2/2007
dataLines<- readLines(fid)
dataLines<- dataLines[c(1,grep("^(1/2/2007|2/2/2007)",dataLines))]
#1.4 convert text lines to data frame
data<- read.table(textConnection(dataLines),head=TRUE,sep=";",colClasses=classes,comment.char="",na.strings="?")
rm(list=setdiff(ls(), c("classes","data","fid")))
#1.5 convert variables Date/Time from string to Date/POSIXlt format
data$Time<- strptime(paste(data$Date,data$Time),format="%d/%m/%Y %H:%M:%S")
data$Date<- as.Date(data$Date,format="%d/%m/%Y")
#2. Plotting : create plot and send to a png file (no plot appears on screen)
#2.1 open png device, create "plot1.png" in R working directory
png(filename = "plot1.png",width = 480, height = 480, bg = "transparent")
#2.2 plotting
hist(data[[3]],col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
#2.3 close png file device and set to default device
dev.off()
dev.set(1)
|
/plot1.R
|
no_license
|
blackszu/ExData_Plotting1
|
R
| false | false | 2,380 |
r
|
#'Calculate Standardized Precipitation Evapotranspiration Index (SPEI)
#'
#'Calculate the SPEI and the drought characteristics: length, type and
#'intensity
#'
#'@param prec_data [zoo] rainfall monthly data in zoo class with date
#'in \%Y-\%m-\%d
#'@param evapo_data [zoo] evapotranspiration monthly data in zoo class
#'with date in \%Y-\%m-\%d
#'@param time_step [numeric] by default = 12, time step to sum monthly data
#'(1, 3, 6, 9, 12, 24 and 48)
#'@param distribution [character] distribution of data
#'(log-Logistic, gamma, grev, genlog, normal)
#'
#'@return list that contains
#'@return \emph{spei} [zoo] zoo with the spei values with date in \%Y-\%m-\%d
#'@return \emph{drought_type} [zoo] zoo with the type of the period for each
#'month
#'@return \emph{drought_number} [data.frame] dataframe with the number of
#'periods of each type
#'\itemize{
#'\item Extwet (spei > 2)\cr
#'\item Verywet (1.99 > spei > 1.5)\cr
#'\item Wet (1.49 > spei > 1)\cr
#'\item Normal (0.99 > spei > -0.99)\cr
#'\item Dry (-1 > spei > -1.49)\cr
#'\item VeryDry (-1.5 > spei > -1.99)\cr
#'\item ExtDry (-2 > spei)
#'}
#'
#'@author Florine Garcia (florine.garcia@gmail.com)
#'@author Pierre L'Hermite (pierrelhermite@yahoo.fr)
#'
#'@examples
#'# How to use the function:
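#'\dontrun{
#'# Minimal sketch; `prec` and `etp` are assumed monthly zoo series of
#'# precipitation and evapotranspiration (the names are illustrative only):
#'res <- spei(prec_data = prec, evapo_data = etp, time_step = 12,
#'            distribution = "log-Logistic")
#'head(res$spei)
#'}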
#'
#'@references
#'Vicente-Serrano, S.M. et al. (2010) A multiscalar drought index sensitive
#'to global warming: the standardized precipitation evapotranspiration index.
#'\emph{Journal of Climate, 23}
#'\url{https://www.researchgate.net/profile/Sergio_Vicente-Serrano/publication/262012840_A_Multiscalar_Drought_Index_Sensitive_to_Global_Warming_The_Standardized_Precipitation_Evapotranspiration_Index/links/540c6b1d0cf2f2b29a377f27/A-Multiscalar-Drought-Index-Sensitive-to-Global-Warming-The-Standardized-Precipitation-Evapotranspiration-Index.pdf}
#'
#'@seealso
#'\code{\link[piflowtest]{plot_trend}}: plot the index
spei <- function(prec_data, evapo_data, time_step = 12,
distribution = "log-Logistic") {
##__Checking______________________________________________________________####
# Data input checking
if (!is.zoo(prec_data)) { stop("prec_data must be a zoo"); return(NULL)}
if (!is.zoo(evapo_data)) { stop("evapo_data must be a zoo"); return(NULL)}
# Time step checking
if (periodicity(prec_data)$scale != "monthly") {
stop("prec_data must be a monthly serie \n"); return(NULL)
}
if (periodicity(evapo_data)$scale != "monthly") {
stop("evapo_data must be a monthly serie \n"); return(NULL)
}
##__Calculation___________________________________________________________####
diff <- prec_data - evapo_data
# Using SPEI package to calculate spei
res_spei <- SPEI::spei(coredata(diff[which(!is.na(diff))]), scale = time_step,
distribution = distribution, na.rm = TRUE)
spei <- zoo(as.numeric(res_spei$fitted),
order.by = index(diff[which(!is.na(diff))]))
##__Index analysis________________________________________________________####
# Drought type and number of drought
ext_wet <- very_wet <- wet <- normal <- dry <- very_dry <- ext_dry <- 0
drought_type <- rep(NA, length(spei))
for (i in 1:length(coredata(spei))) {
if (is.na(coredata(spei)[i])) {
} else if ((coredata(spei)[i] >= 3)) {
ext_wet <- ext_wet + 1
drought_type[i] <- 3
} else if ((2.99 > coredata(spei)[i]) && (coredata(spei)[i] > 2)) {
very_wet <- very_wet + 1
drought_type[i] <- 2
} else if ((1.99 > coredata(spei)[i]) && (coredata(spei)[i] > 1)) {
wet <- wet + 1
drought_type[i] <- 1
} else if ((0.99 > coredata(spei)[i]) && (coredata(spei)[i] > -0.99)) {
normal <- normal+1
drought_type[i] <- 0
} else if ((-1 >= coredata(spei)[i]) && (coredata(spei)[i] > -1.99)) {
dry <- dry + 1
drought_type[i] <- - 1
} else if ((-2 >= coredata(spei)[i]) && (coredata(spei)[i] > -2.99)) {
very_dry <- very_dry + 1
drought_type[i] <- - 2
} else if ((coredata(spei)[i] <= -3)) {
ext_dry <- ext_dry + 1
drought_type[i] <- - 3
} else {}
}
drought_number <- rbind.data.frame(ext_wet, very_wet, wet, normal, dry,
very_dry, ext_dry)
colnames(drought_number) <- c("Rain gauge")
row.names(drought_number) <- c("Extreme Wet", "Very Wet", "Wet", "Normal",
"Dry", "Very Dry", "Extreme Dry")
# Calculation of the drought length
length_drought <- numeric()
n <- 0
p <- 0
for (ilength in 1:length(spei)) {
if (is.na(spei[ilength])){
length_drought[ilength] <- NA
} else if (spei[ilength] > 0) {
n <- 0
p <- p + 1
length_drought[ilength] <- p
} else {
p <- 0
n <- n - 1
length_drought[ilength] <- n
}
}
length_zoo <- zoo(as.numeric(length_drought), index(spei))
  resspei <- list(spei = spei, drought_length = length_zoo,
                  drought_number_type = drought_number, type_time = drought_type)
  return(resspei)
}
|
/R/spei.R
|
no_license
|
flogrc/piflow
|
R
| false | false | 5,050 |
r
|
\name{dhglm-package}
\Rdversion{2.0}
\alias{dhglm-package}
%\alias{dhglm}
\docType{package}
\title{Double Hierarchical Generalized Linear Models}
\description{The dhglm package is used to fit double hierarchical generalized linear models (DHGLMs) in which random effects
can be specified in both the mean and the dispersion components (Lee and Nelder, 2006; Lee, Nelder, and Pawitan, 2006).
It can also be used to fit generalized linear models (GLMs) of Nelder and Wedderburn (1972), joint GLMs of Nelder and Lee (1991),
and hierarchical GLMs (HGLMs) of Lee and Nelder (1996, 2001). Dispersion parameters of the random effects in the mean model can also be modeled
with random effects (Noh, Lee and Pawitan, 2005). The response variable is allowed to follow a Gaussian, binomial, Poisson, or gamma distribution. The distribution of random
effects can be specified as Gaussian, gamma, inverse-gamma or beta. It can handle complex structures such as crossed or nested designs
in which various combinations of different distributions for random effects can be specified. Fixed effects in the mean can be estimated by maximizing
the h-likelihood or a first-order Laplace approximation to the marginal likelihood. Dispersion parameters are estimated by using first-order
adjusted profile likelihood, an extension of the restricted maximum likelihood; alternatively,
these parameters can be assigned fixed values. The dhglm package also produces model-checking plots for the various components of the model.}
\details{
\tabular{ll}{
Package: \tab dhglm\cr
Type: \tab Package\cr
Version: \tab 1.6\cr
Date: \tab 2016-09-19\cr
License: \tab Unlimited\cr
LazyLoad: \tab yes\cr
}
This is version 1.6 of the dhglm package.
}
\author{
Maengseok Noh, Youngjo Lee
Maintainer: Maengseok Noh <msnoh@pknu.ac.kr>
}
\references{
Lee, Y. and Nelder, J. A. (1996). Hierarchical generalised linear models (with discussion), Journal of the Royal Statistical Society B,
58, 619--678.
Lee, Y. and Nelder, J. A. (2001). Hierarchical generalised linear models : A synthesis of generalised linear
models, random-effect model and structured dispersion, Biometrika, 88, 987--1006.
Lee, Y. and Nelder, J. A. (2006). Double hierarchical generalized linear models (with discussion), Applied Statistics 55, 139--185.
Lee, Y. Nelder, J. A. and Pawitan, Y. (2006). Generalised linear models with random effects: unified analysis via
h-likelihood. Chapman & Hall: London.
Nelder, J. A. and Lee, Y. (1991). Generalised linear models for the analysis of Taguchi-type experiments,
Applied Stochastic Models and Data Analysis, 7, 107--120.
Nelder, J. A. and Wedderburn, R. W. M. (1972). Generalised linear models, Journal of the Royal Statistical Society A, 135, 370--384.
Noh, M., Lee, Y. and Pawitan, Y. (2005). Robust ascertainment-adjusted parameter estimation, Genetic Epidemiology, 29, 68--75.
}
\keyword{ package }
\seealso{
<\code{\link{dhglmfit}}>
}
\examples{
### DHGLM introducing random effects in the overdispersion for crack growth data
data(crack_growth)
model_mu<-DHGLMMODELING(Model="mean", Link="log", LinPred=y~crack0+(1|specimen),
RandDist="inverse-gamma")
model_phi<-DHGLMMODELING(Model="dispersion", Link="log",
LinPred=phi~cycle+(1|specimen), RandDist="gaussian")
res_crack<-dhglmfit(RespDist="gamma",DataMain=crack_growth,
MeanModel=model_mu,DispersionModel=model_phi,Maxiter=1)
}
|
/man/dhglm-package.Rd
|
no_license
|
cran/dhglm
|
R
| false | false | 3,443 |
rd
|
library(planor)
### Name: summary-methods
### Title: Summarize the Design Properties
### Aliases: summary,designkey-method summary.designkey
### summary,keymatrix-method summary.keymatrix summary,keyring-method
### summary.keyring summary,listofdesignkeys-method
### summary.listofdesignkeys summary,listofkeyrings-method
### summary.listofkeyrings summary,planordesign-method
### summary.planordesign
### Keywords: methods
### ** Examples
### Creation of a listofdesignkeys object
K0 <- planor.designkey(factors=c("R","C","U","A","B1","B2"),
nlevels=c(3,2,2,3,2,2), model=~R*C + (A+B1+B2)^2, estimate=~A:B1+A:B2,
nunits=12, base=~R+C+U, max.sol=2)
### Method summary applied on a keymatrix object
r <- summary(K0[[1]][[1]])
### Method summary applied on a designkey object
summary(K0[1], save=NULL)
### Method summary applied on the listofdesignkeys object
r <-summary(K0, show="dt")
### Creation of a listofkeyrings object
K0 <- planor.designkey(factors=c(LETTERS[1:4], "block"), nlevels=rep(3,5),
model=~block+(A+B+C+D)^2, estimate=~A+B+C+D,
nunits=3^3, base=~A+B+C, max.sol=2)
### Method summary applied on the keymatrix object
r <-summary(K0[[1]][[1]])
### Method summary applied on the keyring object
r <-summary(K0[[1]])
### Method summary applied on the listofkeyrings object
r <- summary(K0, show="dtb", save ="k")
print(r)
|
/data/genthat_extracted_code/planor/examples/summary-methods.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,362 |
r
|
sizeof (char) = 1
sizeof (signed char) = 1
sizeof (unsigned char) = 1
sizeof (short) = 2
sizeof (signed short) = 2
sizeof (unsigned short) = 2
sizeof (int) = 4
sizeof (signed int) = 4
sizeof (unsigned int) = 4
sizeof (long long) = 8
sizeof (signed long long) = 8
sizeof (unsigned long long) = 8
sizeof (float) = 4
sizeof (double) = 8
|
/third_party/virtualbox/src/VBox/ExtPacks/VBoxDTrace/onnv/cmd/dtrace/test/tst/common/types/tst.basics.d.out
|
permissive
|
thalium/icebox
|
R
| false | false | 336 |
out
|
library(ggplot2)
library(dplyr)
library(maps)
library(ggmap)
#2
?map_data
states_map=map_data("state")
#3
class(states_map)
#4
head(states_map,3)
#5
ggplot(states_map,aes(x=long,y=lat))+geom_point()
#6
ggplot(states_map,aes(x=long,y=lat,group=group))+geom_polygon(fill="white",color="black")
#7
world_map=map_data("world")
#8
ggplot(world_map,aes(x=long,y=lat,group=group))+geom_polygon(fill="white",color="black")
#9
Lithuania= map_data("world",region="Lithuania")
ggplot(Lithuania,aes(x=long,y=lat,group=group))+geom_polygon(fill="white",color="black")
#10
head(world_map)
countries=world_map %>% distinct(region) %>% arrange(region)
countries
#11
far_east=map_data("world",region=c("Japan","China","North Korea","South Korea"))
ggplot(far_east,aes(x=long,y=lat,group=group))+geom_polygon(fill="white",color="black")
#####################
#1
head(USArrests)
#2
crimes=data.frame(state=tolower(rownames(USArrests)),USArrests)
head(crimes,3)
?tolower
#3
?full_join
?merge
crime_map=merge(states_map,crimes,by.x="region",by.y = "state", all.x = T)
head(crime_map)
#4
crime_map=arrange(crime_map,group,order)
#5
ggplot(crime_map,aes(x=long,y=lat,group=group,fill=Assault))+geom_polygon(color="black")+scale_fill_gradient2(low="white",high="Darkred")
#6
ggplot(crime_map,aes(x=long,y=lat,group=group,fill=Assault))+geom_polygon(color="black")+scale_fill_gradient2(low="white",high="Darkred")
#7
|
/maps.R
|
no_license
|
xueyingwang/my_R
|
R
| false | false | 1,411 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadRun.R
\name{loadSmallRnaRun}
\alias{loadSmallRnaRun}
\title{Load small RNA-seq bcbio-nextgen run}
\usage{
loadSmallRnaRun(projectDir = "date-final",
interestingGroups = "sample", maxSamples = 50, minHits = 5,
dataDir = NULL, colData = NULL, ...)
}
\arguments{
\item{projectDir}{Path to final upload directory. This path is set when
running \code{bcbio_nextgen -w template}.}
\item{interestingGroups}{Character vector of interesting groups. First entry
is used for plot colors during quality control (QC) analysis. Entire vector
is used for PCA and heatmap QC functions.}
\item{maxSamples}{\emph{Optional}. Maximum number of samples to calculate rlog
and variance stabilization object from DESeq2.}
\item{minHits}{\emph{Optional}. Minimum number of lines required in the miRNA
output for a sample to be loaded.}
\item{dataDir}{Folder to keep a cache of the object.}
\item{colData}{\emph{Optional} External metadata to be used while reading samples.}
\item{...}{Additional arguments, saved as metadata.}
}
\value{
\link{bcbioSmallRnaDataSet}.
}
\description{
Simply point to the final upload directory output by
\href{https://bcbio-nextgen.readthedocs.io/}{bcbio-nextgen}, and this function
will take care of the rest. It automatically imports small RNA-seq counts,
metadata, and program versions used.
}
\note{
When working in RStudio, we recommend connecting to the bcbio-nextgen
run directory as a remote connection over
\href{https://github.com/osxfuse/osxfuse/wiki/SSHFS}{sshfs}.
}
\examples{
path <- system.file("extra", package="bcbioSmallRna")
sbcb <- loadSmallRnaRun(file.path(path, "geu_tiny", "final",
"2018-12-05_geu_tiny"), "population")
}
\author{
Michael Steinbaugh, Lorena Pantano
}
|
/man/loadSmallRnaRun.Rd
|
permissive
|
lpantano/bcbioSmallRna
|
R
| false | true | 1,805 |
rd
|
################ ~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~ ##################
## ##
## Daytons Weather ##
## ##
## ##
## Marco R. Morales ##
## ##
## ##
## created: 06.08.2017 last update: 06.08.2017 ##
################# ~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~ #################
make_filename <- function(CityABBR) {
filePathSep <- "/"
fileNamesep <- "."
fileExt <- "txt"
baseURL <- "http://academic.udayton.edu/kissock/http/Weather/gsod95-current"
filename <- paste(CityABBR, fileExt, sep = fileNamesep)
finalURL <- paste(baseURL, filename, sep = filePathSep)
} # END make_filename()
get_FileInfo <- function(CityFile, CountryABBR, City){
# start with an empty data frame:
# not really needed if only one file is looked at
# df <- data.frame(name = c(), size = c())
fileInfo <- object.size(CityFile)
fileSizeInMb <- paste(round(fileInfo / 1024 / 1024, 2), "MB")
df <- data.frame(name = paste(CountryABBR, City), size = fileSizeInMb)
} #END get_FileInfo
read_and_load <- function(finalURL){
ext_tracks_colnames <- c("Month", "Day", "Year", "TempInF")
ext_tracks_widths <- c(8,9,17,17)
# data <- readr::read_fwf(finalURL) #col_names = FALSE
  data <- readr::read_fwf(finalURL,
                          readr::fwf_widths(ext_tracks_widths,
                                            ext_tracks_colnames))
return(data)
}
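# Usage sketch (fully commented out; the city code "OHDAYTON" and the
# fixed-width layout above are assumptions, not verified against the server):
# cityURL <- make_filename("OHDAYTON")
# dayton  <- read_and_load(cityURL)
# head(dayton)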
|
/R/load_clean.R
|
no_license
|
moralmar/DaytonsWeather
|
R
| false | false | 2,048 |
r
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
#mobile.no@fssai
library(shiny)
if(!require(Hmisc)){
install.packages("Hmisc")
library(Hmisc)
}
if(!require(reshape2)){
install.packages("reshape2")
library(reshape2)
}
if(!require(MASS)){
install.packages("MASS")
library(MASS)
}
library(dplyr)
library(ggplot2)
library(openxlsx)
if(!require(scales)){
install.packages("scales")
library(scales)
}
library(stringr)
shinyServer(function(input, output) {
load(file.path("data","13_L04_Short3.RData"))
# load(file.path("data","13_L05_Data2.RData"))
# load(file.path("data","17_L05_Data3.RData"))
load(file.path("data","18_L05_Data3.RData"))
# load(file.path("data","15_HH_Summary_All.RData"))
load(file.path("data","17_HH_Summary_All_Heme.RData"))
load(file.path("data","15_L03_Data2 with StateDistMap.RData"))
load(file.path("data","district_map.RData"))
load(file.path("data","district_shp_df2.RData"))
load(file.path("data","states_shp.RData"))
load(file.path("data","fg_dbMapping.RData"))
load(file.path("data","16_RDA TUL.RData"))
load(file.path("data","desc.RData"))
load(file.path("data","agristats_summary08to12_map.RData"))
load(file.path("data","nfhs4_complete3.RData"))
load(file.path("data","nfhs4_outcomelist.RData"))
options(scipen = 999)
district_map1 <- district_map[!duplicated(district_map$NSS_DID)& district_map$MM_in_NSS!="No"&!is.na(district_map$NSS_DID),c("MM_Unique","ST_CEN_CD","DISTRICT","NSS_DID","censuscode")]
district_map1 <- district_map1[!is.na(district_map1$NSS_DID),]
state_map1 <- district_map[!duplicated(district_map$ST_CEN_CD)&!is.na(district_map$ST_CEN_CD),c("ST_CEN_CD","NSS_State")]
#Merging it upstream so that it is not caught between Reactives
district_shp_df2 <- merge(district_shp_df2,state_map1,by="ST_CEN_CD")
hh_summary_all <- hh_summary_all[hh_summary_all$Moisture.WATER!=0,]
hh_summary_all.orig <- hh_summary_all
observeEvent(input$goButton1,{
fooditem1 = "rice - PDS"
fooditem1 = input$fooditem1
# print(fooditem1)
# food1 = l05_data2[l05_data2$name==fooditem1,c("hhuid","finalweight","cq100","state_L05","state_dist_L05")]
food1 = l05_data3[l05_data3$name==fooditem1,c("hhuid","finalweight","cq100","state_L05","state_dist_L05")]
food1 <- merge(food1,district_map1,by.x="state_dist_L05",by.y="NSS_DID")
#Includes district name and censuscode
food1 <- merge(food1,l03_data2[,c("hhuid","mpce_mrp","st_quintile","NSS_State")],by="hhuid")
food1 <- merge(food1,l04_short3[,c("hhuid","consumerunits","hh_size_L04")],by="hhuid")
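  # percon_qty is the monthly quantity per consumer unit, still in 100 g units;
  # it is multiplied by 100 further down to report intake in grams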
food1$percon_qty <- with(food1,cq100/consumerunits)
# food1 <- food1[food1$st_quintile %in% c(1,2,3),]
food1 <- food1[food1$st_quintile %in% input$quintiles1,]
#Summarising datasets for entire country by state, district
summarydf_dist <- food1 %>% group_by(state_L05,NSS_State,state_dist_L05,DISTRICT,censuscode) %>% summarise(mean=wtd.mean(percon_qty*100,w=consumerunits*finalweight,na.rm=TRUE),sd_qty = sqrt(wtd.var(percon_qty*100,w=consumerunits*finalweight,na.rm=TRUE)),median=wtd.quantile(percon_qty*100,w=consumerunits*finalweight,probs=0.5,na.rm=TRUE))
summarydf_state <- food1 %>% group_by(NSS_State,ST_CEN_CD) %>% summarise(mean=wtd.mean(percon_qty*100,w=consumerunits*finalweight,na.rm=TRUE),sd_qty = sqrt(wtd.var(percon_qty*100,w=consumerunits*finalweight,na.rm=TRUE)),median=wtd.quantile(percon_qty*100,w=consumerunits*finalweight,probs=0.5,na.rm=TRUE))
# #Reactive dataset for selection of State
map_df_dist <- reactive({
if(input$state1=="India"){
dataset <- merge(district_shp_df2,summarydf_dist[,c("median","censuscode","state_L05","state_dist_L05","DISTRICT")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset[order(dataset$order),]
}
else{
dataset <- merge(district_shp_df2[district_shp_df2$NSS_State==input$state1,],summarydf_dist[summarydf_dist$NSS_State==input$state1,c("median","censuscode","state_L05","state_dist_L05","DISTRICT")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset[order(dataset$order),]
}
})
# print("About to plot Map")
# map_df_dist <- dataset[order(dataset$order),]
#Plotting map
output$mapPlot1<- renderPlot({
mp1 <- ggplot() + geom_polygon(data=map_df_dist(),aes(x=long,y=lat,group=group,fill=median))
# mp1 <- ggplot() + geom_polygon(data=map_df_dist,aes(x=long,y=lat,group=group,fill=median))
mp1 <- mp1 + coord_map() + xlab("Longitude") + ylab("Latitude") + ggtitle(paste0(fooditem1," Monthly Intake in grams per consumer unit- ",input$state1," by District")) + theme_grey()
mp1 <- mp1 + scale_fill_distiller(name="Monthly Intake in grams", palette = "YlGnBu",direction=1)
# print("Plotting Map")
print(mp1)
# mp1
},height=600)
if(input$state1=="India"){
food2 <- food1
}
else{
food2 <- food1[food1$NSS_State==input$state1,]
}
# print("About to plot Hist")
#Plotting histogram
output$distPlot1 <- renderPlot({
dp1 <- ggplot(data=food2,aes(percon_qty*100,weight=consumerunits*finalweight)) + geom_histogram() +scale_y_continuous(labels = comma)
dp1 <- dp1 + xlab("Food Intake") + ylab("Count") + theme(text = element_text(size=12)) + ggtitle(paste0("Distribution of Monthly Intake in grams per consumer unit in ",input$state1))
# print("Plotting Hist")
return(dp1)
},height=600)
summarytable <- reactive({
if(input$state1=="India"){
temp <- summarydf_state[,c("NSS_State","mean","sd_qty","median")]
colnames(temp) <- c("State","Mean","SD","Median")
temp
}
else{
temp <- summarydf_dist[summarydf_dist$NSS_State==input$state1,c("NSS_State","DISTRICT","mean","sd_qty","median")]
colnames(temp) <- c("State","District","Mean","SD","Median")
temp
}
})
output$summary1 <- renderTable({
print(summarytable())
})
})
observeEvent(input$goButton2,{
# "Total Saturated Fatty Acids\n(TSFA)", "Total Ascorbic Acid","Phytate",'Calcium(Ca)' ,'Magnesium(Mg)''Zinc(Zn)' "Protein" "Energy in KiloCal" "Total Fat"
# "Vitamin B-12 "," Total Folates (B9)",'Iron(Fe)',"Total Polyphenols", "Vitamin A, RAE "
# "Protein" "Energy in KiloCal" "Total Fat","Carbo-hydrate" "Vitamin A, RAE "
nutrient2 = "Vitamin B-12 " # #Link with input
nutrient2 = input$nutrient2
print(nutrient2)
var_nutrient <- fg_dbMapping[fg_dbMapping$nin.nutrient==nutrient2,"nin.var_nutrient"]
# var_nutrient = "NonHeme.IronFe.FE", "Retinol.RETOL"
unit_nutrient <- fg_dbMapping[fg_dbMapping$nin.nutrient==nutrient2,"nin.unit_nutrient"]
# unit_nutrient = "mg"
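  # multiplier rescales the per-day intake (assumed to be computed in grams)
  # into the nutrient's native unit for display: x 1e6 for µg, x 1e3 for mg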
multiplier=1
if(unit_nutrient=="µg"){
multiplier=1000000
}
if(unit_nutrient=="mg"){
multiplier=1000
}
# hh_summary_all <- hh_summary_all %>% group_by(hhuid) %>% mutate(totalfatprovided=sum(TotalPolyUnsaturatedFattyAcids.FAPU,TotalMonoUnsaturatedFattyAcids.FAMS,TotalSaturatedFattyAcids.FASAT,na.rm=TRUE))
# hh_summary_all <- as.data.frame(hh_summary_all)
bioavailability = 1
quintiles = c(1,2,3)
quintiles = as.numeric(input$quintiles2)
hh_summary_all <- hh_summary_all.orig
hh_summary_all <- hh_summary_all[hh_summary_all$st_quintile %in% quintiles,]
# hh_summary_all <- hh_summary_all[hh_summary_all$w.st_quintile %in% quintiles,]
dataset.nutrient2 <- reactive({
if(is.null(input$type2)|input$type2==""){
type2 = "Adult Women"
}
else{
type2 = input$type2
}
var_no <- desc[desc$desc==type2,"variable2"]
var_cu <- desc[desc$desc==type2,"cu"]
# var_no <- "hh_size_L04"
# var_cu <- "consumerunits"
# # For consumer units
hh_summary_all$nutrient.no <- hh_summary_all[,var_no]
hh_summary_all$nutrient.cu <- hh_summary_all[,var_cu]
hh_summary_all$nutrient.pd <- (hh_summary_all[,var_nutrient]*hh_summary_all$nutrient.cu)/(30*hh_summary_all$consumerunits*hh_summary_all$nutrient.no)
# q99.9 <- quantile(hh_summary_all$nutrient.pd,probs=0.999,na.rm=TRUE)
q99.9 <- with(hh_summary_all,wtd.quantile(nutrient.pd,w=finalweight*nutrient.no,probs=0.999,na.rm=TRUE))
q00.1 <- with(hh_summary_all,wtd.quantile(nutrient.pd,w=finalweight*nutrient.no,probs=0.001,na.rm=TRUE))
# hh_summary_all <- hh_summary_all[hh_summary_all$nutrient.pd<q99.9 & !is.na(hh_summary_all$nutrient.pd),]
hh_summary_all2 <- hh_summary_all[hh_summary_all$nutrient.pd<q99.9&hh_summary_all$nutrient.pd>q00.1,]
hh_summary_all2
})
# hh_summary_all.nutrient <- hh_summary_all2
#
summarydf.r <- reactive({
hh_summary_all.nutrient <- dataset.nutrient2()
hh_summary_all.nutrient[!is.na(hh_summary_all.nutrient$nutrient.pd),] %>% summarise(mean_intake_hmisc_n=wtd.mean(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE))
})
# summarydf.r <- hh_summary_all.nutrient[!is.na(hh_summary_all.nutrient$nutrient.pd),] %>% summarise(mean_intake_hmisc_n=wtd.mean(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE))
summarydf_state.r <- reactive({
hh_summary_all.nutrient <- dataset.nutrient2()
# q99.9 <- quantile(hh_summary_all.nutrient$nutrient.pd,probs=0.999,na.rm=TRUE)
# hh_summary_all.nutrient[!is.na(hh_summary_all.nutrient$nutrient.pd)&hh_summary_all.nutrient$nutrient.pd<q99.9,]
# View(hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9,])
# summarydf_state.r <-
hh_summary_all.nutrient[!is.na(hh_summary_all.nutrient$nutrient.pd),] %>% group_by(NSS_State,RDS_State) %>% summarise(mean_intake_hmisc_n=wtd.mean(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE),
sd_intake_hmisc_n=sqrt(wtd.var(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE)),
q25_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.25),
median_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.5),
q75_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.75),
q90_hmisc_n = wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.9),
q99_hmisc_n = wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.99),
q99.9_hmisc_n = wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.999),
max_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=1),
no_households = n(),no_individuals=sum(nutrient.no,na.rm=TRUE))
})
summarydf_dist.r <- reactive({
hh_summary_all.nutrient <- dataset.nutrient2()
# q99.9 <- quantile(hh_summary_all.nutrient$nutrient.pd,probs=0.999,na.rm=TRUE)
# summarydf_dist.r <-
hh_summary_all.nutrient[!is.na(hh_summary_all.nutrient$nutrient.pd),] %>% group_by(NSS_State,RDS_State,DISTRICT,RDS_District,censuscode) %>% summarise(mean_intake_hmisc_n=wtd.mean(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE),
sd_intake_hmisc_n=sqrt(wtd.var(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE)),
q25_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.25),
median_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.5),
q75_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.75),
q90_hmisc_n = wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.9),
q99_hmisc_n = wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.99),
q99.9_hmisc_n = wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=0.999),
max_hmisc_n=wtd.quantile(nutrient.pd*multiplier,w=nutrient.no*finalweight,na.rm=TRUE,probs=1),
no_households = n(),no_individuals=sum(nutrient.no,na.rm=TRUE))
})
if(is.null(input$type2)|input$type2==""){
type2 = "Adult Women"
}
else{
type2 = input$type2
}
output$mapPlot2<- renderPlot({
write.csv(summarydf.r(),file=paste0("National Summary-",Sys.Date(),"_",nutrient2,"_",type2,".csv"))
write.csv(summarydf_state.r(),file=paste0("State Summary-",Sys.Date(),"_",nutrient2,"_",type2,".csv"))
write.csv(summarydf_dist.r(),file=paste0("District Summary-",Sys.Date(),"_",nutrient2,"_",type2,".csv"))
state2="India"
state2 = input$state2
if(is.null(input$type2)|input$type2==""){
type2 = "Adult Women"
}
else{
type2 = input$type2
}
summarydf_dist <- summarydf_dist.r()
summarydf_state <- summarydf_state.r()
if(state2=="India"){
dataset.map2 <- merge(district_shp_df2,summarydf_dist[,c("median_hmisc_n","censuscode")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset.map2 <- dataset.map2[order(dataset.map2$order),]
states_shp2 <- states_shp
}
else{
dataset.map2 <- merge(district_shp_df2[district_shp_df2$NSS_State==state2,],summarydf_dist[summarydf_dist$NSS_State==state2,c("median_hmisc_n","censuscode")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset.map2 <- dataset.map2[order(dataset.map2$order),]
states_shp2 <- states_shp[states_shp$NSS_State==state2,]
}
legend = paste0("Intake in ",unit_nutrient)
title2 = paste0(type2," ",nutrient2," Intake- ",state2," by District")
mp2 <- ggplot() + geom_polygon(data=states_shp2,aes(x=long,y=lat,group=group),color="black",fill="white",size=0.2)
mp2 <- mp2 + geom_polygon(data=dataset.map2,aes(x=long,y=lat,group=group,fill=median_hmisc_n),alpha=0.8)
mp2 <- mp2 + coord_map() + xlab("Longitude") + ylab("Latitude") + ggtitle(title2) + theme_grey()
mp2 <- mp2 + scale_fill_distiller(name=legend, palette = "YlGnBu",direction=1)
print(mp2)
},height=600)
output$summary2 <- renderTable({
summarydf_dist <- summarydf_dist.r()
summarydf_state <- summarydf_state.r()
if(is.null(input$type2)|input$type2==""){
type2 = "Adult Women"
}
else{
type2 = input$type2
}
state2 = "Gujarat"
state2 = input$state2
if(state2=="India"){
temp <- summarydf_state[,c("NSS_State","mean_intake_hmisc_n","sd_intake_hmisc_n","q25_hmisc_n","median_hmisc_n","q75_hmisc_n")]
colnames(temp) <- c("State","Mean","SD","Quartile 25","Median","Quartile 75")
temp
}
else{
temp <- summarydf_dist[summarydf_dist$NSS_State==state2,c("NSS_State","DISTRICT","mean_intake_hmisc_n","sd_intake_hmisc_n","q25_hmisc_n","median_hmisc_n","q75_hmisc_n")]
colnames(temp) <- c("State","District","Mean","SD","Quartile 25","Median","Quartile 75")
temp
}
print(temp)
})
output$distPlot2 <- renderPlot({
hh_summary_all.nutrient <- dataset.nutrient2()
if(is.null(input$type2)|input$type2==""){
type2 = "Adult Women"
}
else{
type2 = input$type2
}
state2 = "Gujarat"
state2 = input$state2
# q99.9 <- quantile(hh_summary_all.nutrient$nutrient.pd,probs=0.999,na.rm=TRUE)
if(state2=="India"){
dataset <- hh_summary_all.nutrient
# dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9,]
}
else{
dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$NSS_State==state2,]
# dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9 & hh_summary_all.nutrient$NSS_State==state2,]
}
dp2 <- ggplot(data=dataset,aes(nutrient.pd*multiplier,weight=nutrient.no*finalweight)) + geom_histogram() +scale_y_continuous(labels = comma)
dp2 <- dp2 + xlab(paste0("Nutrient Intake in ",unit_nutrient)) + ylab("Count (excl. top 0.1%ile)") + theme(text = element_text(size=12)) + ggtitle(paste0("Distribution of Monthly Intake in ",unit_nutrient," in ",state2))
print(dp2)
},height=600)
})
observeEvent(input$goButton3,{
# 'Calcium(Ca)' " Total Folates (B9)"
nutrient3 = 'Calcium(Ca)' # #Link with input
nutrient3 = input$nutrient3
print(nutrient3)
var_nutrient <- fg_dbMapping[fg_dbMapping$nin.nutrient==nutrient3,"nin.var_nutrient"]
# var_nutrient = "NonHeme.IronFe.FE"
unit_nutrient <- fg_dbMapping[fg_dbMapping$nin.nutrient==nutrient3,"nin.unit_nutrient"]
# unit_nutrient = "mg"
multiplier=1
if(unit_nutrient=="µg"){
multiplier=1000000
}
if(unit_nutrient=="mg"){
multiplier=1000
}
bioavailability = 1
#Nutrient intake from food = consumed quantity (in 100g units) * composition
# fortificant = 0 #From input (same unit as nutrient per 100g)
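  # Illustrative arithmetic (numbers are not from the data): 50 mg of nutrient
  # per 100 g becomes 50/1000 = 0.05 g per 100 g after the unit conversion
  # below; a household consuming 100 such 100 g units (10 kg) in a month then
  # receives 100 * 0.05 = 5 g of added nutrient that month.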
# fooditem3 = "All Rice" #Link with input
fooditem3 = input$fooditem3
# fortificant = 50
fortificant = input$fortificant3
# f.unit = 2
f.unit = input$unit3
if(f.unit==1){
fortificant = fortificant/1000000
}
if(f.unit==2){
fortificant = fortificant/1000
}
# fooditem3 = "All Rice"
food = l05_data3[l05_data3$name==fooditem3,c("hhuid","finalweight","cq100")]
food <- merge(food,l03_data2[,c("hhuid","state_L03","state_dist_L03","NSS_State","mpce_mrp","st_quintile","DISTRICT","RDS_District","RDS_State")],by="hhuid")
quintiles3 <- c(1,2,3)
quintiles3 <- as.numeric(input$quintiles3)
food <- food[food$st_quintile %in% quintiles3 ,]
coverage=100 #Percentage
coverage = input$coverage3
food <- food %>% dplyr::group_by(state_L03) %>% dplyr::sample_frac(size=coverage/100,weight=finalweight)
food$totalnutrient <- fortificant*food$cq100
food$state_L03 <- as.numeric(food$state_L03)
print(input$scenario3_2)
if(input$scenario3b==TRUE){
# fooditem3_2 = "All Rice Products"
fooditem3_2 = input$fooditem3_2
# fortificant_2 = 10
fortificant_2 = input$fortificant3_2
# f.unit_2=2
f.unit_2 = input$unit3_2
if(f.unit_2==1){
fortificant_2 = fortificant_2/1000000
}
if(f.unit_2==2){
fortificant_2 = fortificant_2/1000
}
food_2 = l05_data3[l05_data3$name==fooditem3_2,c("hhuid","finalweight","cq100")]
food_2 <- merge(food_2,l03_data2[,c("hhuid","state_L03","state_dist_L03","NSS_State","mpce_mrp","st_quintile","DISTRICT","RDS_District","RDS_State")],by="hhuid")
quintiles3_2 <- c(1,2,3)
quintiles3_2 <- as.numeric(input$quintiles3_2)
food_2 <- food_2[food_2$st_quintile %in% quintiles3_2,]
# coverage_2=100 #Percentage
coverage_2 = input$coverage3_2
food_2 <- food_2 %>% dplyr::group_by(state_L03) %>% dplyr::sample_frac(size=coverage_2/100,weight=finalweight)
# food_2$totalnutrient_2 <- fortificant_2*food_2$cq100
food_2$totalnutrient <- fortificant_2*food_2$cq100
food_2$state_L03 <- as.numeric(food_2$state_L03)
}
if(input$scenario3c==TRUE){
fooditem3_3 = input$fooditem3_3
fortificant_3 = input$fortificant3_3
f.unit_3=3
f.unit_3 = input$unit3_3
if(f.unit_3==1){
fortificant_3 = fortificant_3/1000000
}
if(f.unit_3==2){
fortificant_3 = fortificant_3/1000
}
food_3 = l05_data3[l05_data3$name==fooditem3_3,c("hhuid","finalweight","cq100")]
food_3 <- merge(food_3,l03_data2[,c("hhuid","state_L03","state_dist_L03","NSS_State","mpce_mrp","st_quintile","DISTRICT","RDS_District","RDS_State")],by="hhuid")
quintiles3_3 <- c(1,2,3)
quintiles3_3 <- as.numeric(input$quintiles3_3)
food_3 <- food_3[food_3$st_quintile %in% quintiles3_3,]
coverage_3=50 #Percentage
coverage_3 = input$coverage3_3
food_3 <- food_3 %>% dplyr::group_by(state_L03) %>% dplyr::sample_frac(size=coverage_3/100,weight=finalweight)
# food_3$totalnutrient_3 <- fortificant_3*food_3$cq100
food_3$totalnutrient <- fortificant_3*food_3$cq100
food_3$state_L03 <- as.numeric(food_3$state_L03)
}
food_all <- food[,c("hhuid","totalnutrient")]
n=2
if(input$scenario3b==TRUE){
# food_all <- merge(food_all[,c("hhuid","totalnutrient")],food_2[,c("hhuid","totalnutrient_2")],by="hhuid",all=TRUE)
# n = n+1
food_all <- rbind(food_all,food_2[,c("hhuid","totalnutrient")])
}
if(input$scenario3c==TRUE){
# food_all <- merge(food_all[,c("hhuid","totalnutrient")],food_3[,c("hhuid","totalnutrient_3")],by="hhuid",all=TRUE)
# n = n+1
food_all <- rbind(food_all,food_3[,c("hhuid","totalnutrient")])
}
# temp <- array(food_all[,c(2:n)],dim=c(length(food_all$hhuid),n-1))
# food_all$fortificant <- base::rowSums(temp,na.rm=TRUE)
# rm(temp)
food_all <- food_all %>% group_by(hhuid) %>% summarise(fortificant=sum(totalnutrient,na.rm=TRUE))
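  # lambda_pop(): estimates a Box-Cox transformation parameter for the intake
  # distribution (MASS::boxcox on an intercept-only model, picking the lambda
  # with the highest profile log-likelihood); a small offset is added so that
  # zero intakes remain transformable.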
lambda_pop <- function(nutrient.a,finalweight,number){
# quantile99.9 <- wtd.quantile(nutrient.a,weights=finalweight*number,probs=0.999,na.rm=TRUE)
# nutrient.a1 <- nutrient.a[nutrient.a<quantile99.9]
error.add <- min(nutrient.a[nutrient.a>0&!is.na(nutrient.a)])/1000
nutrient.a1 <- nutrient.a + error.add
# nutrient.a1 <- nutrient.a
# lambda <- tryCatch({
boxcox.pop <- as.data.frame({
boxcox(nutrient.a1~1,lambda=seq(-7,7,by=0.2),plotit=FALSE)
})
lambda <- boxcox.pop[which.max(boxcox.pop$y),"x"]
# )}
# error=function()
return(lambda)
}
risk_estimation <- function(dataset.nutrient,lambda,grouping.var,method){
# dataset.nutrient <- dataset
# grouping.var <- grouping.var3
if(!is.na(lambda)){
dataset.nutrient$lambda <- lambda
dataset.nutrient$bc.value <- with(dataset.nutrient,ifelse(lambda==0,log(value),(value^lambda-1)/lambda))
dataset.nutrient$bc.RDA2g <- with(dataset.nutrient,ifelse(lambda==0,log(RDA2g),(RDA2g^lambda-1)/lambda))
dataset.nutrient$bc.TUL2.1g <- with(dataset.nutrient,ifelse(lambda==0,log(TUL2.1g),(TUL2.1g^lambda-1)/lambda))
dataset.nutrient <- dataset.nutrient %>% mutate(quantile.prob= as.numeric(cut(value,breaks=wtd.quantile(value,probs=seq(0,1,by=0.05),na.rm=TRUE)),right=FALSE,include.lowest=TRUE))
bc.RDA2g <- unique(dataset.nutrient$bc.RDA2g)
bc.TUL2.1g <- unique(dataset.nutrient$bc.TUL2.1g)
}
#Method 1: EAR Cutpoint Method----
if(method==1){
# summary.dataset.nutrient <- dataset.nutrient[dataset.nutrient$value<dataset.nutrient$re.q99.9,]
summary.dataset.nutrient <- dataset.nutrient[!is.na(dataset.nutrient$value),] %>% group_by_(grouping.var[1],grouping.var[2]) %>% summarise(Mean=wtd.mean(value,w=nutrient.no*finalweight,na.rm=TRUE),
SD=sqrt(wtd.var(value,w=nutrient.no*finalweight,na.rm=TRUE)),
Median = wtd.quantile(value,w=nutrient.no*finalweight,probs=0.5,na.rm=TRUE),
bc.Mean= wtd.mean(bc.value,w=nutrient.no*finalweight,na.rm=TRUE),
bc.SD =sqrt(wtd.var(bc.value,w=nutrient.no*finalweight,na.rm=TRUE)))
summary.dataset.nutrient <- summary.dataset.nutrient %>% mutate(inadequacy = round(pnorm(bc.RDA2g,mean=bc.Mean,sd=bc.SD),5))
summary.dataset.nutrient <- summary.dataset.nutrient %>% mutate(risk = round(1-pnorm(bc.TUL2.1g,mean=bc.Mean,sd=bc.SD),5))
summary.dataset.nutrient <- summary.dataset.nutrient[,c(grouping.var,"Mean","SD","Median","inadequacy","risk")]
}
#Method 2: Probability Approach----
if(method==2){
dataset.nutrient <- dataset.nutrient %>% group_by_(grouping.var[1],grouping.var[2],"quantile.prob") %>% mutate(mean.quantile=wtd.mean(value,w=nutrient.no*finalweight,na.rm=TRUE))
dataset.nutrient <- dataset.nutrient[!is.na(dataset.nutrient$value),]
dataset.nutrient <- dataset.nutrient %>% ungroup() %>% group_by_(grouping.var[1],grouping.var[2]) %>% mutate(inadequacy = 1-pnorm(mean.quantile,mean=RDA2g,sd=abs(RDA2g*0.1)),
risk = pnorm(mean.quantile,mean=TUL2.1g,sd=abs(TUL2.1g*0.1)),
total.weight = sum(nutrient.no*finalweight,na.rm=TRUE))
# summary.dataset.nutrient <- dataset.nutrient[dataset.nutrient$value<dataset.nutrient$re.q99.9 & dataset.nutrient$value>0,]
# summary.dataset.nutrient <- dataset.nutrient[dataset.nutrient$value<dataset.nutrient$re.q99.9,]
summary.dataset.nutrient <- dataset.nutrient %>% summarise(Mean=wtd.mean(value,w=nutrient.no*finalweight,na.rm=TRUE),
SD=sqrt(wtd.var(value,w=nutrient.no*finalweight,na.rm=TRUE)),
Median = wtd.quantile(value,w=nutrient.no*finalweight,probs=0.5,na.rm=TRUE),
#bc.Mean= wtd.mean(bc.value,w=nutrient.no*finalweight,na.rm=TRUE),
#bc.SD =sqrt(wtd.var(bc.value,w=nutrient.no*finalweight,na.rm=TRUE)),
inadequacy = round(wtd.mean(inadequacy,w=nutrient.no*finalweight,na.rm=TRUE),5),
risk = round(wtd.mean(risk,w=nutrient.no*finalweight,na.rm=TRUE),5))
}
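    #Method 3: Direct per-household comparison against Box-Cox-transformed cutoffs----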
if(method==3){
dataset.nutrient$direct.inadequacy <- with(dataset.nutrient,ifelse(bc.value<bc.RDA2g,1,0))
dataset.nutrient$direct.risk <- with(dataset.nutrient,ifelse(bc.value>bc.TUL2.1g,1,0))
# proportion.inadequacy <- with(dataset.nutrient,wtd.mean(direct.inadequacy,w=finalweight*nutrient.no,na.rm=TRUE))
# proportion.risk <- with(dataset.nutrient,wtd.mean(direct.risk,w=finalweight*nutrient.no,na.rm=TRUE))
# summary.dataset.nutrient$inadequacy <- proportion.inadequacy
# summary.dataset.nutrient$risk <- proportion.risk
# summary.dataset.nutrient <- dataset.nutrient[dataset.nutrient$value<dataset.nutrient$re.q99.9 & dataset.nutrient$value>0,]
summary.dataset.nutrient <- dataset.nutrient %>% summarise(Mean=wtd.mean(value,w=nutrient.no*finalweight,na.rm=TRUE),
SD=sqrt(wtd.var(value,w=nutrient.no*finalweight,na.rm=TRUE)),
Median = wtd.quantile(value,w=nutrient.no*finalweight,probs=0.5,na.rm=TRUE),
#bc.Mean= wtd.mean(bc.value,w=nutrient.no*finalweight,na.rm=TRUE),
#bc.SD =sqrt(wtd.var(bc.value,w=nutrient.no*finalweight,na.rm=TRUE)),
inadequacy = round(wtd.mean(direct.inadequacy,w=finalweight*nutrient.no,na.rm=TRUE),5),
risk = round(wtd.mean(direct.risk,w=finalweight*nutrient.no,na.rm=TRUE),5))
}
return(summary.dataset.nutrient)
}
dataset.nutrient3 <- reactive({
if(is.null(input$type3)|input$type3==""){
type3 = "Adult Women"
}
else{
type3 = input$type3
}
RDA2g <- as.numeric(rda_tul[rda_tul$nin.var_nutrient==var_nutrient & rda_tul$desc==type3,"RDA2g"])
TUL2.1g <- as.numeric(rda_tul[rda_tul$nin.var_nutrient==var_nutrient & rda_tul$desc==type3,"TUL2.1g"])
var_no <- desc[desc$desc==type3,"variable2"]
var_cu <- desc[desc$desc==type3,"cu"]
hh_summary_all <- hh_summary_all.orig
hh_summary_all$nutrient.no <- hh_summary_all[,var_no]
hh_summary_all$nutrient.cu <- hh_summary_all[,var_cu]
hh_summary_all$nutrient.pd <- (hh_summary_all[,var_nutrient]*hh_summary_all$nutrient.cu)/(30*hh_summary_all$consumerunits*hh_summary_all$nutrient.no)
hh_summary_all <- merge(hh_summary_all,food_all[,c("hhuid","fortificant")],by="hhuid",all.x=TRUE)
hh_summary_all$fortificant.pd <- with(hh_summary_all,(fortificant*nutrient.cu)/(30*consumerunits*nutrient.no))
#There are 10,231 cases who do not consume any wheat
#There are 5,074 cases who do not have 0 for no of Adult women
hh_summary_all$fortificant.pd <- with(hh_summary_all,ifelse(is.na(fortificant.pd),0,fortificant.pd))
# hh_summary_all$total.pd <- rowSums(hh_summary_all[,c("fortificant.pd","nutrient.pd")]) #Include NA
hh_summary_all$total.pd <- rowSums(hh_summary_all[,c("fortificant.pd","nutrient.pd")],na.rm = TRUE) #Include NA
# View(hh_summary_all[,c("hhuid","nutrient.pd","fortificant.pd","total.pd")])
hh_summary_all$total.pd <- with(hh_summary_all,ifelse(is.na(nutrient.pd)&fortificant.pd==0,NA,total.pd))
lambda.test = lambda_pop(hh_summary_all$nutrient.pd,hh_summary_all$finalweight,hh_summary_all$nutrient.no)
hh_summary_all$RDA2g <- RDA2g
hh_summary_all$TUL2.1g <- TUL2.1g
q99.9 <- with(hh_summary_all,wtd.quantile(nutrient.pd,w=finalweight*nutrient.no,probs=0.999,na.rm=TRUE))
q00.1 <- with(hh_summary_all,wtd.quantile(nutrient.pd,w=finalweight*nutrient.no,probs=0.001,na.rm=TRUE))
# hh_summary_all3 <- hh_summary_all[hh_summary_all$nutrient.pd<q99.9 & !is.na(hh_summary_all$nutrient.pd),]
hh_summary_all3 <- hh_summary_all[hh_summary_all$nutrient.pd<q99.9&hh_summary_all$nutrient.pd>q00.1 & !is.na(hh_summary_all$nutrient.pd),]
hh_summary_all3
})
f.bioavailability = 1 #From input
# hh_summary_all.nutrient <- hh_summary_all3
#-------------------------------------------------------#
output$distPlot3 <- renderPlot({
hh_summary_all.nutrient <- dataset.nutrient3()
# hh_summary_all.nutrient <- hh_summary_all3
state3 <- "India"
state3 <- input$state3
#
if(is.null(input$type3)|input$type3==""){
type3 = "Adult Women"
}
else{
type3 = input$type3
}
# q99.9 <- with(hh_summary_all.nutrient,wtd.quantile(nutrient.pd,w=nutrient.no*finalweight,probs=0.999,na.rm=TRUE))
if(state3 == "India"){
# dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9,c("NSS_State","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
dataset <- hh_summary_all.nutrient[,c("NSS_State","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
dataset <- melt(dataset,id.vars=c("NSS_State","hhuid","finalweight","nutrient.no","RDA2g","TUL2.1g"),measure.vars=c("nutrient.pd","total.pd"))
}
else{
# dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9 & hh_summary_all.nutrient$NSS_State==state3,c("NSS_State","DISTRICT","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$NSS_State==state3,c("NSS_State","DISTRICT","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
dataset <- melt(dataset,id.vars=c("NSS_State","DISTRICT","hhuid","finalweight","nutrient.no","RDA2g","TUL2.1g"),measure.vars=c("nutrient.pd","total.pd"))
}
dataset$fort <- with(dataset,ifelse(variable=="nutrient.pd","1. Before Fortification","2. After Fortification"))
title = paste0("Intake distribution of ",nutrient3," in ",state3," for ",type3)
legend = paste0("Intake in ",unit_nutrient)
hp3 <- ggplot() + geom_histogram(data=dataset,aes(x=value*multiplier,weight=nutrient.no*finalweight,group=fort)) + facet_grid(~fort)
hp3 <- hp3 + geom_vline(data=dataset,aes(xintercept=RDA2g*multiplier,group=fort),col="blue")
hp3 <- hp3 + geom_vline(data=dataset,aes(xintercept=TUL2.1g*multiplier,group=fort),col="red")
hp3 <- hp3 + xlab(legend) + ylab("Count (excl. top & bottom 0.1%ile)") + scale_y_continuous(labels = comma)
hp3 <- hp3 + ggtitle(title) + theme(text = element_text(size=15))
print(hp3)
})
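# summary3: weighted before/after-fortification summary table; inadequacy (intake below the
# RDA) and risk of excess (intake above the TUL) are estimated via risk_estimation().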
output$summary3 <- renderTable({
hh_summary_all.nutrient <- dataset.nutrient3()
# hh_summary_all.nutrient <- hh_summary_all3
state3 <- "India"
state3 <- input$state3
#Adult women is default
if(is.null(input$type3)||input$type3==""){
type3 = "Adult Women"
}
else{
type3 = input$type3
}
#Calculate 99.9%ile for outlier detection
# q99.9 <- with(hh_summary_all.nutrient,wtd.quantile(nutrient.pd,w=nutrient.no*finalweight,probs=0.999,na.rm=TRUE))
lambda = lambda_pop(hh_summary_all.nutrient$nutrient.pd,hh_summary_all.nutrient$finalweight,hh_summary_all.nutrient$nutrient.no)
#Eliminate outliers
if(state3 == "India"){
dataset <- hh_summary_all.nutrient[,c("NSS_State","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
# dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9,c("NSS_State","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
grouping.var3=c("NSS_State","fort")
}
else{
dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$NSS_State==state3,c("NSS_State","DISTRICT","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
# dataset <- hh_summary_all.nutrient[hh_summary_all.nutrient$nutrient.pd<q99.9 & hh_summary_all.nutrient$NSS_State==state3,c("NSS_State","DISTRICT","hhuid","finalweight","nutrient.no","nutrient.pd","total.pd","RDA2g","TUL2.1g")]
grouping.var3=c("DISTRICT","fort")
}
dataset <- melt(dataset,measure.vars=c("nutrient.pd","total.pd"))
dataset$fort <- with(dataset,ifelse(variable=="nutrient.pd","1. Before Fortification","2. After Fortification"))
# dataset <- dataset[!is.na(dataset$value),]
method3 = 1
if(var_nutrient=="IronFe.FE"|is.null(lambda)|is.na(lambda)){
method3 = 2
}
summary.fortification <- dataset %>% group_by_(grouping.var3[1],grouping.var3[2]) %>% risk_estimation(.,grouping.var=grouping.var3,lambda=lambda,method=method3)
# summary.fortification <- dataset %>% risk_estimation(.,lambda=lambda,method=2) #Matches
# summary.fortification <- dataset %>% group_by(NSS_State) %>% risk_estimation(.,lambda=lambda,method=1) #Matches
summary.fortification[,3:5] <- round(summary.fortification[,3:5]*multiplier,2)
summary.fortification[,6:7] <- round(summary.fortification[,6:7],3)*100
colnames(summary.fortification)[3:5] <- paste0(colnames(summary.fortification)[3:5]," (in ",unit_nutrient,")")
colnames(summary.fortification)[6:7] <- paste0(colnames(summary.fortification)[6:7]," (%)")
print(summary.fortification)
})
})
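# goButton4: district-level maps and summary tables of crop area, production and yield
# from the agricultural statistics dataset (agristats.summary).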
observeEvent(input$goButton4,{
crop4 = "Paddy"
crop4 = input$crop4
map_df_dist <- reactive({
# state2 = "Gujarat"
state4="India"
state4 = input$state4
year4=2008
year4 = input$year4
statistic4 = "Area"
statistic4 = input$statistic4
if(state4=="India"){
dataset <- merge(district_shp_df2,agristats.summary[agristats.summary$mag.Year2==year4 & agristats.summary$mag.CROP==crop4,c(statistic4,"censuscode")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset <- dataset[order(dataset$order),]
dataset
}
else{
dataset <- merge(district_shp_df2[district_shp_df2$NSS_State==state4,],agristats.summary[agristats.summary$mag.Year2==year4 & agristats.summary$mag.CROP==crop4 & agristats.summary$NSS_State==state4,c(statistic4,"censuscode")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset <- dataset[order(dataset$order),]
dataset
}
})
# map_df_dist <- dataset
output$mapPlot4<- renderPlot({
state4="India"
state4 = input$state4
year4=2010
year4 = input$year4
statistic4 = "Production"
statistic4 = input$statistic4
unit = "Tonnes"
if(statistic4=="Area"){
unit = "Hectare"
}
if(statistic4=="Yield"){
unit = "Tonnes per Hectare"
}
crop4 = "All Pulses"
mp2 <- ggplot() + geom_polygon(data=map_df_dist(),aes(x=long,y=lat,group=group,fill=eval(parse(text=statistic4))))
# mp2 <- ggplot() + geom_polygon(data=map_df_dist,aes(x=long,y=lat,group=group,fill=eval(parse(text = statistic4))))
mp2 <- mp2 + coord_map() + xlab("Longitude") + ylab("Latitude") + ggtitle(paste0(statistic4," of ",crop4," - ",state4," by District")) + theme_grey()
mp2 <- mp2 + scale_fill_distiller(name=paste0(statistic4," in ",unit), palette = "YlGnBu",direction=1)
print(mp2)
},height=600)
output$summary4 <- renderTable({
state4="India"
state4 = input$state4
year4=2008
year4 = input$year4
crop4 = "Rice"
crop4 = input$crop4
if(state4=="India"){
agristats.summary_state <- agristats.summary[agristats.summary$mag.CROP==crop4&agristats.summary$mag.Year2==year4,] %>% group_by(NSS_State) %>% summarise(Area=sum(Area,na.rm=TRUE),Production=sum(Production,na.rm=TRUE))
agristats.summary_state$Yield <- with(agristats.summary_state,ifelse(Area==0,0,Production/Area))
colnames(agristats.summary_state) <- c("State","Area in Hectares","Production in Tonnes","Yield in Tonnes per Hectare")
print(agristats.summary_state)
}
else{
agristats.summary_dist <- agristats.summary[agristats.summary$mag.CROP==crop4&agristats.summary$mag.Year2==year4&agristats.summary$NSS_State==state4,c("NSS_State","DISTRICT","Production","Area","Yield")]
colnames(agristats.summary_dist) <- c("State","District","Production in Tonnes","Area in Hectares","Yield in Tonnes per Hectare")
print(agristats.summary_dist)
}
})
})
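# goButton5: district-level maps of NFHS-4 health outcome indicators (nfhs4.complete3).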
observeEvent(input$goButton5,{
# outcome5 = "77. Non-pregnant women age 15-49 years who are anaemic (<12.0 g/dl) (%)"
outcome5 = input$outcome5
outcome.variable = outcomelist[outcomelist$Description==outcome5,"variable.ITEMID2"]
map_df_dist <- reactive({
# state5 = "Andhra Pradesh"
state5="India"
state5 = input$state5
area5 = "Total"
area5 = input$area5
if(state5=="India"){
dataset <- merge(district_shp_df2,nfhs4.complete3[nfhs4.complete3$variable.ITEMID2==outcome.variable,c(area5,"censuscode")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset <- dataset[order(dataset$order),]
dataset
}
else{
dataset <- merge(district_shp_df2[district_shp_df2$NSS_State==state5,],nfhs4.complete3[nfhs4.complete3$variable.ITEMID2==outcome.variable & nfhs4.complete3$NSS_State==state5,c(area5,"censuscode","DISTRICT")],by.x="id",by.y="censuscode",all.x=TRUE)
dataset <- dataset[order(dataset$order),]
dataset
}
})
output$mapPlot5<- renderPlot({
state5="India"
state5 = input$state5
area5 = "Total"
area5 = input$area5
title = paste0(substr(outcome5,4,str_length(outcome5))," - ",state5," by District")
# title = paste0(substr(outcome5,4,str_length(outcome5))," - Kurnool, AP")
# mp2 <- ggmap(india) #+ geom_polygon(data=states_shp,aes(x=long,y=lat,group=group),color="black",fill="white",size=0.2,alpha=0.8)
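# The base-layer outline below is hard-coded to census state id 28 (Andhra Pradesh).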
mp2 <- ggplot() + geom_polygon(data=states_shp[states_shp$id==28,],aes(x=long,y=lat,group=group),color="black",fill="white",size=0.2,alpha=0.8)
mp2 <- mp2 + geom_polygon(data=map_df_dist(),aes(x=long,y=lat,group=group,fill=eval(parse(text=area5))))
# mp2 <- ggplot() + geom_polygon(data=map_df_dist,aes(x=long,y=lat,group=group,fill=eval(parse(text = area5))))
# mp2 <- mp2 + geom_text(data=label_dist,aes(long,lat,label=DISTRICT),size=2)
mp2 <- mp2 + coord_map() + xlab("Longitude") + ylab("Latitude") + ggtitle(title) + theme_grey()
mp2 <- mp2 + scale_fill_distiller(name=title, palette = "RdYlGn",direction=-1,limits=c(10,80))
print(mp2)
},height=600)
})
})
|
/code/server.R
|
no_license
|
jvargh7/fortification-simulation-india
|
R
| false | false | 44,374 |
r
|
|
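# xyplot.eda8: EDA x-y plot of zz at locations (xx, yy); points are classed into eight symbol
# groups by the 2nd, 5th, 25th, 50th, 75th, 95th and 98th percentiles of zz, class counts are
# written to the console, and an optional legend is placed interactively with locator().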
xyplot.eda8 <-
function (xx, yy, zz, sfact = 1, xlim = NULL, ylim = NULL, xlab = deparse(substitute(xx)),
ylab = deparse(substitute(yy)), zlab = deparse(substitute(zz)),
main = "", log = NULL, ifgrey = FALSE, symcolr = NULL, iflgnd = FALSE,
pctile = FALSE, title = deparse(substitute(zz)), cex.lgnd = 0.8, ...)
{
frame()
oldpar <- par()
on.exit(par(oldpar))
temp.z <- remove.na(cbind(xx, yy, zz))
x <- temp.z$x[1:temp.z$n, 1]
y <- temp.z$x[1:temp.z$n, 2]
z <- temp.z$x[1:temp.z$n, 3]
nz <- temp.z$n
if (main == "")
if (zlab == "")
banner <- ""
else banner <- paste("EDA Percentile Based Plot for",
zlab)
else banner <- main
if (is.null(log))
log = ""
plot(x, y, type = "n", log = log, xlab = xlab, ylab = ylab,
xlim = xlim, ylim = ylim, main = banner, ...)
zcut <- quantile(z, probs = c(0.02, 0.05, 0.25, 0.5, 0.75,
0.95, 0.98))
zzz <- cutter(z, zcut)
npch <- c(1, 1, 1, 1, 0, 0, 0, 0)
size <- c(2, 1.5, 1, 0.5, 0.5, 1, 1.5, 2) * sfact
if (ifgrey) {
symcolr <- grey(c(0, 0.15, 0.3, 0.4, 0.4, 0.3, 0.15,
0))
}
else {
palette(rainbow(36))
if (length(symcolr) != 8)
symcolr <- c(25, 22, 20, 13, 13, 6, 4, 1)
}
for (i in 1:nz) {
points(x[i], y[i], pch = npch[zzz[i]], cex = size[zzz[i]],
col = symcolr[zzz[i]])
}
cat("\tCut Levels\t No. of Symbols Symbol - size - Colour\n\t\t\t\t\t\tsfact =",
format(sfact, nsmall = 2), "\n\n")
stype <- character(8)
stype[1:4] <- "Circle"
stype[5:8] <- "Square"
pct <- 0
for (i in 1:7) {
ni <- length(zzz[zzz == i])
pct <- pct + 100 * ni/nz
cat("\t\t\t ", ni, "\t ", stype[i], format(size[i],
nsmall = 2), " ", symcolr[i], "\n\t", signif(zcut[i],
4), "\t", round(pct, 1), "%\n")
}
ni <- length(zzz[zzz == 8])
cat("\t\t\t ", ni, "\t ", stype[8], format(size[8],
nsmall = 2), " ", symcolr[8], "\n")
if (iflgnd) {
lgnd.line <- numeric(8)
zcut <- signif(zcut, 3)
if (pctile) {
title <- paste(deparse(substitute(zz)), "Percentiles")
lgnd.line[1] <- "> 98th"
lgnd.line[2] <- "95th - 98th"
lgnd.line[3] <- "75th - 95th"
lgnd.line[4] <- "50th - 75th"
lgnd.line[5] <- "25th - 50th"
lgnd.line[6] <- "5th - 25th"
lgnd.line[7] <- "2nd - 5th"
lgnd.line[8] <- "< 2nd"
}
else {
lgnd.line[1] <- paste(">", zcut[7])
lgnd.line[2] <- paste(zcut[6], "-", zcut[7])
lgnd.line[3] <- paste(zcut[5], "-", zcut[6])
lgnd.line[4] <- paste(zcut[4], "-", zcut[5])
lgnd.line[5] <- paste(zcut[3], "-", zcut[4])
lgnd.line[6] <- paste(zcut[2], "-", zcut[3])
lgnd.line[7] <- paste(zcut[1], "-", zcut[2])
lgnd.line[8] <- paste("<", zcut[1])
}
legend(locator(1), pch = npch[8:1], col = symcolr[8:1],
pt.cex = size[8:1], lgnd.line[1:8], cex = cex.lgnd,
title = title, ...)
}
palette("default")
invisible()
}
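# Example call (hypothetical data frame 'dem' with easting/northing coordinates and a Zn
# column; remove.na() and cutter() are assumed to be available from the rgr package):
# xyplot.eda8(dem$East, dem$North, dem$Zn, sfact = 1.5, iflgnd = TRUE, pctile = TRUE)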
|
/rgr/R/xyplot.eda8.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 3,360 |
r
|
|
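# Purled vignette code: reads the Brodeur et al. Stata file, recomputes p-values from the raw
# t-statistics (2*(1-pnorm(t_stat_raw))), drops rows where these disagree with the reported
# p-values by more than 0.05, and saves the tidy 'brodeur2016' dataset with use_data().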
## ----packages------------------------------------------------------------
library(foreign)
library(devtools)
library(dplyr)
library(pryr)
## ----load_data, eval=FALSE-----------------------------------------------
# pdat = read.dta("final_stars_supp.dta")
# glimpse(pdat)
## ----load_data_hidden, echo=FALSE----------------------------------------
pdat = read.dta("~/data/economics/final_stars_supp.dta")
glimpse(pdat)
## ----na_pvals------------------------------------------------------------
table(is.na(pdat$p_value_num))
## ----na_tstats-----------------------------------------------------------
tstat_pvals = 2*(1-pnorm(pdat$t_stat_raw))
table(is.na(tstat_pvals))
## ----compare-------------------------------------------------------------
quantile((tstat_pvals - pdat$p_value_num),na.rm=T)
plot(tstat_pvals, pdat$p_value_num,pch=19)
## ----nomatch-------------------------------------------------------------
ind = which(abs(tstat_pvals - pdat$p_value_num) > 0.05)
pdat[ind,] %>% select(journal_id,article_page,first_author)
## ------------------------------------------------------------------------
pdat = pdat[-ind,]
## ----select--------------------------------------------------------------
brodeur2016 = pdat %>% mutate(pvalue=2*(1-pnorm(t_stat_raw)),journal = journal_id) %>%
mutate(field="Economics", abstract=FALSE) %>%
mutate(operator = NA, doi = NA, pmid=NA) %>%
select(pvalue,year,journal,field,
abstract,operator,doi,pmid) %>%
filter(!is.na(pvalue)) %>%
as_tibble()
## ----save_pvals----------------------------------------------------------
use_data(brodeur2016,overwrite=TRUE)
## ----session_info--------------------------------------------------------
session_info()
|
/inst/doc/brodeur-2016.R
|
no_license
|
jayhesselberth/tidypvals
|
R
| false | false | 1,725 |
r
|
|
#
# drools.R, 18 Oct 18
# Data from:
# Parameter-Free Probabilistic {API} Mining across {GitHub}
# Jaroslav Fowkes and Charles Sutton
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG API_mining method_call call_sequence-mining
source("ESEUR_config.r")
library("arules")
# Convert original Fowkes and Sutton data into two column data transactions
# library("foreign")
# library("plyr")
#
#
# split_calls=function(df)
# {
# return(data.frame(called=unlist(strsplit(df$fqCalls, " "))))
# }
#
#
# drool=read.arff(paste0(ESEUR_dir, "odds-and-ends/drools.arff"))
#
# d=ddply(drool, .(fqCaller), split_calls)
#
# write.csv(d, file="drools.csv.xz", row.names=FALSE)
drools=read.transactions(paste0(ESEUR_dir, "odds-and-ends/drools.csv.xz"),
format="single", cols=c(1, 2))
rules=apriori(drools, parameter=list(support=0.0001, confidence=0.1))
summary(rules)
inspect(head(rules, n=3, by = "confidence"))
|
/odds-and-ends/drools.R
|
no_license
|
Derek-Jones/ESEUR-code-data
|
R
| false | false | 984 |
r
|
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/filter.R
\name{\%=\%}
\alias{\%=\%}
\title{Construct a selector filter}
\usage{
dimension \%=\% pattern
}
\arguments{
\item{dimension}{dimension to match}
\item{pattern}{pattern to match}
}
\description{
Construct a selector filter
}
|
/man/grapes-equals-grapes.Rd
|
permissive
|
bbarrett90/RDruid
|
R
| false | false | 322 |
rd
|
|
## Put the parameters you want hard-coded into the program in this file
source("functions.R")
dbname <- Sys.getenv("DBNAME")
host <- Sys.getenv("HOSTNAME")
port <- Sys.getenv("PORT")
user <- Sys.getenv("USERNAME")
password <- Sys.getenv("PASSWORD")
options <- Sys.getenv("SCHEMA")
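# The values above come from environment variables (e.g. set in the container environment or
# a .Renviron file). Placeholder examples only, not real credentials:
# DBNAME=analytics
# HOSTNAME=db.example.internal
# PORT=5432
# USERNAME=model_svc
# PASSWORD=changeme
# SCHEMA=public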
|
/push/parameters.R
|
permissive
|
ryanbieber/model-deployment-kubernetes
|
R
| false | false | 282 |
r
|
|
##### City Center Visits Database #####
library(readr)
library(lubridate)
library(data.table)
Visit_Frequencies_Q1_2017 <- read_delim("providedData/Visit Frequencies Q1 2017.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Frequencies_Q2_2017 <- read_delim("providedData/Visit Frequencies Q2 2017.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Frequencies_Q3_2017 <- read_delim("providedData/Visit Frequencies Q3 2017.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Frequencies_Q4_2016 <- read_delim("providedData/Visit Frequencies Q4 2016.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Times_Q1_2017 <- read_delim("providedData/Visit Times Q1 2017.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
Visit_Times_Q2_2017 <- read_delim("providedData/Visit Times Q2 2017.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
Visit_Times_Q3_2017 <- read_delim("providedData/Visit Times Q3 2017.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
Visit_Times_Q4_2016 <- read_delim("providedData/Visit Times Q4 2016.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
##### Clean / Understand Stedelijke_Evenementen dataset #####
# read in dataset
ds.urban.events <- read.csv(paste0(dir.providedData, "Stedelijke_Evenementen_2010_2017.csv"))
# translate column names to English
colnames(ds.urban.events) <- c("id",
"event_name",
"organizer",
"entree_fee",
"initial_year",
"start_date",
"end_date",
"nr_days",
"location",
"inside_or_outside",
"international_national_regional",
"nr_visitors",
"year")
write.csv(ds.urban.events, file = paste0(dir.providedData, "ds.urban.events.csv"), row.names = FALSE)
ds.events <- read.csv(paste0(dir.providedData, "ds.urban.events.csv"))
##### Clean / Understand Rotterdampas dataset #####
load(paste0(dir.providedData, "rotterdampas.RData"))
ds.rotterdamPas <- Rotterdampas_2017_2018
colnames(ds.rotterdamPas) <- c("id", "passH_nb", "age_category", "passH_postcode",
"passH_p4", "passH_neighborhood", "passH_district", "partner_nb",
"partner_postcode", "partner_p4", "partner_neighborhood", "partner_district",
"activity_nb", "discount", "activity_validity", "inside",
"nice_weather", "bad_weather", "fun_for_kids", "fun_without_kids",
"highlight", "use_date", "compensation_incl_tax", "social_group",
"activity_category", "activity_type", "year", "time")
# limit rotterdamPas dataset to activities with partners that are located within rotterdam (based on 30XX postcode)
dt.rotterdamPas <- as.data.table(ds.rotterdamPas)
dt.rotterdamPas <- dt.rotterdamPas[, "activity_within_rotterdam" := ifelse(substr(partner_p4, 1, 2) == 30, 1, 0)]
dt.rotterdamPas$activity_within_rotterdam <- factor(dt.rotterdamPas$activity_within_rotterdam)
dt.rotterdamPas <- dt.rotterdamPas[activity_within_rotterdam == 1, ]
dt.rotterdamPas$partner_postcode <- dt.rotterdamPas[, gsub(" ", "", dt.rotterdamPas$partner_postcode)]
# Translate Dutch Activity types into English
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Natuurparken"] <- "Nature Park"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Overig"] <- "Others"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Film"] <- "Film"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Watersport"] <- "Water Sports"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Cafe (koffie & gebak)"] <- "Café (coffee & pastry)"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Iconen"] <- "Icons"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Musea"] <- "Museum"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Dieren(parken)"] <- "Zoo"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Sightseeing"] <- "Sightseeing"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Restaurant (lunch & diner)"] <- "Restaurant (lunch & diner)"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Actief"] <- "Active"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Contributie"] <- "Contribution"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "(Pret)parken"] <- "Amusement Park"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Creatief"] <- "Creative"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Zwemsport"] <- "Swimming"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Taal, lezen & leren"] <- "Language, reading & learning"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Wintersport"] <- "Winter sports"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Schoonheid van binnen"] <- "Beauty on the inside"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Krachtsport"] <- "Weight lifting"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Schoonheid van buiten"] <- "Beauty on the outside"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Rondleiding"] <- "Guided tours"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Muziek & dans"] <- "Music & Dance"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "IJsje"] <- "Ice cream"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Alles"] <- "Everything"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Theater, muziek en dans"] <- "Theatre, music and dance"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Culinair"] <- "Culinary"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Relaxen"] <- "Relaxing"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Vechtsport"] <- "Martial arts"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Theater"] <- "Theatre"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Gouda"] <- "Gouda"
# save dataset
saveRDS(dt.rotterdamPas, file = paste0(dir.providedData, "dt.rotterdamPas.RData"))
# Compensation is the part of the price that the government pays (and the pass holders do not), in order to provide the discount
##### Clean sport data: Sportparticipatie_Rotterdam_2015_2017.csv #####
sportPart.ds <- read_csv("providedData/Sportparticipatie_Rotterdam_2015_2017.csv")
sportPart.ds <- sportPart.ds[, 2:ncol(sportPart.ds)]
colnames(sportPart.ds) <- c("Neighbourhood",
"Postcode",
"Year",
"Total %",
"4-11 years %",
"12-17 years %",
"18-64 years %",
"65-80 years %",
"81+ years %",
"4-11 years % men",
"4-11 years % women",
"12-17 years % men",
"12-17 years % women",
"18-64 years % men",
"18-64 years % women",
"65-80 years % men",
"65-80 years % women",
"81+ years % men",
"81+ years % women")
# Save cleaned data
write.csv(sportPart.ds,'providedData/cleanSports.csv')
##### Import and translate postalcodes_with geoloc #####
ds.postalCodes <- read.csv(paste0(dir.providedData, "Postalcodes_with_GeoLoc.csv"))
##### Import and prepare weather data #####
library(stringr)
ds.weather <- read.delim(paste0(dir.additionalData, "ds.weather.txt"))
df.weather <- as.data.frame(ds.weather)
names(df.weather) <- c("wt")
df.weather <- str_split_fixed(df.weather$wt, ",", 12)
df.weather <- df.weather[19:1115, 2:12]
df.weather <- df.weather[, c(-3,-5,-6,-8,-10)]
df.weather <- as.data.frame(df.weather)
names(df.weather) <- c("Date",
"Daily Avg. Wind Speed",
"Daily Avg. Temperature",
"Sunshine Duration",
"Prec. Duration",
"Highest h. amount prec.")
df.weather <- df.weather[3:1097, ]
# Dates are read as YYYYMMDD strings; convert to character and insert "/" separators (YYYY/MM/DD) so as.Date() can parse them
df.weather$Date <- as.character(df.weather$Date)
df.weather$Date <- sub("([[:digit:]]{4,4})$", "/\\1", df.weather$Date)
df.weather$Date <- sub("(.{7})(/*)", "\\1/\\2", df.weather$Date)
df.weather$Date <- as.Date(df.weather$Date)
saveRDS(df.weather, file = paste0(dir.providedData, "df.weather.RData"))
# save script as pdf
knitr::stitch('cleaning_data.R')
|
/cleaning_data.R
|
no_license
|
oldstretch/Swaggathon
|
R
| false | false | 9,942 |
r
|
##### City Center Visits Database #####
library(readr)
library(lubridate)
library(data.table)
Visit_Frequencies_Q1_2017 <- read_delim("providedData/Visit Frequencies Q1 2017.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Frequencies_Q2_2017 <- read_delim("providedData/Visit Frequencies Q2 2017.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Frequencies_Q3_2017 <- read_delim("providedData/Visit Frequencies Q3 2017.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Frequencies_Q4_2016 <- read_delim("providedData/Visit Frequencies Q4 2016.csv",
";", escape_double = FALSE,
col_types = cols(Average_Time = col_time(format = "%H:%M:%S")),
trim_ws = TRUE)
Visit_Times_Q1_2017 <- read_delim("providedData/Visit Times Q1 2017.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
Visit_Times_Q2_2017 <- read_delim("providedData/Visit Times Q2 2017.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
Visit_Times_Q3_2017 <- read_delim("providedData/Visit Times Q3 2017.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
Visit_Times_Q4_2016 <- read_delim("providedData/Visit Times Q4 2016.csv",
";", escape_double = FALSE,
trim_ws = TRUE)
##### Clean / Understand Stedelijke_Evenementen dataset #####
# read in dataset
ds.urban.events <- read.csv(paste0(dir.providedData, "Stedelijke_Evenementen_2010_2017.csv"))
# translate column names to English
colnames(ds.urban.events) <- c("id",
"event_name",
"organizer",
"entree_fee",
"initial_year",
"start_date",
"end_date",
"nr_days",
"location",
"inside_or_outside",
"international_national_regional",
"nr_visitors",
"year")
write.csv(ds.urban.events, file = paste0(dir.providedData, "ds.urban.events.csv"), row.names = FALSE)
ds.events <- read.csv(paste0(dir.providedData, "ds.urban.events.csv"))
##### Clean / Understand Rotterdampas dataset #####
load(paste0(dir.providedData, "rotterdampas.RData"))
ds.rotterdamPas <- Rotterdampas_2017_2018
colnames(ds.rotterdamPas) <- c("id", "passH_nb", "age_category", "passH_postcode",
"passH_p4", "passH_neighborhood", "passH_district", "partner_nb",
"partner_postcode", "partner_p4", "partner_neighborhood", "partner_district",
"activity_nb", "discount", "activity_validity", "inside",
"nice_weather", "bad_weather", "fun_for_kids", "fun_without_kids",
"highlight", "use_date", "compensation_incl_tax", "social_group",
"activity_category", "activity_type", "year", "time")
# limit rotterdamPas dataset to activities with partners that are located within rotterdam (based on 30XX postcode)
dt.rotterdamPas <- as.data.table(ds.rotterdamPas)
dt.rotterdamPas <- dt.rotterdamPas[, "activity_within_rotterdam" := ifelse(substr(partner_p4, 1, 2) == 30, 1, 0)]
dt.rotterdamPas$activity_within_rotterdam <- factor(dt.rotterdamPas$activity_within_rotterdam)
dt.rotterdamPas <- dt.rotterdamPas[activity_within_rotterdam == 1, ]
dt.rotterdamPas$partner_postcode <- dt.rotterdamPas[, gsub(" ", "", dt.rotterdamPas$partner_postcode)]
# Translate Dutch Activity types into English
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Natuurparken"] <- "Nature Park"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Overig"] <- "Others"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Film"] <- "Film"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Watersport"] <- "Water Sports"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Cafe (koffie & gebak)"] <- "Café (coffee & pastry)"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Iconen"] <- "Icons"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Musea"] <- "Museum"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Dieren(parken)"] <- "Zoo"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Sightseeing"] <- "Sightseeing"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Restaurant (lunch & diner)"] <- "Restaurant (lunch & diner)"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Actief"] <- "Active"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Contributie"] <- "Contribution"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "(Pret)parken"] <- "Amusement Park"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Creatief"] <- "Creative"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Zwemsport"] <- "Swimming"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Taal, lezen & leren"] <- "Language, reading & learning"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Wintersport"] <- "Winter sports"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Schoonheid van binnen"] <- "Beauty on the inside"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Krachtsport"] <- "Weight lifting"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Schoonheid van buiten"] <- "Beauty on the outside"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Rondleiding"] <- "Guided tours"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Muziek & dans"] <- "Music & Dance"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "IJsje"] <- "Ice cream"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Alles"] <- "Everything"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Theater, muziek en dans"] <- "Theatre, music and dance"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Culinair"] <- "Culinary"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Relaxen"] <- "Relaxing"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Vechtsport"] <- "Martial arts"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Theater"] <- "Theatre"
dt.rotterdamPas$activity_type[dt.rotterdamPas$activity_type == "Gouda"] <- "Gouda"
# save dataset
saveRDS(dt.rotterdamPas, file = paste0(dir.providedData, "dt.rotterdamPas.RData"))
# Compensation is the part of the price that the government pays (and the pass holders do not), in order to provide the discount
##### Clean sport data: Sportparticipatie_Rotterdam_2015_2017.csv #####
sportPart.ds <- read_csv("providedData/Sportparticipatie_Rotterdam_2015_2017.csv")
sportPart.ds <- sportPart.ds[, 2:ncol(sportPart.ds)]
colnames(sportPart.ds) <- c("Neighbourhood",
"Postcode",
"Year",
"Total %",
"4-11 years %",
"12-17 years %",
"18-64 years %",
"65-80 years %",
"81+ years %",
"4-11 years % men",
"4-11 years % women",
"12-17 years % men",
"12-17 years % women",
"18-64 years % men",
"18-64 years % women",
"65-80 years % men",
"65-80 years % women",
"81+ years % men",
"81+ years % women")
# Save cleaned data
write.csv(sportPart.ds,'providedData/cleanSports.csv')
##### Import and translate postalcodes_with geoloc #####
ds.postalCodes <- read.csv(paste0(dir.providedData, "Postalcodes_with_GeoLoc.csv"))
##### Import and prepare weather data #####
library(stringr)
ds.weather <- read.delim(paste0(dir.additionalData, "ds.weather.txt"))
df.weather <- as.data.frame(ds.weather)
names(df.weather) <- c("wt")
df.weather <- str_split_fixed(df.weather$wt, ",", 12)
df.weather <- df.weather[19:1115, 2:12]
df.weather <- df.weather[, c(-3,-5,-6,-8,-10)]
df.weather <- as.data.frame(df.weather)
names(df.weather) <- c("Date",
"Daily Avg. Wind Speed",
"Daily Avg. Temperature",
"Sunshine Duration",
"Prec. Duration",
"Highest h. amount prec.")
df.weather <- df.weather[3:1097, ]
# Dates are read as YYYYMMDD strings; convert to character and insert "/" separators (YYYY/MM/DD) so as.Date() can parse them
df.weather$Date <- as.character(df.weather$Date)
df.weather$Date <- sub("([[:digit:]]{4,4})$", "/\\1", df.weather$Date)
df.weather$Date <- sub("(.{7})(/*)", "\\1/\\2", df.weather$Date)
df.weather$Date <- as.Date(df.weather$Date)
saveRDS(df.weather, file = paste0(dir.providedData, "df.weather.RData"))
# save script as pdf
knitr::stitch('cleaning_data.R')
|
# Copyright 2013, 2018, 2023 Christian Sigg
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#' Multi-Domain Additional Explained Correlation
#'
#' \code{macor} generalizes \code{\link{acor}} to the case of more than two data
#' domains.
#'
#' @export
#'
#' @param x a list of numeric matrices which contain the data from the different
#' domains
#' @param coef a list of matrices containing the canonical vectors related to
#' each data domain. Each matrix contains the respective canonical vectors as
#' its columns.
#' @param center a list of logical values indicating whether the empirical mean
#' of (each column of) the corresponding data matrix should be subtracted.
#' Alternatively, a list of vectors can be supplied, where each vector
#' specifies the mean to be subtracted from the corresponding data matrix.
#' Each list element is passed to \code{\link{scale}}.
#' @param scale_ a list of logical values indicating whether the columns of the
#' corresponding data matrix should be scaled to have unit variance before the
#' analysis takes place. The default is \code{FALSE} for consistency with
#' \code{acor}. Alternatively, a list of vectors can be supplied, where each
#' vector specifies the standard deviations used to rescale the columns of the
#' corresponding data matrix. Each list element is passed to
#' \code{\link{scale}}.
#'
#' @return A list of class \code{mcancor} with the
#' following elements: \item{cor}{a multi-dimensional array containing the
#' additional correlations explained by each pair of canonical variables. The
#' first two dimensions correspond to the domains, and the third dimension
#' corresponds to the different canonical variables per domain.}
#' \item{coef}{copied from the input arguments} \item{center}{the list of
#' empirical means used to center the data matrices} \item{scale}{the list of
#' empirical standard deviations used to scale the data matrices}\item{xp}{the
#' list of deflated data matrices corresponding to \code{x}}
#'
#' @example inst/atexample/macor_examples.R
#'
macor <- function(x, coef, center = TRUE, scale_ = FALSE) {
X <- x
W <- coef
m <- length(X) # number of domains
n <- nrow(X[[1]]) # number of observations
nvar <- ncol(W[[1]]) # number of canonical variables for each domain
Xp <- list(); # deflated data sets
cen <- list(); sc <- list(); # centering and scaling
dx <- numeric(m) # dimensionality of data domain
for (mm in 1:m) {
X[[mm]] <- scale(as.matrix(x[[mm]]),
if (is.list(center)) center[[mm]] else center,
if (is.list(scale_)) scale_[[mm]] else scale_
)
dx[mm] <- ncol(X[[mm]])
cent <- attr(X[[mm]], "scaled:center")
cen[[mm]] <- if(is.null(cent)) rep.int(0, dx[mm]) else cent # follows cancor convention
scal <- attr(X[[mm]], "scaled:scale")
if(any(scal == 0))
stop("cannot rescale a constant column to unit variance in domain ", mm)
sc[[mm]] <- if(is.null(scal)) FALSE else scal
Xp[[mm]] <- X[[mm]]
attr(Xp[[mm]], "scaled:center") <- NULL
attr(Xp[[mm]], "scaled:scale") <- NULL
}
corr <- array(NA, dim = c(m, m, nvar)) # additional explained correlation
for (pp in seq(nvar)) {
XpW <- matrix(NA, n, m)
for (mm in 1:m) {
w <- W[[mm]][ , pp]
XpW[ , mm] <- Xp[[mm]]%*%w
# deflate data matrix
q <- t(Xp[[mm]])%*%(X[[mm]]%*%w)
q <- q/normv(q)
Xp[[mm]] <- Xp[[mm]] - Xp[[mm]]%*%q%*%t(q)
}
corr[ , , pp] <- cor(XpW, XpW)
}
mcc <- list(cor = corr, coef = coef, center = cen, scale = sc, xp = Xp)
class(mcc) <- "mcancor"
return(mcc)
}
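# A minimal usage sketch (not part of the package sources, so kept as a comment):
# random data for three domains and arbitrary unit-norm coefficient vectors stand
# in for the canonical vectors that mcancor() would normally supply. Assumes the
# nscancor package is loaded, since macor() relies on the internal helper normv().
# set.seed(1)
# x <- list(matrix(rnorm(50 * 4), 50, 4),
#           matrix(rnorm(50 * 5), 50, 5),
#           matrix(rnorm(50 * 3), 50, 3))
# coef <- lapply(x, function(xx) {
#   w <- matrix(rnorm(ncol(xx) * 2), ncol(xx), 2)  # two canonical vectors per domain
#   sweep(w, 2, sqrt(colSums(w^2)), "/")           # normalise columns to unit length
# })
# res <- macor(x, coef)
# res$cor[, , 1]  # correlations explained by the first canonical variables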
|
/R/macor.R
|
no_license
|
chrsigg/nscancor
|
R
| false | false | 4,216 |
r
|
# Copyright 2013, 2018, 2023 Christian Sigg
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#' Multi-Domain Additional Explained Correlation
#'
#' \code{macor} generalizes \code{\link{acor}} to the case of more than two data
#' domains.
#'
#' @export
#'
#' @param x a list of numeric matrices which contain the data from the different
#' domains
#' @param coef a list of matrices containing the canonical vectors related to
#' each data domain. Each matrix contains the respective canonical vectors as
#' its columns.
#' @param center a list of logical values indicating whether the empirical mean
#' of (each column of) the corresponding data matrix should be subtracted.
#' Alternatively, a list of vectors can be supplied, where each vector
#' specifies the mean to be subtracted from the corresponding data matrix.
#' Each list element is passed to \code{\link{scale}}.
#' @param scale_ a list of logical values indicating whether the columns of the
#' corresponding data matrix should be scaled to have unit variance before the
#' analysis takes place. The default is \code{FALSE} for consistency with
#' \code{acor}. Alternatively, a list of vectors can be supplied, where each
#' vector specifies the standard deviations used to rescale the columns of the
#' corresponding data matrix. Each list element is passed to
#' \code{\link{scale}}.
#'
#' @return A list of class \code{mcancor} with the
#' following elements: \item{cor}{a multi-dimensional array containing the
#' additional correlations explained by each pair of canonical variables. The
#' first two dimensions correspond to the domains, and the third dimension
#' corresponds to the different canonical variables per domain.}
#' \item{coef}{copied from the input arguments} \item{center}{the list of
#' empirical means used to center the data matrices} \item{scale}{the list of
#' empirical standard deviations used to scale the data matrices}\item{xp}{the
#' list of deflated data matrices corresponding to \code{x}}
#'
#' @example inst/atexample/macor_examples.R
#'
macor <- function(x, coef, center = TRUE, scale_ = FALSE) {
X <- x
W <- coef
m <- length(X) # number of domains
n <- nrow(X[[1]]) # number of observations
nvar <- ncol(W[[1]]) # number of canonical variables for each domain
Xp <- list(); # deflated data sets
cen <- list(); sc <- list(); # centering and scaling
dx <- numeric(m) # dimensionality of data domain
for (mm in 1:m) {
X[[mm]] <- scale(as.matrix(x[[mm]]),
if (is.list(center)) center[[mm]] else center,
if (is.list(scale_)) scale_[[mm]] else scale_
)
dx[mm] <- ncol(X[[mm]])
cent <- attr(X[[mm]], "scaled:center")
cen[[mm]] <- if(is.null(cent)) rep.int(0, dx[mm]) else cent # follows cancor convention
scal <- attr(X[[mm]], "scaled:scale")
if(any(scal == 0))
stop("cannot rescale a constant column to unit variance in domain ", mm)
sc[[mm]] <- if(is.null(scal)) FALSE else scal
Xp[[mm]] <- X[[mm]]
attr(Xp[[mm]], "scaled:center") <- NULL
attr(Xp[[mm]], "scaled:scale") <- NULL
}
corr <- array(NA, dim = c(m, m, nvar)) # additional explained correlation
for (pp in seq(nvar)) {
XpW <- matrix(NA, n, m)
for (mm in 1:m) {
w <- W[[mm]][ , pp]
XpW[ , mm] <- Xp[[mm]]%*%w
# deflate data matrix
q <- t(Xp[[mm]])%*%(X[[mm]]%*%w)
q <- q/normv(q)
Xp[[mm]] <- Xp[[mm]] - Xp[[mm]]%*%q%*%t(q)
}
corr[ , , pp] <- cor(XpW, XpW)
}
mcc <- list(cor = corr, coef = coef, center = cen, scale = sc, xp = Xp)
class(mcc) <- "mcancor"
return(mcc)
}
|
# Script to analyse demographic data and rating scales from the oxazepam and emotion project
# Gustav Nilsonne 2015-01-09
# Require packages
library(RCurl) # To read data from GitHub
library(nlme) # To build mixed-effects models
library(effects) # To get confidence intervals on estimates
library(RColorBrewer) # To get good diverging colors for graphs
# Define colors for later
col1 = brewer.pal(3, "Dark2")[1]
col2 = brewer.pal(3, "Dark2")[2]
add.alpha <- function(col, alpha=1){ ## Function to add an alpha value to a colour, from: http://www.magesblog.com/2013/04/how-to-change-alpha-value-of-colours-in.html
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
col3 <- add.alpha(col1, alpha = 0.2)
# Read data
demDataURL <- getURL("https://raw.githubusercontent.com/GNilsonne/Data-and-analysis-code-Oxazepam-and-emotion/master/demographics.csv", ssl.verifypeer = FALSE)
demData <- read.csv(text = demDataURL)
# Descriptive analyses, n per group
length(demData$Subject[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"])
length(demData$Subject[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"])
length(demData$Subject[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"])
length(demData$Subject[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"])
# Descriptive analyses, IRI
mean(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# Descriptive analyses, TAS-20
mean(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# Descriptive analyses, STAI-T
mean(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# Descriptive analyses, PPI-R
mean(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
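# Note (a sketch, not part of the original analysis): the repeated mean()/sd()
# calls above can be computed in one pass per scale with aggregate(); shown here
# for the IRI empathic-concern subscale only.
descriptives_EC <- aggregate(IRI_EC ~ Treatment + Wave,
                             data = demData[demData$Included_EP == 1, ],
                             FUN = function(x) c(mean = mean(x, na.rm = TRUE),
                                                 sd = sd(x, na.rm = TRUE)))
descriptives_EC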
# IRI, test-retest
demData$IRIdiff <- demData$IRI_retest_EC - demData$IRI_EC
mean(demData$IRIdiff[demData$Included_EP == TRUE], na.rm = T)
sd(demData$IRIdiff[demData$Included_EP == TRUE], na.rm = T)
demData$IRIdiff2 <- demData$IRI_scrambled_EC - demData$IRI_EC
t.test(IRIdiff2 ~ Treatment, data = demData[demData$Included_EP == TRUE, ])
# Analyse effect of oxazepam on rated state anxiety
# Make dataframe for mixed-effects model
STAISData <- rbind(demData[, c("Subject", "Treatment", "Wave", "Included_EP", "STAI.S", "STAI.S.Scrambled")], demData[, c("Subject", "Treatment", "Wave", "Included_EP", "STAI.S", "STAI.S.Scrambled")])
STAISData <- STAISData[STAISData$Included_EP == T, ] # Remove participants not included in this experiment
STAISData$FirstOrSecond <- c(rep.int(1, 0.5*length(STAISData$Subject)), rep.int(2, 0.5*length(STAISData$Subject)))
STAISData$STAIS <- NA # Make new column for STAI-S rating, then fill it with values for the first and second ratings, respectively
STAISData$STAIS[STAISData$FirstOrSecond == 1] <- STAISData$STAI.S[STAISData$FirstOrSecond == 1]
STAISData$STAIS[STAISData$FirstOrSecond == 2] <- STAISData$STAI.S.Scrambled[STAISData$FirstOrSecond == 2]
lme1 <- lme(STAIS ~ Treatment * FirstOrSecond + Wave, data = STAISData, random = ~1|Subject, na.action = na.omit)
summary(lme1)
# Inspect residuals
plot(lme1)
# Get estimates
intervals(lme1)
# Plot effects
eff1 <- effect("Treatment * FirstOrSecond", lme1)
pdf("Fig_STAIS.pdf", width = 4, height = 4)
plot(eff1$fit[c(2, 4)],
frame.plot = F,
xaxt = "n",
yaxt = "n",
type = "b",
xlab = "",
ylab = "STAI-S score",
xlim = c(1, 2.1),
ylim = c(32, 38),
col = col1,
main = "B. State anxiety")
lines(c(1.1,2.1), eff1$fit[c(1, 3)], type = "b", col = col2, pch = 16)
lines(c(1, 1), c((eff1$upper[2]), (eff1$lower[2])), col = col1)
lines(c(2, 2), c((eff1$upper[4]), (eff1$lower[4])), col = col1)
lines(c(1.1, 1.1), c((eff1$upper[1]), (eff1$lower[1])), col = col2)
lines(c(2.1, 2.1), c((eff1$upper[3]), (eff1$lower[3])), col = col2)
axis(1, labels = c("Before", "After"), at = c(1.05, 2.05))
axis(2, at = c(32, 34, 36, 38))
#legend("top", col = c("blue", "red"), pch = c(1, 16), legend = c("Placebo", "Oxazepam"), bty = "n")
dev.off()
# Analyse effect of oxazepam on pain thresholds
# Make dataframe for mixed-effects model
VASData <- rbind(demData[, c("Subject", "Treatment", "Wave", "Included_EP", "VAS80_before", "VAS80_after")], demData[, c("Subject", "Treatment", "Wave", "Included_EP", "VAS80_before", "VAS80_after")])
VASData <- VASData[VASData$Included_EP == T, ] # Remove participants not included in this experiment
VASData$FirstOrSecond <- c(rep.int(1, 0.5*length(VASData$Subject)), rep.int(2, 0.5*length(VASData$Subject)))
VASData$VAS80 <- NA # Make new column for the VAS 80 pain threshold, then fill it with values from the first and second measurements, respectively
VASData$VAS80[VASData$FirstOrSecond == 1] <- VASData$VAS80_before[VASData$FirstOrSecond == 1]
VASData$VAS80[VASData$FirstOrSecond == 2] <- VASData$VAS80_after[VASData$FirstOrSecond == 2]
lme2 <- lme(VAS80 ~ Treatment * FirstOrSecond + Wave, data = VASData, random = ~1|Subject, na.action = na.omit)
summary(lme2)
# Inspect residuals
plot(lme2)
# Get estimates
intervals(lme2)
# Plot effects
eff2 <- effect("Treatment * FirstOrSecond", lme2)
pdf("Fig_VAS.pdf", width = 4, height = 4)
plot(eff2$fit[c(2, 4)],
frame.plot = F,
xaxt = "n",
type = "b",
xlab = "",
ylab = "Volts required for VAS 80",
xlim = c(1, 2.1),
ylim = c(65, 85),
col = col1,
main = "C. Pain thresholds")
lines(c(1.1,2.1), eff2$fit[c(1, 3)], type = "b", col = col2, pch = 16)
lines(c(1, 1), c((eff2$upper[2]), (eff2$lower[2])), col = col1)
lines(c(2, 2), c((eff2$upper[4]), (eff2$lower[4])), col = col1)
lines(c(1.1, 1.1), c((eff2$upper[1]), (eff2$lower[1])), col = col2)
lines(c(2.1, 2.1), c((eff2$upper[3]), (eff2$lower[3])), col = col2)
axis(1, labels = c("Before", "After"), at = c(1.05, 2.05))
#legend("top", col = c("blue", "red"), pch = c(1, 16), legend = c("Placebo", "Oxazepam"), bty = "n")
dev.off()
# Analyse participants' guesses of treatment group membership
demData$Guessed.group <- factor(demData$Guessed.group, levels = c("Placebo", "Likely_placebo", "Equivocal", "Likely_oxa", "Oxazepam"), ordered = TRUE)
demData$Guessed.group[demData$Included_EP == 0] <- NA
pdf("Fig_Blinding.pdf", width = 4, height = 4)
barplot(t(matrix(c(table(demData$Guessed.group[demData$Treatment == "Placebo"]), table(demData$Guessed.group[demData$Treatment == "Oxazepam"])), nr = 5)),
beside = TRUE,
names.arg = c("Placebo", "", "Equivocal", "", "Oxazepam"),
xlab = "Guessed group",
ylab = "n",
yaxt = "n",
col = c(col3, col2),
border = c(col1, col2),
lwd = 5,
main = "D. Efficacy of blinding")
axis(2, at = c(0, 2, 4, 6))
legend(c(0, 6.4), legend = c("Placebo", "Oxazepam"), fill = c(col3, col2), border = c(col1, col2), bty = "n")
dev.off()
test1 <- wilcox.test(as.numeric(Guessed.group) ~ Treatment, data = demData, alternative = "greater", paired = F, conf.int = T)
test1
|
/Analyses_of_demographic_data_and_rating_scales.R
|
permissive
|
SandraTamm/Data-and-analysis-code-Oxazepam-and-emotion
|
R
| false | false | 18,053 |
r
|
# Script to analyse demographic data and rating scales from the oxazepam and emotion project
# Gustav Nilsonne 2015-01-09
# Require packages
library(RCurl) # To read data from GitHub
library(nlme) # To build mixed-effects models
library(effects) # To get confidence intervals on estimates
library(RColorBrewer) # To get good diverging colors for graphs
# Define colors for later
col1 = brewer.pal(3, "Dark2")[1]
col2 = brewer.pal(3, "Dark2")[2]
add.alpha <- function(col, alpha=1){ ## Function to add an alpha value to a colour, from: http://www.magesblog.com/2013/04/how-to-change-alpha-value-of-colours-in.html
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
col3 <- add.alpha(col1, alpha = 0.2)
# Read data
demDataURL <- getURL("https://raw.githubusercontent.com/GNilsonne/Data-and-analysis-code-Oxazepam-and-emotion/master/demographics.csv", ssl.verifypeer = FALSE)
demData <- read.csv(text = demDataURL)
# Descriptive analyses, n per group
length(demData$Subject[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"])
length(demData$Subject[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"])
length(demData$Subject[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"])
length(demData$Subject[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"])
# Descriptive analyses, IRI
mean(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_EC[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PT[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_PD[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$IRI_F[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# Descriptive analyses, TAS-20
mean(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$TAS.20[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.identifying.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Difficulty.describing.feelings[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$Externally.oriented.thinking[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# Descriptive analyses, STAI-T
mean(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$STAI.T[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# Descriptive analyses, PPI-R
mean(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_SCI_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_FD_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 1 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Placebo"], na.rm = T)
mean(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
sd(demData$PPI_1_C_R[demData$Wave == 2 & demData$Included_EP == 1 & demData$Treatment == "Oxazepam"], na.rm = T)
# IRI, test-retest
demData$IRIdiff <- demData$IRI_retest_EC - demData$IRI_EC
mean(demData$IRIdiff[demData$Included_EP == TRUE], na.rm = T)
sd(demData$IRIdiff[demData$Included_EP == TRUE], na.rm = T)
demData$IRIdiff2 <- demData$IRI_scrambled_EC - demData$IRI_EC
t.test(IRIdiff2 ~ Treatment, data = demData[demData$Included_EP == TRUE, ])
# Analyse effect of oxazepam on rated state anxiety
# Make dataframe for mixed-effects model
STAISData <- rbind(demData[, c("Subject", "Treatment", "Wave", "Included_EP", "STAI.S", "STAI.S.Scrambled")], demData[, c("Subject", "Treatment", "Wave", "Included_EP", "STAI.S", "STAI.S.Scrambled")])
STAISData <- STAISData[STAISData$Included_EP == T, ] # Remove participants not included in this experiment
STAISData$FirstOrSecond <- c(rep.int(1, 0.5*length(STAISData$Subject)), rep.int(2, 0.5*length(STAISData$Subject)))
STAISData$STAIS <- NA # Make new column for STAI-S rating, then fill it with values for the first and second ratings, respectively
STAISData$STAIS[STAISData$FirstOrSecond == 1] <- STAISData$STAI.S[STAISData$FirstOrSecond == 1]
STAISData$STAIS[STAISData$FirstOrSecond == 2] <- STAISData$STAI.S.Scrambled[STAISData$FirstOrSecond == 2]
lme1 <- lme(STAIS ~ Treatment * FirstOrSecond + Wave, data = STAISData, random = ~1|Subject, na.action = na.omit)
summary(lme1)
# Inspect residuals
plot(lme1)
# Get estimates
intervals(lme1)
# Plot effects
eff1 <- effect("Treatment * FirstOrSecond", lme1)
pdf("Fig_STAIS.pdf", width = 4, height = 4)
plot(eff1$fit[c(2, 4)],
frame.plot = F,
xaxt = "n",
yaxt = "n",
type = "b",
xlab = "",
ylab = "STAI-S score",
xlim = c(1, 2.1),
ylim = c(32, 38),
col = col1,
main = "B. State anxiety")
lines(c(1.1,2.1), eff1$fit[c(1, 3)], type = "b", col = col2, pch = 16)
lines(c(1, 1), c((eff1$upper[2]), (eff1$lower[2])), col = col1)
lines(c(2, 2), c((eff1$upper[4]), (eff1$lower[4])), col = col1)
lines(c(1.1, 1.1), c((eff1$upper[1]), (eff1$lower[1])), col = col2)
lines(c(2.1, 2.1), c((eff1$upper[3]), (eff1$lower[3])), col = col2)
axis(1, labels = c("Before", "After"), at = c(1.05, 2.05))
axis(2, at = c(32, 34, 36, 38))
#legend("top", col = c("blue", "red"), pch = c(1, 16), legend = c("Placebo", "Oxazepam"), bty = "n")
dev.off()
# Analyse effect of oxazepam on pain thresholds
# Make dataframe for mixed-effects model
VASData <- rbind(demData[, c("Subject", "Treatment", "Wave", "Included_EP", "VAS80_before", "VAS80_after")], demData[, c("Subject", "Treatment", "Wave", "Included_EP", "VAS80_before", "VAS80_after")])
VASData <- VASData[VASData$Included_EP == T, ] # Remove participants not included in this experiment
VASData$FirstOrSecond <- c(rep.int(1, 0.5*length(VASData$Subject)), rep.int(2, 0.5*length(VASData$Subject)))
VASData$VAS80 <- NA # Make new column for the VAS 80 pain threshold, then fill it with values from the first and second measurements, respectively
VASData$VAS80[VASData$FirstOrSecond == 1] <- VASData$VAS80_before[VASData$FirstOrSecond == 1]
VASData$VAS80[VASData$FirstOrSecond == 2] <- VASData$VAS80_after[VASData$FirstOrSecond == 2]
lme2 <- lme(VAS80 ~ Treatment * FirstOrSecond + Wave, data = VASData, random = ~1|Subject, na.action = na.omit)
summary(lme2)
# Inspect residuals
plot(lme2)
# Get estimates
intervals(lme2)
# Plot effects
eff2 <- effect("Treatment * FirstOrSecond", lme2)
pdf("Fig_VAS.pdf", width = 4, height = 4)
plot(eff2$fit[c(2, 4)],
frame.plot = F,
xaxt = "n",
type = "b",
xlab = "",
ylab = "Volts required for VAS 80",
xlim = c(1, 2.1),
ylim = c(65, 85),
col = col1,
main = "C. Pain thresholds")
lines(c(1.1,2.1), eff2$fit[c(1, 3)], type = "b", col = col2, pch = 16)
lines(c(1, 1), c((eff2$upper[2]), (eff2$lower[2])), col = col1)
lines(c(2, 2), c((eff2$upper[4]), (eff2$lower[4])), col = col1)
lines(c(1.1, 1.1), c((eff2$upper[1]), (eff2$lower[1])), col = col2)
lines(c(2.1, 2.1), c((eff2$upper[3]), (eff2$lower[3])), col = col2)
axis(1, labels = c("Before", "After"), at = c(1.05, 2.05))
#legend("top", col = c("blue", "red"), pch = c(1, 16), legend = c("Placebo", "Oxazepam"), bty = "n")
dev.off()
# Analyse participants' guesses of treatment group membership
demData$Guessed.group <- factor(demData$Guessed.group, levels = c("Placebo", "Likely_placebo", "Equivocal", "Likely_oxa", "Oxazepam"), ordered = TRUE)
demData$Guessed.group[demData$Included_EP == 0] <- NA
pdf("Fig_Blinding.pdf", width = 4, height = 4)
barplot(t(matrix(c(table(demData$Guessed.group[demData$Treatment == "Placebo"]), table(demData$Guessed.group[demData$Treatment == "Oxazepam"])), nr = 5)),
beside = TRUE,
names.arg = c("Placebo", "", "Equivocal", "", "Oxazepam"),
xlab = "Guessed group",
ylab = "n",
yaxt = "n",
col = c(col3, col2),
border = c(col1, col2),
lwd = 5,
main = "D. Efficacy of blinding")
axis(2, at = c(0, 2, 4, 6))
legend(c(0, 6.4), legend = c("Placebo", "Oxazepam"), fill = c(col3, col2), border = c(col1, col2), bty = "n")
dev.off()
test1 <- wilcox.test(as.numeric(Guessed.group) ~ Treatment, data = demData, alternative = "greater", paired = F, conf.int = T)
test1
|
###################################
# Script setup_sdmdata
# R version 4.1.1
# modleR version 0.0.1
###################################
## Load the installed libraries
library(sp)
library(modleR)
library(raster)
# When you create a project in RStudio (in this example integrated with Git and GitHub), R already knows what the working directory is, so there is no need to give the full (absolute) path. Moreover, absolute paths are generally bad practice because they make the code irreproducible: if you change computers or hand the script to someone else to run, the code will not work, since an absolute path usually points to a specific computer with its own personal folder structure (paths).
# A good practice is, whenever possible, to work with RStudio projects, so that you can use relative paths, i.e. paths that start from the working directory of your session. This encourages us to keep all the files of the analysis inside the project folder. That way, if you use only relative paths and share the project folder with someone (for example via GitHub), all the paths in the code will keep working on any computer! In this practical we will work this way, and for that we will always start the relative path with "./" (although there are other ways).
### Importing and reading your spreadsheet into the R environment. read.csv is a function for reading the .csv extension. In the "file" argument give the relative path of the .csv file, and in the "sep" argument indicate the field separator (what separates the columns).
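# A brief illustration of the idea (a sketch only; the folder names are examples from
# this project and nothing in these two lines is required by the rest of the script):
file.path(".", "dados", "ocorrencias") # relative path: portable across machines
# "C:/Users/someone/Documents/dados/ocorrencias" # absolute path: tied to one computer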
sp_input <- read.csv(file = "./dados/ocorrencias/fusp_input_setupsdmdata.csv", sep = ",") #
##### Putting an underscore (_) between genus and species
sp_input$species <-
gsub(x = sp_input$species,
pattern = " ",
replacement = "_")
##### Viewing the data
(sp_input)
### View(sp_input) = open the data table
## Loading the environmental variables
lista_arquivos <- list.files("./dados/raster/Variaveis_Cortadas_Brasil/", full.names = T, pattern = ".tif")
vars_stack <- stack(lista_arquivos)
plot(vars_stack)
### plot(vars_stack[[1]]) = to see each of the variables, just change the number
## Checking the points on top of the variables once more. This can be done beforehand in QGIS; for checking points, or inspecting pixel values more quickly, QGIS may be more appropriate.
## It may not be possible to generate the image and the code below may fail with an error. For that reason it is advisable to check the points on top of the environmental layers in QGIS.
par(mfrow = c(1, 1), mar = c(2, 2, 3, 1))
for (i in 1:length(unique(sp_input$species))) { # one map per species (a single species here)
plot(!is.na(vars_stack[[1]]),
legend = FALSE,
col = c("white", "#00A08A"))
points(lat ~ lon, data = sp_input, pch = 19)
}
## modleR function 1
setup_sdmdata_1 <- setup_sdmdata(species_name = unique(sp_input$species),
occurrences = sp_input,
lon = "lon",
lat = "lat",
predictors = vars_stack,
models_dir = "./resultados",
partition_type = "crossvalidation",
cv_partitions = 3,
cv_n = 1,
seed = 512,
buffer_type = "mean",
png_sdmdata = TRUE,
n_back = 30,
clean_dupl = TRUE,
clean_uni = TRUE,
clean_nas = TRUE,
geo_filt = FALSE,
geo_filt_dist = 10,
select_variables = TRUE,
sample_proportion = 0.5,
cutoff = 0.7)
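# Optional quick check of what setup_sdmdata() produced (a sketch; the exact files
# written to "./resultados" depend on the modleR version used):
setup_sdmdata_1
list.files("./resultados", recursive = TRUE)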
|
/R/2-Scripts_ENMs_SDMs/1-setup_sdmdata.R
|
no_license
|
th88019635/Tacinga-funalis
|
R
| false | false | 4,052 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Simpson.R
\docType{class}
\name{Simpson-class}
\alias{Simpson-class}
\title{An estimated integral of a function}
\description{
Objects of class \code{Simpson} hold the result of estimating the integral of a function with Simpson's rule
}
\details{
An object of the class `Simpson' has the following slots:
\itemize{
\item \code{bounds} The lower and upper bounds of integration (a and b)
\item \code{X} An ordered list of X values, between a and b
\item \code{Y} An ordered list of Y values, where Yn = F(Xn)
\item \code{integral} An estimate of the integral
}
}
\author{
Jonah Klein-Barton: \email{jonahkleinbarton@gmail.com}
}
|
/integrateIt/man/Simpson.Rd
|
no_license
|
JonahK-B/PS5
|
R
| false | true | 705 |
rd
|
#Load the files
NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
#Creating table with total values for 1999, 2002, 2005 and 2008 for Baltimore
NEI <- NEI[NEI$fips=='24510',]
total <- tapply(NEI$Emissions,NEI$year,sum)
years <- names(total)
table <- data.frame(years = as.numeric(years), total = total) # numeric years so plot() draws a line rather than boxplots
#Plotting graph
png('plot2.png')
plot(table$years,table$total,type='l', main='Total PM2.5 Emissions
by Year in Baltimore', xlab='Year',ylab='Total Emissions (tons)',
ylim=c(0,max(table$total)*1.05))
dev.off()
|
/plot2.R
|
no_license
|
cmakemesay/Exploratory-Data-Analysis-Project
|
R
| false | false | 550 |
r
|
#getting inputs from csv file
data=read.csv('C:/Users/Parashar Parikh/Desktop/UTD/Sem3/stats/Miniprojects/Miniproject_6/prostate_cancer.csv')
#storing all data
psa=data[,2]
cancervol=data[,3]
weight=data[,4]
age=data[,5]
benpros=data[,6]
vesinv=data[,7]
capspen=data[,8]
gleason=data[,9]
#Exploratory Analysis of PSA Feature
#Histogram
hist(psa, xlab="PSA Level",main= "Histogram of PSA Level",breaks=20)
#Q-Q Plots
qqnorm(psa)
qqline(psa)
#Boxplot
boxplot(psa)
#We can see from the box plot that psa has a lot of outliers, so we need to transform it using a log transformation
#Boxplot of transformed psa level (log(psa))
boxplot(log(psa))
#Now we compute the correlations between the features and our response
trandata=data
trandata$psa=log(psa)
cor(trandata[,2:9])
#Transforming psa to log(psa)
logpsa=log(psa)
#We can visualize a pairs plot to look for linear dependencies among the well-correlated variables
pairs(~logpsa + cancervol + capspen + vesinv + gleason + benpros)
# Drawing scatterplots of each variable (except the categorical ones) against log(psa).
plot(cancervol, logpsa, xlab="Cancer Volume",ylab="Log of PSA level")
abline(lm(logpsa ~ cancervol))
plot(weight, logpsa,xlab="Weight", ylab="Log of PSA level")
abline(lm(logpsa ~ weight))
plot(age, logpsa,xlab="Age", ylab="Log of PSA level")
abline(lm(logpsa ~ age))
plot(benpros, logpsa,xlab="Benign prostatic hyperplasia", ylab="Log of PSA level")
abline(lm(logpsa~ benpros ))
plot(capspen, logpsa,xlab="Capsular penetration",ylab="Log of PSA level")
abline(lm(logpsa ~ capspen))
# Checking a model with all (non-categorical) features.
fit1 <- lm(logpsa ~ cancervol + weight + age + benpros + capspen)
fit1
fit2 <- lm(logpsa ~ cancervol + benpros+ capspen)
fit2
# Compare first two models.
anova(fit1, fit2)
# Apply stepwise selection.
# Forward selection based on AIC.
fit3.forward <-step(lm(logpsa ~ 1), scope = list(upper = ~ cancervol + weight + age + benpros + capspen), direction = "forward")
fit3.forward
# Backward elimination based on AIC.
fit3.backward <- step(lm(logpsa ~ cancervol + weight + age + benpros + capspen), scope = list(lower = ~1), direction = "backward")
fit3.backward
# Both forward/backward selection.
fit3.both <- step(lm(logpsa ~ 1),scope = list(lower = ~1, upper = ~ cancervol + weight + age + benpros + capspen), direction = "both")
fit3.both
# Model selected based on the analysis
fit3 <- lm(logpsa ~ cancervol + benpros)
summary(fit3)
# Compare the model with the guess one.
anova(fit3, fit2)
#now we add qualitative (categorical) variables
fit4 <- update(fit3, . ~ . + factor(vesinv))
fit5 <- update(fit3, . ~ . + factor(gleason))
# Comparing two categorical variables.
summary(fit4)
anova(fit3, fit4)
summary(fit5)
anova(fit3, fit5)
# Finalize the model using exploratory analysis
fit6 <- update(fit3, . ~ . + factor(vesinv) + factor(gleason))
summary(fit6)
# Apply stepwise selection and compare with our model. Forward selection based on AIC for all features.
fit7.forward <-step(lm(logpsa ~ 1), scope = list(upper = ~ cancervol + weight + age + benpros + capspen+ as.factor(vesinv)+ as.factor(gleason)), direction = "forward")
fit7.forward
# Backward elimination based on AIC
fit7.backward <- step(lm(logpsa ~ cancervol + weight + age + benpros + capspen+ as.factor(vesinv)+ as.factor(gleason)), scope = list(lower = ~1), direction = "backward")
fit7.backward
# Both forward/backward
fit7.both <- step(lm(logpsa ~ 1),scope = list(lower = ~1, upper = ~ cancervol + weight + age + benpros + capspen + as.factor(vesinv)+ as.factor(gleason)), direction = "both")
fit7.both
#Our model matches the stepwise-selected model, so we continue with fit6 as the final model
# Residual plot of fit6.
plot(fitted(fit6), resid(fit6))
abline(h = 0)
# Plot the absolute residuals of fit6.
plot(fitted(fit6), abs(resid(fit6)))
# Plot the time series of the residuals
plot(resid(fit6), type="l")
abline(h = 0)
# Normal QQ plot of fit6
qqnorm(resid(fit6))
qqline(resid(fit6))
# Function to get the mode
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
# Predict the PSA level with the numerical predictors at their sample means and the categorical predictors at their most frequent level
prediction=predict(fit6, data.frame(cancervol = mean(cancervol),
benpros = mean(benpros),
vesinv = getmode(vesinv),
gleason = getmode(gleason)))
# Since our response variable is log(psa), back-transform the prediction
exp(prediction)
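# A minimal extension of the point prediction above (a sketch reusing fit6 and getmode
# from this script): a 95% prediction interval, back-transformed to the PSA scale.
prediction_interval <- predict(fit6, data.frame(cancervol = mean(cancervol),
                                                benpros = mean(benpros),
                                                vesinv = getmode(vesinv),
                                                gleason = getmode(gleason)),
                               interval = "prediction", level = 0.95)
exp(prediction_interval)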
|
/Miniproject_6/miniproject_6.r
|
no_license
|
parashar18/Statistical-Methods-for-Data-Science-CS-6313
|
R
| false | false | 4,618 |
r
|
library(tidyverse)
library(VennDiagram)
# Create Venn Diagram -----------------------------------------------------
grid.newpage()
draw.pairwise.venn(34642, 22857, 8630, category = c("Geneious_and_Truseq", "DLC380_No_Normal_Pipeline"),
lty = rep("blank", 2), fill = c("light blue", "pink"), alpha = rep(0.5, 2), cat.pos = c(0, 0),
cat.dist = rep(0.025, 2), scaled = FALSE)
grid.newpage()
draw.pairwise.venn(6094, 3442, 2833, category = c("Geneious_and_Truseq", "DLC380_No_Normal_Pipeline"),
lty = rep("blank", 2), fill = c("light blue", "pink"), alpha = rep(0.5, 2), cat.pos = c(0, 0),
cat.dist = rep(0.025, 2), scaled = FALSE)
grid.newpage()
draw.pairwise.venn(6091, 3442, 2833, category = c("Geneious", "DLC380_No_Normal_Pipeline"),
lty = rep("blank", 2), fill = c("light blue", "pink"), alpha = rep(0.5, 2), cat.pos = c(0, 0),
cat.dist = rep(0.025, 2), scaled = FALSE)
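# Sketch of how the hard-coded counts above could be derived from two vectors of
# variant IDs; the vectors below are toy data, not the real call sets.
ids_a <- c("v1", "v2", "v3", "v4")
ids_b <- c("v3", "v4", "v5")
grid.newpage()
draw.pairwise.venn(length(ids_a), length(ids_b), length(intersect(ids_a, ids_b)),
                   category = c("Set A", "Set B"), lty = rep("blank", 2),
                   fill = c("light blue", "pink"), alpha = rep(0.5, 2), scaled = FALSE)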
|
/dlc380_compare_maf/create-venndiagram.R
|
no_license
|
jeffstang/TangJ_prj_portfolio
|
R
| false | false | 1,015 |
r
|
library(ape)
testtree <- read.tree("3139_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3139_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/3139_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
###############################################################################
###############################################################################
###############################################################################
## define helper functions -----------------------------------------------------
###############################################################################
#### function for splitting text into sentences --------------------------------
splitTextIntoSentences <- function(
my_text
){
# '''
    # Splits the text string "my_text", consisting of one or more sentences,
    # into separate sentences with a certain degree of reliability.
# '''
split_indices <- NULL
my_sentences <- NULL
    for(stop_mark in c(
        "\\.\\s*[A-Z]+", ## period, whitespace (>= 0), capital letter
        "\\?\\s*[A-Z]+", ## question mark, whitespace (>= 0), capital letter
        "\\!\\s*[A-Z]+", ## exclamation mark, whitespace (>= 0), capital letter
        "\\:\\s*" ## colon, whitespace (>= 0)
)){
split_indices <- c(
split_indices,
gregexpr(
pattern = stop_mark,
text = my_text
)[[1]] + 1
)
}
ordered_split_indices <- split_indices[split_indices > 0][
order(split_indices[split_indices > 0])
]
if(length(ordered_split_indices) > 0){
ordered_split_indices <- c(
1,
ordered_split_indices,
nchar(my_text)
)
for(i in 1:(length(ordered_split_indices) - 1)){
my_sentences <- c(
my_sentences,
substr(
my_text,
ordered_split_indices[i],
ordered_split_indices[i + 1]
)
)
}
}else{
my_sentences <- my_text
}
for(j in 1:length(my_sentences)){
while(substr(my_sentences[j], 1, 1) == " "){
my_sentences[j] <- substr(
my_sentences[j], 2, nchar(my_sentences[j])
)
}
while(substr(
my_sentences[j],
nchar(my_sentences[j]),
nchar(my_sentences[j])
) == " "){
my_sentences[j] <- substr(
my_sentences[j],
1,
(nchar(my_sentences[j]) - 1)
)
}
}
return(my_sentences)
}
#### --------------------------------------------------------------------------
###############################################################################
#### function for splitting a sentence into words ------------------------------
splitSentenceIntoWords <- function(
my_sentence
){
# '''
    # Splits the sentence "my_sentence" into individual words.
# '''
return(
strsplit(
x = my_sentence,
split = " "
)[[1]]
)
}
#### --------------------------------------------------------------------------
###############################################################################
#### function for building n-grams ---------------------------------------------
getNGrams <- function(
my_splitted_sentences,
n = 2
){
# '''
    # Builds all n-grams for the given "n" from the sentence
    # "my_splitted_sentences" that has already been split into words.
# '''
output <- NULL
if(length(my_splitted_sentences) >= n){
for(i in 1:(length(my_splitted_sentences) - n + 1)){
output <- c(
output,
paste(
my_splitted_sentences[i:(i + n - 1)],
collapse = " "
)
)
}
}
return(output)
}
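#### minimal usage sketch of the three helpers above (toy input only; not part --
#### of the original pipeline) --------------------------------------------------
example_text <- "R is a language for statistics. It is also used for text mining."
example_sentences <- splitTextIntoSentences(example_text)
example_bigrams <- getNGrams(splitSentenceIntoWords(example_sentences[1]), n = 2)
example_bigrams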
#### --------------------------------------------------------------------------
###############################################################################
#### function for webscraping a single Wikipedia page (article type)
#### and for subsequently reformatting it into plain free text -----------------
webscrapeMyWikipediaPage <- function(
page_url
){
# '''
    # The function downloads the static HTML content of one page of the
    # (English) Wikipedia located at the link "page_url". It then extracts
    # only the paragraph passages delimited by the HTML tags <p>...</p>.
    # From these it removes all remaining HTML tags, HTML entities and
    # wiki tags.
    # Finally, it returns a text string corresponding only to the natural
    # text in the paragraphs of the given Wikipedia page.
    # In addition, it extracts from the page text the internal web links
    # to further Wikipedia pages, which can then be scraped as well.
# '''
    ## download the static HTML content ---------------------------------------
my_html <- readLines(
con = page_url,
encoding = "UTF-8"
)
    ## extract only the paragraph passages delimited by HTML tags <p>...</p> --
my_raw_paragraphs <- my_html[
grepl("<p>", my_html) & grepl("</p>", my_html)
]
    ## clean the paragraph text of HTML tags, HTML entities and wiki tags -----
my_paragraphs <- gsub("<.*?>", "", my_raw_paragraphs)
my_paragraphs <- gsub("&.*?;", "", my_paragraphs)
my_paragraphs <- gsub("\\[.*?\\]", "", my_paragraphs)
    my_paragraphs <- gsub("\t", "", my_paragraphs) ## remove tab characters
    ## build one long string (paragraph) --------------------------------------
my_text_output <- paste(my_paragraphs, collapse = " ")
    ## extract from the text all internal web links to further
    ## Wikipedia pages ---------------------------------------------------------
my_links <- paste(
"http://en.wikipedia.org",
gsub(
'\\"',
"",
gsub(
'(.*)(href=)(\\"/wiki/.*?\\")(.*)',
"\\3",
my_raw_paragraphs[
grepl("href=", my_raw_paragraphs)
]
)
),
sep = ""
)
    ## remove nonsensical outlinks -- those that contain a space, and those
    ## that point to an entire portal or category (these are usually just
    ## lists of entries and therefore unsuitable for building the corpus) ------
my_links <- my_links[!grepl(" ", my_links)]
my_links <- my_links[!grepl("Portal:", my_links)]
my_links <- my_links[!grepl("Category:", my_links)]
    ## return the output -------------------------------------------------------
return(
list(
"text_stranky" = my_text_output,
"outlinky_stranky" = my_links
)
)
}
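#### minimal usage sketch of the webscraping helper; left commented out because it
#### performs a live network request, and the URL is only an illustrative example
# example_page <- webscrapeMyWikipediaPage("http://en.wikipedia.org/wiki/R_(programming_language)")
# substr(example_page$text_stranky, 1, 200)
# head(example_page$outlinky_stranky)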
#### --------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
|
/seminarni_prace/asynchronni_ulohy_seminarni_prace/helper_functions.R
|
no_license
|
LStepanek/4IZ470_Dolovani_znalosti_z_webu
|
R
| false | false | 7,778 |
r
|
context("utils")
test_that("function tryCatchExt catches errors", {
catchErr <- tryCatchExt(stop("testErr"))
expect_null(catchErr$value)
expect_null(catchErr$warning)
expect_equal(catchErr$error, "testErr")
})
test_that("function tryCatchExt catches warnings", {
catchWarn <- tryCatchExt(warning("testWng"))
expect_equal(catchWarn$value, "testWng")
expect_equal(catchWarn$warning, "testWng")
expect_null(catchWarn$error)
catch2Warn <- tryCatchExt({warning("testWng"); warning("testWng2")})
expect_equal(catch2Warn$value, "testWng2")
expect_equal(catch2Warn$warning, c("testWng", "testWng2"))
})
test_that("function tryCatchExt returns values", {
catchVal <- tryCatchExt(1)
expect_equal(catchVal$value, 1)
expect_null(catchVal$warning)
expect_null(catchVal$error)
})
test_that("function tryCatchExt returns combinations of outputs", {
catchWarnVal <- tryCatchExt({warning("testWng"); 1})
expect_equal(catchWarnVal$value, 1)
expect_equal(catchWarnVal$warning, "testWng")
expect_null(catchWarnVal$error)
catchWarnErr <- tryCatchExt({warning("testWng"); stop("testErr")})
expect_null(catchWarnErr$value)
expect_equal(catchWarnErr$warning, "testWng")
expect_equal(catchWarnErr$error, "testErr")
})
test_that("function supprWarn functions properly", {
expect_warning(supprWarn(sqrt(-1), "testMsg"), "NaNs produced")
expect_silent(supprWarn(sqrt(-1), "NaNs produced"))
})
test_that("function wrnToErr functions properly", {
mod <- list(error = "testErr",
warning = c("testWrn", "Abnormal termination"))
modOut <- wrnToErr(mod)
expect_equal(modOut$error, c("testErr", "Abnormal termination"))
expect_equal(modOut$warning, "testWrn")
})
|
/tests/testthat/test-utils.R
|
no_license
|
Manigben/statgenGxE
|
R
| false | false | 1,706 |
r
|
library(RWDataPlyr)
context('check that getDataForAllScens works')
# get a specified set of slots and apply some aggregation method to them
scenNames <- scenFolders <- c('ISM1988_2014,2007Dems,IG,Most')
slotAggList <- slot_agg_list(system.file(
'extdata/SlotAggTable.csv',
package = 'RWDataPlyr'
))
scenPath <- system.file('extdata','Scenario/',package = 'RWDataPlyr')
oFile <- 'tmp.txt'
expect_warning(keyData <- getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
"tmp2.txt"
))
on.exit(file.remove("tmp2.txt"), add = TRUE)
slotAggList <- list(list(rdf = 'KeySlots.rdf', slots = 'all'))
# will return monthly data for all slots in KeySlots.rdf
expect_warning(allData <- getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
oFile
))
on.exit(file.remove("tmp.txt"), add = TRUE)
expectedSlotNames <- sort(paste(rdf_slot_names(keyRdf),'Monthly','1',sep='_'))
test_that("getting all slot data from RDF does actually return all slots", {
expect_equal(levels(as.factor(allData$Variable)),expectedSlotNames)
})
test_that("getting all slot data matches a pre-configured slotAggList", {
expect_equal(
dplyr::filter(keyData, Variable == 'Powell.Outflow_EOCY_0.001')$Value,
(dplyr::filter(
allData,
Variable == 'Powell.Outflow_Monthly_1',
Month == 'December'
)$Value) * 0.001
)
expect_equal(
dplyr::filter(keyData, Variable == 'Mead.Pool Elevation_EOCY_1')$Value,
dplyr::filter(
allData,
Variable == 'Mead.Pool Elevation_Monthly_1',
Month == 'December'
)$Value
)
})
test_that('file extension is checked', {
expect_error(
expect_warning(
getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
'tst.xyz'
)
),
paste0(
'oFile has an invalid file exention.\n',
'getDataForAllScens does not know how to handle ".', 'xyz',
'" extensions.'
)
)
expect_error(
expect_warning(
getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
'tst.cvs'
)
),
paste0(
'oFile has an invalid file exention.\n',
'getDataForAllScens does not know how to handle ".', 'cvs',
'" extensions.'
)
)
})
# a .txt already exists, create .csv and .feather
# monthly
expect_warning(getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
"tmp.feather"
))
on.exit(file.remove(c("tmp.feather")), add = TRUE)
expect_warning(getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
"tmp.csv"
))
on.exit(file.remove("tmp.csv"), add = TRUE)
# annual (keyData)
slotAggList <- slot_agg_list(system.file(
'extdata/SlotAggTable.csv',
package = 'RWDataPlyr'
))
expect_warning(getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
"tmp2.feather"
))
on.exit(file.remove("tmp2.feather"), add = TRUE)
expect_warning(getDataForAllScens(
scenFolders,
scenNames,
slotAggList,
scenPath,
"tmp2.csv"
))
on.exit(file.remove("tmp2.csv"), add = TRUE)
test_that("data matches regardless of file extension", {
expect_equal(keyData, data.table::fread("tmp2.txt", data.table = FALSE))
expect_equal(allData, data.table::fread("tmp.txt", data.table = FALSE))
expect_equal(keyData, data.table::fread("tmp2.csv", data.table = FALSE))
expect_equal(allData, data.table::fread("tmp.csv", data.table = FALSE))
expect_equal(keyData, as.data.frame(feather::read_feather("tmp2.feather")))
expect_equal(allData, as.data.frame(feather::read_feather("tmp.feather")))
})
|
/tests/testthat/test_getDataForAllScens.R
|
permissive
|
rabutler/RWDataPlyr
|
R
| false | false | 3,654 |
r
|
#' Send text to Microsoft Cognitive Services' Sentiment API
#'
#' Send lines of text to an API to get the sentiment score returned
#'
#' @param textdf A data.frame consisting of two cols with colnames `c("id","text")`.
#' Optionally you can also provide a "language" column with ISO country codes,
#' otherwise it will default to "en".
#' @param apikey Your key for working with Microsoft Cognitive Services
#'
#' @return response A data.frame with id and a sentiment score
#'
#' @export
#'
getSentiment<-function(textdf, apikey=NULL){
if(is.null(apikey)) apikey<-APIKEY
stopifnot(inherits(textdf, "data.frame"))
if(!("language" %in% colnames(textdf))) textdf$language <-"en"
tosend<-jsonlite::toJSON(list(documents= textdf))
cogapi<-"https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment"
# Construct a request
response<-httr::POST(cogapi,
httr::add_headers(`Ocp-Apim-Subscription-Key`=apikey),
body=tosend)
respcontent<-httr::content(response, as="text")
responses<-jsonlite::fromJSON(respcontent)$documents
if(class(textdf$id) %in% c("numeric","integer")) responses$id<-as.numeric(responses$id)
# Combine
return( dplyr::left_join(textdf, responses, by="id"))
}
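# Minimal usage sketch; left commented out because it posts data to the Microsoft
# Cognitive Services endpoint and the key below is only a placeholder.
# example_text <- data.frame(id = 1:2,
#                            text = c("I love this product", "This was a bad experience"),
#                            stringsAsFactors = FALSE)
# getSentiment(example_text, apikey = "YOUR-SUBSCRIPTION-KEY")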
|
/R/getSentiment.R
|
no_license
|
kashenfelter/TextAnalysis
|
R
| false | false | 1,256 |
r
|
library(utils) #read.csv, read.table
library(stringr) #str_match
library(sp) #CRS, coordinates, proj4string
library(sf) # st_as_sf, st_read, st_geometry
# Assemble WildTrax data
#There are 5 filenames that use apostrophes or special characters. The system does not recognize them, so those names need to be changed manually.
wdir <- "path/to/unzip/folder/"
wt_data <- "name/of/folder"
wtList <- list.files(file.path(wdir, wt_data), pattern = ".report.csv")
wt_report <- lapply(wtList, function(x) {
print(x)
f_wt <- read.csv(file.path(wdir, wt_data, x), sep=",", header = TRUE, stringsAsFactors = FALSE)
#f_aru <- read_csv(fi, sep=",")
return(f_wt)
})
wt_bind <-do.call(rbind, wt_report)
## Extract Status and Sensor from abstract
wtproject <- list.files(file.path(wdir, wt_data), pattern = ".abstract.csv")
wt_report <- lapply(wtproject, function(x) {
f_wt<- read.table(file.path(wdir, wt_data, x),sep=",", allowEscapes=TRUE)
org_extract <- str_match(f_wt, "Organization: \\s*(.*?)\\s*\n")[,2]
org_extract <- gsub("/.*$","",org_extract)
prj_extract <- str_match(f_wt, "Project Name: \\s*(.*?)\\s*\n")[,2]
stat_extract <- str_match(f_wt, "Project Status: \\s*(.*?)\\s*\n")[,2]
sensor_extract <- str_match(f_wt, "Sensor: \\s*(.*?)\\s*\n")[,2]
data.frame(organization = org_extract, project = prj_extract , status = stat_extract, sensor = sensor_extract, stringsAsFactors=FALSE)
})
prj_tbl <- do.call(rbind, wt_report)
# Merge
wt_data <- merge(wt_bind, prj_tbl, by = c("project", "organization"), all.x = TRUE)
#####################
#-- MAP
#####################
# Delete NAs
wt_geo <-wt_data[!(is.na(wt_data$latitude)),]
wt_geo <-wt_geo[!(is.na(wt_geo$longitude)),]
# common projections
DD <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
LAEA <- CRS("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs")
coordinates(wt_geo) <- c("longitude", "latitude")
proj4string(wt_geo) <- DD
pc <- st_as_sf(wt_geo, coords = c("longitude", "latitude"), crs = DD)
## Map Studyregion
f_studyregion <- "Path/to/studyregion/shapefile.shp"
studyregion <- st_read(f_studyregion)
plot(st_geometry(studyregion))
## Add point (ARU, PC or PC2 (pending to WildTrax))
sensor <- pc$sensor
plot(st_geometry(pc), pch = 20, col=ifelse(sensor=="ARU", "red",ifelse(sensor== "PC", "black", "blue")), add= TRUE)
|
/WildTrax/WTbind.R
|
no_license
|
borealbirds/BAMTools
|
R
| false | false | 2,358 |
r
|
# Page No. 179
ct<-c("c1","c2","c3","c4","c5","c6","c7","c8","c9","c10")
t(combn(ct, 2))
t_p<-nrow(t(combn(ct, 2)))
p_s2c<-1/t_p
print(p_s2c)
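# Cross-check (a sketch): the same probability follows directly from the number of
# 2-element combinations of the 10 cars, choose(10, 2) = 45, so p = 1/45.
1/choose(10, 2)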
|
/An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH4/EX4.20/Ex4_20.r
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false | false | 143 |
r
|
library(data.table)
library(lubridate) # provides dmy_hm() used below
# path <- "S:/kachharaa/CONA/Arrowtown2019/ODINs/"
# setwd(path)
## creating the location time series up to the present ####
odin.loc.info <- fread("S:/kachharaa/CONA/Arrowtown2019/ODINs/odin_locations.txt", stringsAsFactors = F)
odin.loc.info$startdate <- dmy_hm(odin.loc.info$startdate, tz = "Pacific/Auckland")
odin.loc.info$enddate <- dmy_hm(odin.loc.info$enddate, tz = "Pacific/Auckland")
### currently valid locations get an end date of now + 1 day attached ####
odin.loc.info$enddate[which(is.na(odin.loc.info$enddate))] <- Sys.time() + 86400
### creating 1 minute time series of locations#####
location.list <-list()
for(i in 1:nrow(odin.loc.info)) {
cur.loc <- odin.loc.info[i,]
locations.ts <- data.table(date = seq(cur.loc$startdate, cur.loc$enddate, by = "1 min"))
locations.ts$serialn <- cur.loc$serialn
locations.ts$serialn_concat <- cur.loc$serialn_concat
locations.ts$lat <- cur.loc$lat
locations.ts$lon <- cur.loc$lon
location.list[[i]] <- locations.ts
}
allLocations <- rbindlist(location.list)
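# Quick sanity check of the assembled one-minute series (a sketch): deployment window
# and number of minutes per ODIN serial number.
allLocations[, .(start = min(date), end = max(date), n_minutes = .N), by = serialn]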
|
/LocationTimeSeries_ODINs.R
|
no_license
|
niwa/CONA_Arrowtown
|
R
| false | false | 1,070 |
r
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
shinyServer <- function(input, output, session) {
session$onSessionEnded(stopApp) # it can be annoying that when you close the browser window, the app is still running and you need to manually press “Esc” to kill it
# inputs from ui:
# input$Title = "title for tables and graphs"
# input$IndexTest = "name of index test"
# input$ReferenceTest = "name of reference test"
#
# input$gPrevalence --- true prevalence
# input$iPopulation --- population in the ir (Index cf Reference test) contingency matrix
#
# input$irSen
# input$irSpec
#
# input$rgSen
# input$rgSpec
#
# input$igSen
# input$igSpec
#
  igDxAcc <- rgDxAcc <- irDxAcc <- initDxAccList() # initialise diagnostic accuracy lists
output$debug1 <- renderPrint({
sessionInfo()
})
output$debug2 <- renderPrint({
irTitle()
})
# Tabulate (for the index test)
  # true accuracy measures, absolute errors, percentage errors (for mid-ranges of given parameters). And lower and upper uncertainty intervals with 95% limits derived from a probability sensitivity analysis which varies measured and assumed parameters across their limits, with PDFs able to be selected by the user from an option list.
# graphs
# 1. Mosaic plots (to be shown in facets) of:
# a. Index test: observed TP, FP, FN, TN
# b. Reference test: assumed TP, FP, FN, TN (derived from sensitivity etc)
# c. Index test: derived true TP, FP, FN, TN
# d. Error matrix: observed – true index test evaluation data
# 2. Dependence of measured sensitivity of the index test on true sensitivity of reference test
# X = assumed true sensitivity of reference test
# (from lower to upper limit)
# Y = derived true sensitivity of index test (ribbon with 95% limits derived from probability sensitivity analysis which varies measured and assumed parameters across their limits)
# Y = measured sensitivity of index test (ribbon with given limits)
# 3. Dependence of measured specificity of the index test on true specificity of reference test
# 4. As above for specificity, mutatis mutandis
# 5. As above, mutatis mutandis, for predictive values, false positive and false negative rates
# 6. Animations of effects of univariate incremental changes in true sensitivity and specificity of reference test
#
##############################################################################################################################
#
# Tabulate (for the index test)
  # true accuracy measures, absolute errors, percentage errors (for mid-ranges of given parameters). And lower and upper uncertainty intervals with 95% limits derived from a probability sensitivity analysis which varies measured and assumed parameters across their limits, with PDFs able to be selected by the user from an option list.
#
# set titles and labels for index and reference tests
irTitle <- eventReactive(input$GoButton,
{
paste0("Contingency matrix and diagnostic accuracy stats for ", input$IndexTest, " compared to ", input$ReferenceTest)
})
rgTitle <- eventReactive(input$GoButton,
{
paste0("Contingency matrix and diagnostic accuracy stats for ", input$ReferenceTest, " compared to gold standard")
})
igTitle <- eventReactive(input$GoButton,
{
paste0("Contingency matrix and diagnostic accuracy stats for ", input$IndexTest, " adjusted for inaccuracies in ", input$ReferenceTest)
})
IT <- eventReactive(input$GoButton, {
irDxAcc$Title <- input$Title
irDxAcc$IndexTest <- input$IndexTest
irDxAcc$ReferenceTest <- input$ReferenceTest
# set population and prevalence
irDxAcc$DxStats["Estimate","Prevalence"] <- input$gPrevalence
irDxAcc$DxStats["Estimate","Population"] <- input$iPopulation
# set sensitivity and specificity
# use the given range for low and high limits, and their mean for the estimate
irDxAcc$DxStats["Conf_Low","Sensitivity"] <- input$irSen[1]
irDxAcc$DxStats["Estimate","Sensitivity"] <- mean(input$irSen)
irDxAcc$DxStats["Conf_high","Sensitivity"] <- input$irSen[2]
irDxAcc$DxStats["Conf_Low","Specificity"] <- input$irSpec[1]
irDxAcc$DxStats["Estimate","Specificity"] <- mean(input$irSpec)
irDxAcc$DxStats["Conf_high","Specificity"] <- input$irSpec[2]
# calculate contingency matrix and diagnostic accuracy stats
##### to do: update function to calculate confidence limits
irDxAcc <- DxAcc(irDxAcc, direction = "From stats", CImethod = "proportion")
return(irDxAcc)
})
RT <- eventReactive(input$GoButton, {
rgDxAcc$Title <- input$Title
rgDxAcc$IndexTest <- input$IndexTest
rgDxAcc$ReferenceTest <- input$ReferenceTest
# assume same population and prevalence for reference test as for index test
rgDxAcc$DxStats["Estimate","Prevalence"] <- input$gPrevalence
rgDxAcc$DxStats["Estimate","Population"] <- input$iPopulation
# set sensitivity and specificity
# use the given range for low and high limits, and their mean for the estimate
rgDxAcc$DxStats["Conf_Low","Sensitivity"] <- input$rgSen[1]
rgDxAcc$DxStats["Estimate","Sensitivity"] <- mean(input$rgSen)
rgDxAcc$DxStats["Conf_high","Sensitivity"] <- input$rgSen[2]
rgDxAcc$DxStats["Conf_Low","Specificity"] <- input$rgSpec[1]
rgDxAcc$DxStats["Estimate","Specificity"] <- mean(input$rgSpec)
rgDxAcc$DxStats["Conf_high","Specificity"] <- input$rgSpec[2]
# calculate contingency matrix and diagnostic accuracy stats
rgDxAcc <- DxAcc(rgDxAcc, direction = "From stats", CImethod = "estimated range")
return(rgDxAcc)
})
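    # (editorial sketch, not part of the original file) The outputs below reference an ITA()
    # reactive -- the index test adjusted for inaccuracies in the reference test -- which is not
    # defined in this file. Assuming it follows the same pattern as IT() and RT() above, a
    # minimal sketch might look like the following (the DxAcc() arguments are assumptions):
    # ITA <- eventReactive(input$GoButton, {
    #   igDxAcc$Title <- input$Title
    #   igDxAcc$IndexTest <- input$IndexTest
    #   igDxAcc$ReferenceTest <- input$ReferenceTest
    #   igDxAcc$DxStats["Estimate","Prevalence"] <- input$gPrevalence
    #   igDxAcc$DxStats["Estimate","Population"] <- input$iPopulation
    #   igDxAcc <- DxAcc(igDxAcc, direction = "From stats", CImethod = "estimated range")
    #   return(igDxAcc)
    # })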
# print tables for index test (measured)
output$ITtitle <- renderText(irTitle())
output$ITCMTable <- renderTable(IT()$DxCM)
output$ITStatsTable <- renderTable(IT()$DxStats)
# print tables for reference test (estimated)
    output$RTtitle <- renderText(rgTitle())
output$RTStatsTable <- renderTable(RT()$DxStats)
output$RTCMTable <- renderTable(RT()$DxCM)
# print tables for index test (adjusted for imperfect reference test)
    output$ITAtitle <- renderText(igTitle())
output$ITAStatsTable <- renderTable(ITA()$DxStats)
output$ITACMTable <- renderTable(ITA()$DxCM)
# input$Title = "title for tables and graphs"
# input$IndexTest = "name of index test"
# input$ReferenceTest = "name of reference test"
#
# input$gPrevalence
# input$iPopulation
#
# input$irSen
# input$irSpec
#
# input$rgSen
# input$rgSpec
#
# input$igSen
# input$igSpec
}
|
/ShinyApps/ImpRefApp/server.R
|
no_license
|
NIHRDECncl/R-tools
|
R
| false | false | 7,041 |
r
|
|
library(data.table)
library(ggplot2)
library(gridExtra)
library(plyr)
rm(list=ls())
coulomb<-6.24151e18
ee<-1.60217733E-19
chargestate<-2
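# (editorial note) constants above: 6.24151e18 is the number of elementary charges per coulomb,
# 1.60217733E-19 C is the elementary charge, and chargestate is presumably the ion charge state
# used when converting measured charge or current into particle counts.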
#setwd("/Volumes/ibares/AMS/AMS-Messdaten/2017/2017_07_10f_Be-7_Be-10/ssi-batch/2017_07_13_7Be_RAIN_HANNOVER/")
setwd('M://ibares/AMS/AMS-Messdaten/2017/2017_07_10f_Be-7_Be-10/ssi-batch/2017_07_13_7Be_RAIN_HANNOVER/')
dir.create(paste(getwd(),"/tmp_ssi_results/",sep=""))
results_directory<-paste(getwd(),"/tmp_ssi_results/", sep="")
#source("/Volumes/ibares/AMS/Programme/R_Studio/use4ssi/read_all_ssi_blocks.R")
source("M://ibares/AMS/Programme/R_Studio/use4ssi/read_all_ssi_blocks.R")
all_blocks<-read.csv("tmp_ssi_results/all_samples_and_blocks.csv")
#create new empty data.frame for sample summaries
summary_ssires<-data.frame()
#start sample evaluation
#source("/Volumes/ibares/AMS/Programme/R_Studio/use4ssi/while_evaluation.R")
source("M://ibares/AMS/Programme/R_Studio/use4ssi/while_evaluation.R")
#create final summary of machine ratios per sample
sumSample<-data.frame()
sumSample<-ddply(summary_ssires,.(sampleID), function(x)
{data.frame(w_mean_of_turns=weighted.mean(x$w_mean, 1/x$final_error),
error=1/sqrt(sum(1/x$final_error^2))
)})
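# (editorial note) the combined error above is the usual inverse-variance formula
# 1/sqrt(sum(1/sigma_i^2)); note that weighted.mean() uses weights 1/final_error rather than
# 1/final_error^2, which may be intentional but differs from strict inverse-variance weighting.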
#write csv files
write.csv(summary_ssires,paste(results_directory,"summary_per_ssires.csv",sep = "/"),row.names = FALSE)
write.csv(sumSample,paste(results_directory,"samples_machineRatios.csv",sep = "/"), row.names = FALSE)
|
/sample_evaluation.R
|
no_license
|
NikSeldon/ams_dd_data_analysis
|
R
| false | false | 1,438 |
r
|
|
test_that("returns double", {
vector <- c("a", "b", "a", "a")
contrasts <- build_contrast(vector, "a", "b")
expect_type(contrasts, "double")
})
test_that("returns two codes only", {
vector <- c("a", "b", "a", "a")
contrasts <- build_contrast(vector, "a", "b")
expect_length(unique(contrasts), 2L)
})
test_that("does not throw an error", {
vector <- c("a", "b", "a", "a")
expect_silent(build_contrast(vector, "a", "b"))
})
test_that("default method does not throw an error", {
vector <- c(1, 1, 2)
expect_silent(build_contrast(vector, "1", "2"))
})
|
/tests/testthat/test-build_contrast.R
|
no_license
|
cran/JSmediation
|
R
| false | false | 598 |
r
|
|
test_that("hello returns a greeting", {
expect_equal(hello("J"), "Hello, J, this is the world!")
})
|
/tests/testthat/test-hello.R
|
permissive
|
AlexKorole/myRTestPrj
|
R
| false | false | 98 |
r
|
|
#' @title n_activity_instances
#'
#' @description Counts the number of distinct activity instances in an event log.
#'
#' @param eventlog An eventlog object.
#' @return The number of unique activity instances in the event log.
#' @export n_activity_instances
n_activity_instances <- function(eventlog) {
stop_eventlog(eventlog)
colnames(eventlog)[colnames(eventlog) == activity_instance_id(eventlog)] <- "activity_instance_classifier"
return(length(unique(eventlog$activity_instance_classifier)))
}
|
/R/n_activity_instances.R
|
no_license
|
smyth7/edeaR
|
R
| false | false | 310 |
r
|
|
# Read file with bitcoin and google trends data and perform basic analysis
#
# Date: 20/09/2017
source('..//Programas/p0_setup_v2.R') #.. setup, basic functions, etc...
today = as.character(Sys.Date()) # to be used to name output data files
# btc_gtrends =read.xlsx("bitcoin_GT_consolidated_until_20170830.xlsx", sheetIndex = 1)
#btc_gtrends =read.xlsx("bitcoin_GT_consolidated_until_20170906.xlsx", sheetIndex = 1)
#btc_gtrends =read.xlsx("bitcoin_GT_consolidated_until_20170913.xlsx", sheetIndex = 1)
btc_gtrends =read.xlsx("bitcoin_GT_consolidated_until_20170920.xlsx", sheetIndex = 1)
View(btc_gtrends)
str(btc_gtrends)
colnames(btc_gtrends) <- c("date", "BTC_price", "GTrends")
btc_gtrends$date = as.POSIXlt(btc_gtrends$date, origin = "1970-01-01")
n = nrow(btc_gtrends)
# dates column is usually messed up, so reconstruct it
a = as.POSIXct(btc_gtrends$date[1])
b = round(as.POSIXct(btc_gtrends$date[n]), units = "hours")
btc_gtrends$date = seq(a, b, by="hour")
rm(a,b)
# delete empty column (junk) if it appears
btc_gtrends = btc_gtrends[,-4]
View(btc_gtrends)
# Convert series to xts
btc_gtrends$BTC_price = as.xts(btc_gtrends$BTC_price, order.by = btc_gtrends$date)
btc_gtrends$GTrends = as.xts(btc_gtrends$GTrends, order.by = btc_gtrends$date)
data.to.plot <- cbind(btc_gtrends$BTC_price, btc_gtrends$GTrends)
colnames(data.to.plot) <- c("BTC_price", "GTrends")
fig= autoplot(as.xts(data.to.plot), facets = NULL) +
geom_line(cex = 1.05) + geom_point() +
ggtitle("Bitcoin Price and Google Trends\n") +
scale_y_continuous(sec.axis = ~./50) +
theme(legend.position="none") #+ guides(fill=FALSE) #scale_fill_discrete(guide=FALSE)
print(fig)
# Create function to normalize data to 0-1 range
normalize01 <- function(x) {
# normalizes series to 0-1 range
score = (x - min(x, na.rm = T))/(max(x, na.rm = T) - min(x, na.rm = T))
return(score)
}
#=============================================================================
# Graphs - BTC price and GTrends data
# I had to change the axis AND rescale the GTrends data so both series would
# fit nicely in the same graph
# Change accordingly for other currencies
#=============================================================================
fig = ggplot() + xlab('date') + ylab('values') +
ggtitle("Bitcoin Price and Google Trends\n (both scaled to 0-100)") +
geom_line(data = btc_gtrends, aes(x = date, y = 100*normalize01(BTC_price), col = "BTC price"), color = "red", cex = 1.05) +
geom_line(data = btc_gtrends, aes(x = date, y = GTrends, col = "Google Trends"), color = "steelblue", cex = 1.05) +
#scale_y_continuous(sec.axis = ~./50) +
#ylim = c(0.0, max(btc_gtrends$BTC_price)) +
theme(legend.position="bottom")
print(fig)
# Scatter plot
fig = ggplot(data = btc_gtrends, aes(x = as.vector(GTrends),
y = as.vector(BTC_price))) +
geom_point(colour = "steelblue", size = 3, shape = 15) + #shape = 16 are filled circles
ggtitle("Bitcoin Price and Google Trends\n")
print(fig)
#==============================================================================
# Cross correlations of series (in the original scale)
# ============================================================================
lag_max = as.integer(readline(prompt = "****** MAXIMUM LAG ?? ***** "))
ccf_BTC_GT = stats::ccf(as.vector(btc_gtrends$BTC_price),
as.vector(btc_gtrends$GTrends), lag.max = lag_max, na.action = na.pass, plot = FALSE)
# maximum cross correlation
max(ccf_BTC_GT$acf)
# show the index position where the maximum ccf occurs
index_pos_max_ccf=which(ccf_BTC_GT$acf == max(ccf_BTC_GT$acf))
index_pos_max_ccf
# the corresponding lag is
lag_max_ccf = index_pos_max_ccf - (lag_max + 1)
#which(ccf_BTC_GT$acf == max(ccf_BTC_GT$acf))
lag_max_ccf
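# (editorial note) ccf() stores correlations for lags -lag_max ... +lag_max, so position k in $acf
# corresponds to lag k - (lag_max + 1); equivalently, the lag can be read directly from the object:
# lag_max_ccf = ccf_BTC_GT$lag[index_pos_max_ccf]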
# plot ccf
fig = plot(ccf_BTC_GT, main = "Cross Correlations - BTC and GT - original scale")
print(fig)
# Creates bitcoin LOG-return series
btc_gtrends$return = log(btc_gtrends$BTC_price) - log(quantmod::Lag(btc_gtrends$BTC_price, k = 1))
#quantmod::dailyReturn(btc_gtrends$BTC_price, type = "log")
# Creates differenced google trends series
btc_gtrends$dif_GT = diff(btc_gtrends$GTrends,lag = 1, differences = 1, na.pad = TRUE)
# creates lagged DIFFERENCED google trends
btc_gtrends$dif_GT_lag1 = quantmod::Lag(btc_gtrends$dif_GT, k = 1)
btc_gtrends$dif_GT_lag2 = quantmod::Lag(btc_gtrends$dif_GT_lag1, k = 1)
btc_gtrends$dif_GT_lag3 = quantmod::Lag(btc_gtrends$dif_GT_lag2, k = 1)
btc_gtrends$dif_GT_lag4 = quantmod::Lag(btc_gtrends$dif_GT_lag3, k = 1)
btc_gtrends$dif_GT_lag5 = quantmod::Lag(btc_gtrends$dif_GT_lag4, k = 1)
btc_gtrends$dif_GT_lag6 = quantmod::Lag(btc_gtrends$dif_GT_lag5, k = 1)
btc_gtrends$dif_GT_lag7 = quantmod::Lag(btc_gtrends$dif_GT_lag6, k = 1)
btc_gtrends$dif_GT_lag8 = quantmod::Lag(btc_gtrends$dif_GT_lag7, k = 1)
btc_gtrends$dif_GT_lag9 = quantmod::Lag(btc_gtrends$dif_GT_lag8, k = 1)
btc_gtrends$dif_GT_lag10 = quantmod::Lag(btc_gtrends$dif_GT_lag9, k = 1)
btc_gtrends$dif_GT_lag11 = quantmod::Lag(btc_gtrends$dif_GT_lag10, k = 1)
btc_gtrends$dif_GT_lag12 = quantmod::Lag(btc_gtrends$dif_GT_lag11, k = 1)
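# (editorial sketch) the twelve lag columns above could equivalently be built in a loop, assuming
# the same quantmod::Lag() behaviour (Lag(x, k = k) equals Lag applied k times with k = 1):
# for (k in 1:12) {
#   btc_gtrends[[paste0("dif_GT_lag", k)]] = quantmod::Lag(btc_gtrends$dif_GT, k = k)
# }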
#==============================================================================
# Cross correlation - bitcoin RETURNS and GT differenced
#==============================================================================
ccf_BTC_ret_GT_dif = stats::ccf(as.vector(btc_gtrends$return),
as.vector(btc_gtrends$dif_GT), lag.max = lag_max, na.action = na.pass, plot = FALSE)
# maximum cross correlation in ABSOLUTE value
max_abs_ccf=max(abs(ccf_BTC_ret_GT_dif$acf))
max_abs_ccf
# show the index position where the maximum ccf occurs
index_pos_max_ccf_ret = which(ccf_BTC_ret_GT_dif$acf == max((ccf_BTC_ret_GT_dif$acf)))
index_pos_max_ccf_ret
# the corresponding lag is
lag_max_ccf_ret = index_pos_max_ccf_ret - (lag_max + 1)
lag_max_ccf_ret
# plot ccf
fig = plot(ccf_BTC_ret_GT_dif, main = "Cross Correlations -\n Return BTC and Differenced GT")
print(fig)
print(ccf_BTC_ret_GT_dif)
# creates lagged google trends
# btc_gtrends$GT_lag1 = quantmod::Lag(btc_gtrends$GTrends, k = 1)
# btc_gtrends$GT_lag2 = quantmod::Lag(btc_gtrends$GT_lag1, k = 1)
# btc_gtrends$GT_lag3 = quantmod::Lag(btc_gtrends$GT_lag2, k = 1)
# btc_gtrends$GT_lag4 = quantmod::Lag(btc_gtrends$GT_lag3, k = 1)
# btc_gtrends$GT_lag5 = quantmod::Lag(btc_gtrends$GT_lag4, k = 1)
# btc_gtrends$GT_lag6 = quantmod::Lag(btc_gtrends$GT_lag5, k = 1)
# btc_gtrends$GT_lag7 = quantmod::Lag(btc_gtrends$GT_lag6, k = 1)
# btc_gtrends$GT_lag8 = quantmod::Lag(btc_gtrends$GT_lag7, k = 1)
# btc_gtrends$GT_lag9 = quantmod::Lag(btc_gtrends$GT_lag8, k = 1)
# btc_gtrends$GT_lag10 = quantmod::Lag(btc_gtrends$GT_lag9, k = 1)
# btc_gtrends$GT_lag11 = quantmod::Lag(btc_gtrends$GT_lag10, k = 1)
# btc_gtrends$GT_lag12 = quantmod::Lag(btc_gtrends$GT_lag11, k = 1)
# PerformanceAnalytics::charts.TimeSeries(data.to.plot)
# library(ggplot2)
# ggCcf(as.vector(btc_gtrends$BTC_price),
# as.vector(btc_gtrends$GTrends), lag.max = lag_max,
# type = c("correlation"), plot = TRUE)
#
# ggCcf(ccf_BTC_GT)
# ============================================================================
# Create image file
# ============================================================================
# Note: "today" defined in the setup file (p0_setup_v2.r)
file_name = paste0("bitcoin_google_trends_analysis_",today,".Rdata")
file_name
save.image(file_name)
|
/Bitcoin_Google_Trends_20170831.R
|
no_license
|
barrosm/Cryptocurrencies
|
R
| false | false | 7,624 |
r
|
|
library(dplyr)
library(plotly)
linely <- function(data, x, mode = 'lines', lcol = 'blue', lwidth = 1, ltype = 'plain',
title = NULL, p_bgcol = NULL, plot_bgcol = NULL,
title_family = 'Arial', title_size = 12, title_color = 'black',
axis_modify = FALSE, x_min, x_max, y_min, y_max,
x_title = NULL, x_showline = FALSE, x_showgrid = TRUE,
x_gridcol = NULL, x_showticklabels = TRUE,
x_lcol = NULL, x_lwidth = NULL, x_zline = FALSE,
x_autotick = TRUE, x_ticks = TRUE, x_tickcol = 'black',
x_ticklen = NULL, x_tickw = NULL, x_ticfont = 'Arial',
x_tickfsize = 10, x_tickfcol = 'black', y_title = NULL,
y_showline = FALSE, y_showgrid = TRUE,
y_gridcol = NULL, y_showticklabels = TRUE,
y_lcol = NULL, y_lwidth = NULL, y_zline = FALSE,
y_autotick = TRUE, y_ticks = TRUE, y_tickcol = 'black',
y_ticklen = NULL, y_tickw = NULL, y_ticfont = 'Arial',
y_tickfsize = 10, y_tickfcol = 'black',
ax_family = 'Arial', ax_size = 12, ax_color = 'black',
add_txt = FALSE, t_x, t_y, t_text, t_showarrow = FALSE,
t_font = 'Arial', t_size = 10, t_col = 'blue') {
yax <- data %>% select_(x) %>% unlist()
xax <- yax %>% length() %>% seq_len()
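  # (editorial note) the column named by `x` supplies the *y* values; the x axis is simply the
  # observation index 1..n built above. Also, dplyr::select_() is soft-deprecated; in current
  # dplyr the equivalent would be select(all_of(x)).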
p <- plot_ly(data = data,
type = "scatter",
mode = mode,
x = xax,
y = yax,
line = list(
color = lcol,
width = lwidth,
dash = ltype
))
title_font <- list(
family = title_family,
size = title_size,
color = title_color
)
axis_font <- list(
family = ax_family,
size = ax_size,
color = ax_color
)
xaxis <- list(title = x_title,
titlefont = axis_font,
showline = x_showline,
showgrid = x_showgrid,
gridcolor = x_gridcol,
showticklabels = x_showticklabels,
linecolor = x_lcol,
linewidth = x_lwidth,
zeroline = x_zline,
autotick = x_autotick,
ticks = x_ticks,
tickcolor = x_tickcol,
tickwidth = x_tickw,
ticklen = x_ticklen,
tickfont = list(family = x_ticfont,
size = x_tickfsize,
color = x_tickfcol))
yaxis <- list(title = y_title,
titlefont = axis_font,
showline = y_showline,
showgrid = y_showgrid,
gridcolor = y_gridcol,
showticklabels = y_showticklabels,
linecolor = y_lcol,
linewidth = y_lwidth,
zeroline = y_zline,
autotick = y_autotick,
ticks = y_ticks,
tickcolor = y_tickcol,
tickwidth = y_tickw,
ticklen = y_ticklen,
tickfont = list(family = y_ticfont,
size = y_tickfsize,
color = y_tickfcol))
p <- p %>%
layout(title = title,
font = title_font,
paper_bgcolor = p_bgcol,
plot_bgcolor = plot_bgcol,
xaxis = xaxis,
yaxis = yaxis)
if(add_txt) {
annote <- list(
x = t_x,
y = t_y,
text = t_text,
font = list(family = t_font,
size = t_size,
color = t_col),
showarrow = t_showarrow
)
p <- p %>%
layout(annotations = annote)
}
if(axis_modify) {
p <- p %>%
layout(
xaxis = list(
range = list(x_min, x_max)
),
yaxis = list(
range = list(y_min, y_max)
)
)
}
p
}
data1 <- c(7.2, 7.6, 6.8, 6.5, 7)
data2 <- c(6.8, 7.2, 7.8, 7, 6.2)
data <- data.frame(x = data1, y = data2)
p <- linely(data, 'x', mode = 'lines+markers', title = 'Line Chart',
x_title = 'Year', y_title = 'Growth', axis_modify = TRUE,
x_min = 0, x_max = 7, y_min = 4, y_max = 9)
p
|
/linely.R
|
no_license
|
aravindhebbali/plotly_xplorerr
|
R
| false | false | 4,341 |
r
|
|
#' model_fit_check
#' Copyright (c) 2019. Kaleido Biosciences. All Rights Reserved
#'
#'This function prints graphs visually displaying the model fits from a randomly sampled set of variables of the users choosing.
#'A replicate from each unique condition specified is randomly sampled and the fit and extracted parameters that are easy to visualize are shown.
#' @param phgropro_output This is the output from phgropro. It contains tidy pH and OD600 data.
#' @param grouping_vars This contains the variables you would like to see the fit for a randomly sampled replicate of.
#'
#' @return prints a randomly sampled plot from each condition to the console as specified by grouping_vars.
#' @export
#' @examples
#'\dontrun{phgropro_output = phgrofit::phgropro_output("Filepath of biotek export.txt", "filepath of metadata.csv", Plate_Type = 384)
#'model_fit_check(phgropro_output, grouping_vars = c("Community", "Compound"))}
#'#This would print graphs from a randomly sampled replicate of each combination of variables specified by grouping_vars
model_fit_check = function(phgropro_output,grouping_vars = "Sample.ID"){
#extracting the grouping vars in order to work with dplyr framework
cols_quo = dplyr::syms(grouping_vars)
kin_and_mod = dplyr::mutate(phgropro_output,Concat = paste(!!!cols_quo,sep =","))
    #Looking at each distinct combination of the grouping variables present in the data set so that each can be plotted.
distinct_metadata = dplyr::distinct(kin_and_mod,Concat) %>%
dplyr::pull(Concat)
randomized_unique = vector()
for(i in distinct_metadata){
distinct = dplyr::filter(kin_and_mod,Concat == i) %>%
dplyr::distinct(Sample.ID) %>%
dplyr::pull()
temp_randomized_unique = sample(distinct,1)
randomized_unique = c(randomized_unique,temp_randomized_unique)
}
#Filtering and looping
for(i in randomized_unique){
#if pH and od600
if("pH" %in% names(kin_and_mod)){
#getting rid of NAs just like we do in the actual modeling
input = dplyr::filter(kin_and_mod,Sample.ID == i)
p1 = graph_check(input)
p2 = ggpubr::annotate_figure(p1,paste0(input$Concat))
print(p2)
}else{
#else growth only
#getting rid of NAs just like we do in the actual modeling
input = dplyr::filter(kin_and_mod,Sample.ID == i) %>%
dplyr::filter(!is.na(OD600))
p1 = graph_check(input)
p2 = ggpubr::annotate_figure(p1,paste0(input$Concat))
print(p2)
}
}
}
|
/R/model_fit_check.R
|
permissive
|
Kaleido-Biosciences/phgrofit
|
R
| false | false | 2,646 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add-to-simulation.R
\name{add_oref_to_list}
\alias{add_oref_to_list}
\title{Internal function to add OutputRef to a list of OutputRef objects}
\usage{
add_oref_to_list(oref, oref_list, sim_dir)
}
\arguments{
\item{oref}{OutputRef to add}
\item{oref_list}{list of OutputRef objects}
\item{sim_dir}{sim@dir}
}
\description{
Makes sure that OutputRef with this same index and method is not already in
list. Although not checked, it is assumed that this is only called on a list
of OutputRef objects all coming from same model.
}
\keyword{internal}
|
/man/add_oref_to_list.Rd
|
no_license
|
zdk123/simulator
|
R
| false | true | 626 |
rd
|
|
install.packages("twitteR")
install.packages("RColorBrewer")
install.packages("ROAuth")
library(twitteR)
library(RColorBrewer)
library(ROAuth)
url_rqst<-"https://api.twitter.com/oauth/request_token"
url_acc<-"https://api.twitter.com/oauth/access_token"
url_auth<-"https://api.twitter.com/oauth/authorize"
API_key<-"pgQe8vyN3VNZXkoKYyuBg"
API_secret<-"iz0zf9x6mrSijhbz5wsNNAGrsboQ3pVOX4NFbEqgNX8"
Acc_token <-"47051754-6wuje0FeeS3xyVWGNJ8Kb3nkr9PA5HHS235gTSB10"
Acc_secret <-"KtkJqhLUr2sEfrWL7sIiuBpAmm76lpM9Q6RqMmY07t0"
#===============
setup_twitter_oauth(consumer_key=API_key, consumer_secret=API_secret, access_token=Acc_token, access_secret=Acc_secret)
sales_tweets<-searchTwitter("buy new car",n=500)
library(plyr)
sales_text<-laply(sales_tweets,function(t)t$getText())
str(sales_text)
head(sales_text,3)
pos.word=scan("positive-words.txt",what="character",comment.char=";")
neg.word=scan("negative-words.txt",what="character",comment.char=";")
pos.words<-c(pos.word,"ripper","speed")
neg.words<-c(neg.word,"small","narrow")
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
require(plyr)
require(stringr)
# we got a vector of sentences. plyr will handle a list
# or a vector as an "l" for us
# we want a simple array ("a") of scores back, so we use
# "l" + "a" + "ply" = "laply":
scores = laply(sentences, function(sentence, pos.words, neg.words) {
# clean up sentences with R's regex-driven global substitute, gsub():
sentence = gsub('[[:punct:]]', '', sentence)
sentence = gsub('[[:cntrl:]]', '', sentence)
sentence = gsub('\\d+', '', sentence)
# and convert to lower case:
sentence = tolower(sentence) # for english
# split into words. str_split is in the stringr package
word.list = str_split(sentence, '\\s+')
# sometimes a list() is one level of hierarchy too much
words = unlist(word.list)
# compare our words to the dictionaries of positive & negative terms
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
# match() returns the position of the matched term or NA
# we just want a TRUE/FALSE:
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
# and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress )
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
}
head(sales_text,5)
Encoding(sales_text)[1:10]
sales_text<-sales_text[!Encoding(sales_text)=="UTF-8"]
head(sales_text,4)
sales_text[[10]]
sales_scores=score.sentiment(sales_text,pos.words,neg.words,.progress='text')
hist(sales_scores$score)
df <- do.call("rbind", lapply(sales_tweets, as.data.frame))
removeTwit <- function(x) {gsub("@[[:graph:]]*", "", x)}
df$ptext <- sapply(df$text, removeTwit)
removeURL <- function(x) { gsub("http://[[:graph:]]*", "", x)}
df$ptext <- sapply(df$ptext, removeURL)
# build corpus (Corpus, tm_map and TermDocumentMatrix below come from the tm package)
library(tm)
myCorpus <- Corpus(VectorSource(df$ptext))
tmp1 <- tm_map(myCorpus, stemDocument, lazy = TRUE)
tmp2<-tm_map(tmp1,removePunctuation)
tmp3<-tm_map(tmp2,stripWhitespace)
tmp4 <-tm_map(tmp3,removeNumbers)
tmp5<-tm_map(tmp4, removeWords, stopwords("english"))
tdm <- TermDocumentMatrix(tmp5)
findFreqTerms(tdm, lowfreq=20)
findAssocs(tdm,'car',0.2)
dtm <- DocumentTermMatrix(tmp5)
inspect(dtm[1:10,100:105])
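# (editorial sketch, not part of the original script) quick visual check of the most frequent
# terms, using only packages already loaded above (tm structures, RColorBrewer for colours):
term_freq <- sort(rowSums(as.matrix(tdm)), decreasing = TRUE) # total count of each term across tweets
barplot(head(term_freq, 20), las = 2, col = brewer.pal(8, "Dark2"),
        main = "Top 20 terms in 'buy new car' tweets")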
|
/source/twiterR.R
|
no_license
|
irichgreen/My_R_practice
|
R
| false | false | 3,502 |
r
|
|
# Page No. 101
crime_rate=c(876,578,718,388,562,971,698,298,673,537,642,856,376,508,529,393,354,735,811,504,807,719,464,410,491,557,771,685,448,571,189,661,877,563,647,447,336,526,624,605,496,296,628,481,224,868,804,210,421,435,291,393,605,341,352,374,267,684,685,460,466,498,562,739,562,817,690,720,758,731,480,559,505,703,809,706,631,626,639,585,570,928,516,885,751,561,1020,592,814,843)
boxplot(crime_rate, horizontal = TRUE, axes = FALSE, staplewex = 1)
text(x=fivenum(crime_rate), labels =fivenum(crime_rate), y=1.25)
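# (editorial note) fivenum() returns the five-number summary (minimum, lower hinge, median,
# upper hinge, maximum), so the labels above mark the box-and-whisker landmarks of crime_rate.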
|
/An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH3/EX3.15/Ex3_15.r
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false | false | 525 |
r
|
|
##########################
### randomization test ###
##########################
#Description
# This function was written by Nick Sard in 2016. It is designed to take a two-column
# data frame with reproductive success estimates (column 1) from two groups (column 2) and determine
# whether the difference in the mean or variance between the groups is statistically significant,
# based on a distribution of n randomized comparisons.
randomization.test <- function(tmp,test="Two_Sided", paramater = "mean", group,n=1000){
#getting the groups
groups <- unique(tmp[,2])
if(length(groups)!=2){stop("You need two groups to perform this function")}
#defining groups
g1 <- group
g2 <- groups[groups!=group]
#getting the values and seperating them by group
vals <- tmp[,1]
n.all <- length(vals)
vals1 <- tmp[tmp[,2]==g1,1]
vals2 <- tmp[tmp[,2]==g2,1]
#getting counts of each group
n1 <- length(vals1)
n2 <- length(vals2)
#doing the stats based on means
if(paramater == "mean"){
#calculating means for both groups and getting the difference
mean1 <- mean(vals1)
mean2 <- mean(vals2)
real.diff <- mean1-mean2
#saving this information for output df
p1 <- mean1
p2 <- mean2
#doing the simulations
sim.diffs <- NULL
for(i in 1:n){
sim.vals <- sample(x = vals,replace = F,size = n.all)
sim.diff1 <- mean(sim.vals[1:n1])-mean(sim.vals[(n.all-n2+1):n.all])
sim.diffs <- c(sim.diffs,sim.diff1)
} # end permutation
} #end means if statement
#doing the stats based on variance
if(paramater == "variance"){
#calculating vars for both groups and getting the difference
var1 <- var(vals1)
var2 <- var(vals2)
real.diff <- var1-var2
#saving this information for output df
p1 <- var1
p2 <- var2
#doing the simulations
sim.diffs <- NULL
for(i in 1:n){
sim.vals <- sample(x = vals,replace = F,size = n.all)
sim.diff1 <- var(sim.vals[1:n1])-var(sim.vals[(n.all-n2+1):n.all])
sim.diffs <- c(sim.diffs,sim.diff1)
} # end permutation
} #end vars if statement
#calculating p-value by comparing real.diff to simulated diffs
if(test == "Two_Sided"){p.val <- length(sim.diffs[abs(sim.diffs)>abs(real.diff)])/n}
if(test == "Less_Than"){p.val <- length(sim.diffs[sim.diffs<real.diff])/n}
if(test == "Greater_Than"){p.val <- length(sim.diffs[sim.diffs>real.diff])/n}
#putting all information together in a nice output format
output <- data.frame(Group1 = g1, Group2 = g2,
Par1 = p1, Par2 = p2,
Diff = real.diff, Pval = p.val,stringsAsFactors = F)
return(output)
} #end of permutation function
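# (editorial sketch) example call with made-up data, illustrating the expected two-column input:
# rs_df <- data.frame(RS = c(2, 0, 5, 1, 3, 4, 0, 2),
#                     origin = rep(c("hatchery", "wild"), each = 4))
# randomization.test(rs_df, test = "Two_Sided", paramater = "mean", group = "hatchery", n = 1000)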
|
/R/randomization.test.R
|
no_license
|
nicksard/fitr
|
R
| false | false | 2,695 |
r
|
|
#' Example data for the \pkg{rsurface} package
#'
#' This example uses experimental data published in Czitrom and Spagon (1997),
#' \emph{Statistical Case Studies for Industrial Process Improvement} that
#' describes a semiconductor wafer processing experiment. A goal of this experiment
#' was to fit response surface models to the deposition layer stress
#' as a function of two particular controllable factors of the chemical vapor deposition
#' (CVD) reactor process. These factors were pressure (measured in torr)
#' and the ratio of the gaseous reactants hydrogen gas and tungsten(VI) fluoride.
#'
#' @format A data frame with three columns and ten rows of values
#' \describe{
#' \item{Factor1}{Pressure measured in torr}
#' \item{Factor2}{The ratio of gaseous reactants.
#' The smallest and greatest values for the ratios of hydrogen gas to tungsten(VI) fluoride were chosen to be 2 and 10.}
#' \item{Response}{Deposition layer stress}
#' }
#'
#' @references
#' Czitrom, V., and Spagon, P. D., (1997), Statistical Case Studies for Industrial Process Improvement, Philadelphia, PA, ASA-SIAM Series on Statistics and Applied Probability.
"ExampleData"
|
/R/ExampleData.R
|
no_license
|
cran/rsurface
|
R
| false | false | 1,217 |
r
|
|
#unused?
addTargetFunctions = function( ... ){
arguments = rlang::enquos(...)
lapply( arguments, function(qq){
ll = getFunctionPropertiesFromQuosure(qq )
storage_hash_table[[ ll$keyname ]] = ll
})
storage_hash_table
}
#unused?
getFunctionPropertiesFromQuosure = function(qq ){
##make sure you pass in a quosure
if( rlang::is_quosure(qq)){
keyname = NULL
substituted = rlang::get_expr(qq)
if( is.call(substituted )){ #handle function calls
function_name = substituted[[1]]
if (function_name == "::"){ #handle specific cases where the called function is `::`. Like when we call addTargetFunctions( dplyr::summarise )
substituted = pryr::standardise_call( substituted )
keyname = paste0( substituted$pkg, "::", substituted$name)
}
}
ref = rlang::eval_tidy( qq )
environment = environment( rlang::eval_tidy( qq ))
environmentName = environmentName( environment )
if ( is.null(keyname)){
keyname = paste0( environmentName, '::' , deparse( rlang::quo_get_expr ( qq ) ) )
}
result = list(
ref = ref,
environment = environment,
environmentName = environmentName,
keyname = keyname
)
} else{
stop("This function expects a quosure")
}
result
}
#'
#'
#' Add documentation for a function
#'
#' Enter a link that you think documents this function well. It will show up when you review flashcards
#' ( not supported yet! )
#'
#' @param targetFunction the target function that documentation is being added for
#' @param urls the urls that are added as documentation
#' @param call_counts_hash_table the call count hash table ( or defaults to the remembr one)
#'
#' @export
addDocumentationURL = function( targetFunction, urls , call_counts_hash_table = NULL ){
if ( is.null(call_counts_hash_table)){
call_counts_hash_table = getCallCountsHashTable()
}
#addTargetFunction( targetFunction ) #not sure how to do this part!
if ( is.character(targetFunction)){
keyname = targetFunction
} else {
qq = rlang::enquo( targetFunction )
props = getFunctionPropertiesFromQuosure( qq )
keyname = props$keyname
}
  if (is.null(urls) || length(urls) == 0){
return()
}
present_card = call_counts_hash_table[[keyname]]
if ( is.null( present_card)){
stop(paste0( "card ",keyname, " does not exist, could not add documentation "))
}
present_card$urls = unique( c( present_card$urls, urls ) )
call_counts_hash_table[[keyname]] = present_card
call_counts_hash_table
}
#' Get documentation for a function
#'
#' Pass in a function and get any urls associated with it
#'
#' @param targetFunction a function reference that you want to get documentation urls for
#' @param call_counts_hash_table the call counts hash table (uses the remembr one by default)
#'
#' @export
getDocumentationURLs = function( targetFunction , call_counts_hash_table = NULL ){
if ( is.null(call_counts_hash_table)){
call_counts_hash_table = getCallCountsHashTable()
}
if ( is.character(targetFunction)){
keyname = targetFunction
} else{
qq = rlang::enquo( targetFunction )
props = getFunctionPropertiesFromQuosure( qq )
keyname = props$keyname
}
if ( rlang::env_has(call_counts_hash_table, keyname)){
call_counts_hash_table[[keyname]]$urls
} else {
return( character() )
}
}
#showDocumentationUrls = function( storage_env ){
# ls ( storage_env )
#}
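# Illustrative usage sketch (hypothetical, not taken from the package docs): the
# hash table is an environment keyed by "pkg::function". A minimal stand-in card is
# seeded by hand here so the example is self-contained and does not rely on
# getCallCountsHashTable().
tbl <- new.env()
tbl[["dplyr::summarise"]] <- list(urls = character())
addDocumentationURL("dplyr::summarise",
                    "https://dplyr.tidyverse.org/reference/summarise.html",
                    call_counts_hash_table = tbl)
getDocumentationURLs("dplyr::summarise", call_counts_hash_table = tbl)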
|
/R/documentation_url.R
|
no_license
|
djacobs7/remembr
|
R
| false | false | 3,449 |
r
|
# Jaana Simola
# 07.11.2018
# The first script
# 1. read data
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE)
# 2. explore the structure and dimensions of the data
str(lrn14) # The 60 columns are integers except the last column which represents gender as a factor
dim(lrn14) # The data has 183 rows and 60 columns
# 3. Create an analysis dataset with the variables gender, age, attitude, deep, stra, surf
library(dplyr) # Access the dplyr library
# columns for the analysis dataset
cols <- c("gender", "Age", "Attitude","deep", "stra", "surf", "Points")
# combine questions in the learning2014 data
deep_questions <- c("D03", "D11", "D19", "D27", "D07", "D14", "D22", "D30","D06", "D15", "D23", "D31")
stra_questions <- c("ST01","ST09","ST17","ST25","ST04","ST12","ST20","ST28")
surf_questions <- c("SU02","SU10","SU18","SU26", "SU05","SU13","SU21","SU29","SU08","SU16","SU24","SU32")
# select data and scale combination variables by taking the mean
deep_columns <- select(lrn14, one_of(deep_questions))
lrn14$deep <- rowMeans(deep_columns)
strategic_columns <- select(lrn14, one_of(stra_questions))
lrn14$stra <- rowMeans(strategic_columns)
surface_columns <- select(lrn14, one_of(surf_questions))
lrn14$surf <- rowMeans(surface_columns)
# create the analysis dataset, learning2014
learning2014 <- select(lrn14, one_of(cols))
# exclude observations where the exam points variable is zero
learning2014 <- filter(learning2014, Points > 0)
str(learning2014) # study the dimensions of learning2014: 166 obs, 7 vars
# set the working directory
setwd("~/Documents/GitHub/IODS-project")
getwd()
# save the analysis dataset and read it
write.csv(learning2014, file = "learning2014.csv", row.names = FALSE)
read.csv("learning2014.csv")
|
/data/create_learning2014.R
|
no_license
|
jsimola/IODS-project
|
R
| false | false | 1,806 |
r
|
\name{tensorGMam-package}
\alias{tensorGMam-package}
\alias{tensorGMam}
\docType{package}
\title{
A tensor estimation approach to integrative multi-view multivariate additive models
}
\description{
Fits a high-dimensional grouped multivariate additive model (GMAM) using B-splines, with or without sparsity assumptions,
treating the coefficients as a third-order or even fourth-order tensor and using a Tucker decomposition to reduce the number of parameters. The multivariate sparse group lasso (MCP or SCAD) penalty and a coordinate descent algorithm are used to estimate the component functions in the sparse setting.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Xu Liu
Maintainer: Xu Liu <liu.xu@sufe.edu.cn>
}
\references{
A tensor estimation approach to integrative multi-view multivariate additive models.
}
\keyword{ High-dimensional, Sparse models; Tensor estimation; Tucker decomposition. }
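# Illustrative aside (not part of the tensorGMam package or this .Rd file): the
# description above refers to B-spline expansions of each additive component.
# A minimal sketch of such a basis, using only the base-R splines package:
library(splines)
set.seed(1)
x <- runif(100)        # one covariate
B <- bs(x, df = 5)     # 100 x 5 B-spline design matrix for this covariate
dim(B)
# Stacking one such block per covariate, response and view gives the coefficient
# array that the package treats as a tensor and compresses via Tucker decomposition.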
|
/man/tensorGMam-package.Rd
|
no_license
|
ymingliu/tensorGMam
|
R
| false | false | 1,006 |
rd
|
rm(list=ls())
setwd("/media/tandrean/Elements/PhD/ChIP-profile/New.Test.Steffen.Data/ChIP.MCF7/CTCF")
library(data.table)
mydata<-fread("chrX.SMARCA4.txt")
mydata.paste <- paste(mydata$V1, mydata$V2, mydata$V3, sep="_")
Id <- mydata.paste
Score <- rowSums(mydata[,4:6])
#Detect the reproducible regions
#Open a data.table
df <-data.table(Id=1:length(Id), region.name= Id ,Score=Score, BR=rep(NA,length(Score)),stringsAsFactors=FALSE, key = "Id")
head(df)
#Run The Algorithm
BR <- list()
id = 0
tmp <- c()
start = Sys.time()
for (i in df$Id) {
if (df[i, "Score"] == 0) {
if (length(tmp) > 0) {
id <- id + 1
new_name <- sprintf("BR_%d", id)
BR[[new_name]] <- tmp
df[tmp,"BR"]=rep(new_name,length(tmp))
tmp = c()
}#Close 2nd if
}else{ #open 1st if:else
tmp = c(tmp,i)
} #Close 1st if
} #Close for loop
total = Sys.time() - start
print(total)
#Compute the reproducible regions
start = Sys.time()
VR_flag <-
sapply (BR,
function(posList) {
tmp=df$Score[match(posList, df$Id)]
if (max(tmp) == 3){
return(T)
} else {
df[posList,"BR"] <<- rep(NA,length(posList))
return(F)
}
}
)
total = Sys.time() - start
print(total)
#Extract the reproducible regions and build a data frame
VR = BR[VR_flag]
out <- unlist(VR)
out2 <- as.numeric(out)
#Back To the original data frame
df2 <- df[df$Id %in% out2,]
out3 <- strsplit(df2$region.name, "_")
head(out3)
length <- length(out3)
df.Reproducible <- data.frame("chr"=character(length=length),"start"=numeric(length=length),"end"=numeric(length = length))
df.Reproducible$chr <- as.character(df.Reproducible$chr)
#Fill the data frame with the reproducible regions
for(i in 1:length(out3)){
df.Reproducible$chr[i] <- out3[[i]][1]
df.Reproducible$start[i] <- as.numeric(out3[[i]][2])
df.Reproducible$end[i] <- as.numeric(out3[[i]][3])
}
options(scipen = 999)
write.table(df.Reproducible,"Reproducible.chrX.SMARCA4.Idr.txt",quote=FALSE,col.names = TRUE,row.names = FALSE,sep="\t")
length(Score)
#Compute the non reproducible regions
start = Sys.time()
VR_flag <-
sapply (BR,
function(posList) {
tmp=df$Score[match(posList, df$Id)]
if (max(tmp) < 3){
return(T)
} else {
df[posList,"BR"] <<- rep(NA,length(posList))
return(F)
}
}
)
total = Sys.time() - start
#Extract the non-reproducible regions and build a data frame
VR = BR[VR_flag]
out <- unlist(VR)
out2 <- as.numeric(out)
#Back To the original data frame
df2 <- df[df$Id %in% out2,]
out3 <- strsplit(df2$region.name, "_")
head(out3)
length <- length(out3)
df.Not.Reproducible <- data.frame("chr"=character(length=length),"start"=numeric(length=length),"end"=numeric(length = length))
df.Not.Reproducible$chr <- as.character(df.Not.Reproducible$chr)
#Fill the data frame with the non-reproducible regions
for(i in 1:length(out3)){
df.Not.Reproducible$chr[i] <- out3[[i]][1]
df.Not.Reproducible$start[i] <- as.numeric(out3[[i]][2])
df.Not.Reproducible$end[i] <- as.numeric(out3[[i]][3])
}
options(scipen = 999)
write.table(df.Not.Reproducible,"Not.Reproducible.chrX.SMARCA4.Idr.txt",quote=FALSE,col.names = TRUE,row.names = FALSE,sep="\t")
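# Illustrative alternative (not part of the original script): the block detection
# above can also be expressed with rle() instead of the explicit loop. A block is a
# maximal run of rows with Score > 0; it is reproducible if any row in it reaches 3.
run <- rle(Score > 0)
block_id <- rep(seq_along(run$lengths), run$lengths)  # run index for every row
block_id[Score == 0] <- NA                            # zero-score rows belong to no block
block_max <- tapply(Score, block_id, max)             # highest score within each block
reproducible_ids <- names(block_max)[block_max == 3]  # analogous to the VR blocks above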
|
/Job.Array/chrX.r
|
no_license
|
tAndreani/IPVARIABLE
|
R
| false | false | 3,314 |
r
|
# inference_laplace_full
# Inference using the Laplace approximation and a full (non-sparse) GP
# y is response data
# data is a dataframe containing columns on which the kernel acts,
# giving observations on which to train the model
# kernel is a gpe kernel object
# mean_function is a gpe mean function (for now just an R function)
inference_laplace_full <- function(y,
data,
kernel,
likelihood,
mean_function,
inducing_data,
weights,
verbose = verbose) {
# NB inducing_data is ignored
# apply mean function to get prior mean at observation locations
mn_prior <- mean_function(data)
# control parameters
tol <- 10 ^ -12
  itmax <- 50
# self kernel (with observation noise)
Kxx <- kernel(data, data)
# number of observations
n <- nrow(data)
# diagonal matrix
eye <- diag(n)
# initialise a
a <- rep(0, n)
# set f to the prior
f <- mn_prior
# initialise loop
obj.old <- Inf
obj <- -sum(likelihood$d0(y, f, weights))
it <- 0
# start newton iterations
while ((obj.old - obj) > tol & it < itmax) {
# increment iterator and update objective
it <- it + 1
obj.old <- obj
# get the negative log Hessian and its root
W <- -(likelihood$d2(y, f, weights))
rW <- sqrt(W)
# difference between posterior mode and prior
cf <- f - mn_prior
# get cholesky factorisation
L <- jitchol(rW %*% t(rW) * Kxx + eye)
# get direction of the posterior mode
b <- W * cf + likelihood$d1(y, f, weights)
mat2 <- rW * (Kxx %*% b)
adiff <- b - rW * backsolve(L, forwardsolve(t(L), mat2)) - a
# make sure it's a vector, not a matrix
dim(adiff) <- NULL
# find optimum step size toward the mode using Brent's method
res <- optimise(laplace_psiline_full,
interval = c(0, 2),
adiff = adiff,
a = a,
K = Kxx,
y = y,
d0 = likelihood$d0,
mn = mn_prior,
weights)
# move to the new posterior mode
a <- a + res$minimum * adiff
f <- Kxx %*% a + mn_prior
obj <- laplace_psi(a,
f,
mn_prior,
y,
likelihood$d0,
weights)
}
# recompute hessian at mode
W <- -(likelihood$d2(y, f, weights))
  # compute the approximate negative log marginal likelihood
lZ <- -(a %*% (f - mn_prior))[1, 1] / 2 -
sum(likelihood$d0(y, f, weights)) +
sum(log(diag(L)))
  # build the posterior object
posterior <- createPosterior(inference_name = 'inference_laplace_full',
lZ = lZ,
data = data,
kernel = kernel,
likelihood = likelihood,
mean_function = mean_function,
inducing_data = inducing_data,
weights,
mn_prior = mn_prior,
L = L,
a = a,
W = W)
# return a posterior object
return (posterior)
}
# projection for full inference
project_laplace_full <- function(posterior,
new_data,
variance = c('none', 'diag', 'matrix')) {
# get the required variance argument
variance <- match.arg(variance)
# prior mean over the test locations
mn_prior_xp <- posterior$mean_function(new_data)
# projection matrix
Kxxp <- posterior$kernel(posterior$data,
new_data)
# its transpose
Kxpx <- t(Kxxp)
# get posterior mean
mu <- Kxpx %*% posterior$components$a + mn_prior_xp
# NB can easily modify this to return only the diagonal elements
# (variances) with kernel(..., diag = TRUE)
# calculation of the diagonal of t(v) %*% v is also easy:
# (colSums(v ^ 2))
if (variance == 'none') {
# if mean only
var <- NULL
} else {
# compute common variance components
rW <- sqrt(as.vector(posterior$components$W))
# get posterior covariance
v <- backsolve(posterior$components$L,
rW * Kxxp,
transpose = TRUE)
if (variance == 'diag') {
# if diagonal (elementwise) variance only
# diagonal matrix of the prior covariance on xp
Kxpxp_diag <- posterior$kernel(new_data, diag = TRUE)
# diagonal elements of t(v) %*% v
vtv_diag <- colSums(v ^ 2)
# diagonal elements of the posterior
K_diag <- diag(Kxpxp_diag) - vtv_diag
var <- K_diag
} else {
# if full variance
# prior covariance on xp
Kxpxp <- posterior$kernel(new_data)
# posterior covariance on xp
K <- Kxpxp - crossprod(v)
var <- K
}
}
# return both
ans <- list(mu = mu,
var = var)
return (ans)
}
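# Quick illustrative check (not part of the package) of the identity mentioned in
# the comments above: the diagonal of t(v) %*% v equals colSums(v ^ 2).
v_demo <- matrix(rnorm(12), nrow = 4)
all.equal(diag(crossprod(v_demo)), colSums(v_demo ^ 2))  # TRUE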
|
/R/inference_laplace_full.R
|
permissive
|
ogaoue/gpe
|
R
| false | false | 5,346 |
r
|
# load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/Icog_result_intrinsic_subtype.Rdata")
# load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/intrinsic_subtypes/result/onco_result_intrinsic_subtype.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/Icog_result_intrinsic_subtype_082119.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/intrinsic_subtypes/result/onco_result_intrinsic_subtype_082119.Rdata")
icog_result <- icog_result_casecase
onco_result <- onco_result_casecase
rm(icog_result_casecase)
rm(onco_result_casecase)
gc()
load("/data/zhangh24/match.Rdata")
idx = which(is.na(data$SNP.ICOGS)|is.na(data$SNP.ONCO)|is.na(data$var_name))
data_c = data[-idx,]
shared_rs_id = intersect(data_c$SNP.ICOGS,icog_result$rs_id)
shared_rs_id2=intersect(data_c$SNP.ONCO,onco_result$rs_id)
idx.icog_shared = which((icog_result$rs_id%in%shared_rs_id)==T)
icog_result_shared = icog_result[idx.icog_shared,]
idx.icog_match = match(shared_rs_id,icog_result_shared$rs_id)
icog_result_shared = icog_result_shared[idx.icog_match,]
idx.onco_shared = which((onco_result$rs_id%in%shared_rs_id2)==T)
onco_result_shared = onco_result[idx.onco_shared,]
idx.onco_match = match(shared_rs_id2,onco_result_shared$rs_id)
onco_result_shared = onco_result_shared[idx.onco_match,]
####subset data_c to the shared SNPs and align its order
idx.shared_data_c <- which((data_c$SNP.ICOGS%in%shared_rs_id)==T)
data_c_shared <- data_c[idx.shared_data_c,]
idx.icog_match_data_c <- match(shared_rs_id,data_c_shared$SNP.ICOGS)
data_c_shared <- data_c_shared[idx.icog_match_data_c,]
#icog_result_shared <- icog_result_shared[,-ncol(icog_result_shared)]
icog_result_shared <- cbind(icog_result_shared,data_c_shared)
all.equal(icog_result_shared$rs_id,icog_result_shared$SNP.ICOGS)
#onco_result_shared <- onco_result_shared[,-ncol(onco_result_shared)]
onco_result_shared <- cbind(onco_result_shared,data_c_shared)
all.equal(onco_result_shared$rs_id,onco_result_shared$SNP.ONCO)
save(icog_result_shared,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/icog_result_shared_082119.Rdata")
save(onco_result_shared,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/intrinsic_subtypes/result/onco_result_shared_082119.Rdata")
#load("/data/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2_fixed/result/icog_result_shared.Rdata")
#load("/data/zhangh24/breast_cancer_data_analysis/whole_genome/ONCO/ERPRHER2_fixed/result/onco_result_shared.Rdata")
idx.filter <- which(icog_result_shared$exp_freq_a1>=0.01&
onco_result_shared$exp_freq_a1>=0.01&
icog_result_shared$exp_freq_a1<=0.99&
onco_result_shared$exp_freq_a1<=0.99)
icog_result_shared_1p <- icog_result_shared[idx.filter,]
onco_result_shared_1p <- onco_result_shared[idx.filter,]
save(icog_result_shared_1p,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/icog_result_shared_1p_082119.Rdata")
save(onco_result_shared_1p,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/intrinsic_subtypes/result/onco_result_shared_1p_082119.Rdata")
idx.icog.only = which((!is.na(data$SNP.ICOGS))&(!is.na(data$var_name))&is.na(data$SNP.ONCO))
data_icog_only <- data[idx.icog.only,]
shared_rs_id_icog_only = intersect(data_icog_only$SNP.ICOGS,icog_result$rs_id)
idx.icog.only_shared = which((icog_result$rs_id%in%shared_rs_id_icog_only)==T)
icog_result_only_shared = icog_result[idx.icog.only_shared,]
idx.icog.only_match_shared = match(shared_rs_id_icog_only,icog_result_only_shared$rs_id)
icog_result_only_shared = icog_result_only_shared[idx.icog.only_match_shared,]
####subset data_icog_only to the iCOGS-only shared SNPs and align its order
idx.icog.only.shared_data <- which((data_icog_only$SNP.ICOGS%in%shared_rs_id_icog_only)==T)
data_icog_only_shared <- data_icog_only[idx.icog.only.shared_data,]
idx.icog_only_match_data <- match(shared_rs_id_icog_only,data_icog_only_shared$SNP.ICOGS)
data_icog_only_shared <- data_icog_only_shared[idx.icog_only_match_data,]
#icog_result_shared <- icog_result_shared[,-ncol(icog_result_shared)]
icog_result_only_shared <- cbind(icog_result_only_shared,data_icog_only_shared )
all.equal(icog_result_only_shared$rs_id,icog_result_only_shared$SNP.ICOGS)
idx.onco.only = which((!is.na(data$SNP.ONCO))&(!is.na(data$var_name))&is.na(data$SNP.ICOGS))
data_onco_only <- data[idx.onco.only,]
shared_rs_id_onco_only = intersect(data_onco_only$SNP.ONCO,onco_result$rs_id)
idx.onco.only_shared = which((onco_result$rs_id%in%shared_rs_id_onco_only)==T)
onco_result_only_shared = onco_result[idx.onco.only_shared,]
idx.onco.only_match_shared = match(shared_rs_id_onco_only,onco_result_only_shared$rs_id)
onco_result_only_shared = onco_result_only_shared[idx.onco.only_match_shared,]
####subset data_onco_only to the OncoArray-only shared SNPs and align its order
idx.onco.only.shared_data <- which((data_onco_only$SNP.ONCO%in%shared_rs_id_onco_only)==T)
data_onco_only_shared <- data_onco_only[idx.onco.only.shared_data,]
idx.onco_only_match_data <- match(shared_rs_id_onco_only,data_onco_only_shared$SNP.ONCO)
data_onco_only_shared <- data_onco_only_shared[idx.onco_only_match_data,]
#icog_result_shared <- icog_result_shared[,-ncol(icog_result_shared)]
onco_result_only_shared <- cbind(onco_result_only_shared,data_onco_only_shared )
all.equal(onco_result_only_shared$rs_id,onco_result_only_shared$SNP.ONCO)
idx.filter.icog.only <- which(icog_result_only_shared$exp_freq_a1>=0.01&
icog_result_only_shared$exp_freq_a1<=0.99)
icog_result_only_shared_1p <- icog_result_only_shared[idx.filter.icog.only,]
idx.filter.onco.only <- which(onco_result_only_shared$exp_freq_a1>=0.01&
onco_result_only_shared$exp_freq_a1<=0.99)
onco_result_only_shared_1p <- onco_result_only_shared[idx.filter.onco.only,]
save(icog_result_only_shared_1p,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/Intrinsic_subtypes/result/icog_result_only_shared_1p_082119.Rdata")
save(onco_result_only_shared_1p,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/intrinsic_subtypes/result/onco_result_only_shared_1p_082119.Rdata")
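# Illustrative sketch (not part of the analysis): the pattern used repeatedly above
# aligns two tables on a shared key with intersect() followed by match(), so that
# the rows of both tables end up in the same SNP order before cbind().
a <- data.frame(id = c("rs1", "rs2", "rs3"), x = 1:3, stringsAsFactors = FALSE)
b <- data.frame(id = c("rs3", "rs1"), y = c(30, 10), stringsAsFactors = FALSE)
shared <- intersect(a$id, b$id)
a_aligned <- a[match(shared, a$id), ]
b_aligned <- b[match(shared, b$id), ]
all.equal(a_aligned$id, b_aligned$id)  # TRUE: rows are aligned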
|
/whole_genome_age/ICOG/Intrinsic_subtypes/code/find_shared_SNPs.R
|
no_license
|
andrewhaoyu/breast_cancer_data_analysis
|
R
| false | false | 6,358 |
r
|
#Loading a JSON file as a DataFrame
callDetailsDF <- read.df("/home/spark/sampledata/json/cdrs.json","json")
#Writing the DataFrame out as a Parquet
write.parquet(callDetailsDF,"cdrs.parquet")
#Reading Parquet as a DataFrame
callDetailsParquetDF <- read.parquet("cdrs.parquet")
#Data Manipulation of Parquet Data
createOrReplaceTempView(callDetailsParquetDF,"calldetails")
topCallLocsDF <- sql("select Origin,Dest, count(*) as cnt from calldetails group by Origin,Dest order by cnt desc")
head(topCallLocsDF)
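# Optional follow-up (illustrative; assumes the same SparkR session as above):
# inspect the schema of the Parquet-backed DataFrame and pull the aggregated
# result into a local R data.frame.
printSchema(callDetailsParquetDF)
topCallLocsLocal <- collect(topCallLocsDF)
head(topCallLocsLocal)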
|
/Chapter04/ReadWriteParquet.r
|
permissive
|
PacktPublishing/Learning-Apache-Spark-2
|
R
| false | false | 510 |
r
|
# Read Training data
# set
train <- read.table(file="UCI HAR Dataset\\train\\X_train.txt")
# subject
train_subject <- read.table(file="UCI HAR Dataset\\train\\subject_train.txt")
# label
train_Activity <- read.table(file="UCI HAR Dataset\\train\\y_train.txt")
# Prepare Training set
train$Subject <- train_subject$V1
train$Activity <- train_Activity$V1
#Read Test data
#set
test <- read.table(file = "UCI HAR Dataset\\test\\X_test.txt")
# subject
test_subject <- read.table(file="UCI HAR Dataset\\test\\subject_test.txt")
# label
test_Activity <- read.table(file="UCI HAR Dataset\\test\\y_test.txt")
# Prepare Test set
test$Subject <- test_subject$V1
test$Activity <- test_Activity$V1
# Merge both Test & Training set
data <- rbind(train, test)
#Read features
features <- read.table(file="UCI HAR Dataset\\features.txt", colClasses= c("integer","character"))
# Add remaining variable labels
features <- rbind(features, c(nrow(features)+1, "subject"))
features <- rbind(features, c(nrow(features)+1, "activity"))
# Use descriptive variable names
names(data) <- features$V2
# extract columns whose names contain mean, std, subject or activity
data <- data[, grepl("mean|std|subject|activity", names(data))]
#remove punctuation in variable names
names(data) <- gsub("[[:punct:]]", "", names(data))
# Read Activity label
activties <- read.table(file="UCI HAR Dataset\\activity_labels.txt", colClasses= c("integer","character"))
# Assign activity labels
data$activity <- factor(data$activity, labels= activties$V2)
# Create tidy data set (requires the plyr and reshape2 packages)
library(reshape2)
library(plyr)
tidy <- ddply(melt(data, id.vars = c("activity", "subject")), c("activity", "subject"), summarise, mean = mean(value))
# Write it to a txt file
write.table(tidy, file = "tidydata.txt", row.names = FALSE)
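# Optional sanity check (illustrative, not part of the original assignment script):
# read the tidy data set back in and confirm its structure.
tidy_check <- read.table("tidydata.txt", header = TRUE)
str(tidy_check)  # one row per (activity, subject) pair plus the overall mean column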
|
/run_analysis.R
|
no_license
|
texan678/getandclean
|
R
| false | false | 1,728 |
r
|
#' @title Get raw data
#' @description A function that returns all unprocessed data concerning one of the offices in Warsaw.
#' @param office_name \code{character} acronym of office in Warsaw.
#'
#' You can get a list of possible values using \code{\link[kolejkeR]{get_available_offices}} function.
#' @return A \code{data.frame} with following columns:
#' \itemize{
#' \item status - either 0 (queue is not operating) or 1 (queue is operating).
#' \item czasObslugi - expected time of waiting in queue, in minutes. See also: \code{\link[kolejkeR]{get_waiting_time}}.
#' \item lp - ordinal number.
#' \item idGrupy - ID of a queue from \code{nazwaGrupy}.
#' \item liczbaCzynnychStan - amount of opened counters. See also: \code{\link[kolejkeR]{get_open_counters}}.
#' \item nazwaGrupy - a name of a queue. See also: \code{\link[kolejkeR]{get_available_queues}}.
#' \item literaGrupy - a single letter symbolizing a queue name from \code{nazwaGrupy}.
#' \item liczbaKlwKolejce - amount of people in queue. See also: \code{\link[kolejkeR]{get_number_of_people}}.
#' \item aktualnyNumer - current ticket number. See also: \code{\link[kolejkeR]{get_current_ticket_number}}.
#' }
#' @examples
#' office <- get_available_offices()[1]
#' get_raw_data(office)
#' @seealso \code{\link[kolejkeR]{get_waiting_time}} and others to extract data directly.
#' @seealso \code{\link[kolejkeR]{get_waiting_time_verbose}} and others for a more verbose output.
#' @export
get_raw_data <- function(office_name) {
get_data(office_name)
}
#' @title Get available offices
#' @family {getters}
#' @description A function that returns available office names to pass down to other methods.
#' @return A \code{character} vector of names of offices in Warsaw
#' @examples offices <- get_available_offices()
#' @export
get_available_offices <- function() {
names(office_ids_list)
}
#' @title Get available queues
#' @family {getters}
#' @description A function that returns available queue names to pass down to other methods.
#' @return A \code{character} vector of names of queues in one of the offices in Warsaw.
#' @inheritParams get_raw_data
#' @examples office <- get_available_offices()[1]
#' get_available_queues(office)
#' @export
get_available_queues <- function(office_name) {
get_data(office_name)[["nazwaGrupy"]]
}
#' @title Get specific data directly
#' @inheritParams get_raw_data
#' @param queue_name A \code{character} vector describing the queues we are interested in.
#' You can get a list of possible values using \code{\link[kolejkeR]{get_available_queues}} function.
#' @description Several functions to get specific data, such as waiting time, open counters, current ticket number and
#' the number of people in a set of specific queues in a specified office.
#' @describeIn get_waiting_time Returns expected time to be served.
#' @return A \code{character} vector (unless specified differently below) of the same length as \code{queue_name}, containing the information dependent on the called function.
#'
#' If \code{get_waiting_time} is called: A \code{numeric} vector with estimated time of waiting in the queues, in minutes.
#' @examples office <- get_available_offices()[1]
#' queue <- get_available_queues(office)
#'
#' get_waiting_time(office, queue)
#'
#' get_open_counters(office, queue)
#'
#' get_current_ticket_number(office, queue)
#'
#' get_number_of_people(office, queue)
#' @seealso \code{\link[kolejkeR]{get_waiting_time_verbose}} and others for a more verbose output.
#' @export
get_waiting_time <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
minutes <- data[data[["nazwaGrupy"]] == queue_name, "czasObslugi"]
as.numeric(minutes)
}
#' @title Get specific data verbosely
#' @inheritParams get_raw_data
#' @param queue_name A \code{character} describing a queue we are interested in.
#' You can get a list of possible values using \code{\link[kolejkeR]{get_available_queues}} function.
#' @param language A \code{character}. Only two languages supported: English (\code{"en"}) and Polish (\code{"pl"}).
#' @description Several functions to get specific data, such as waiting time, open counters, current ticket number and
#' the number of people in a set of specified queues in a specified office.
#' @describeIn get_waiting_time_verbose Returns expected time to be served.
#' @return A \code{character} vector of the same length as \code{queue_name} with each element in format depending on the called function and the variable \code{language}. Below we assume, that \code{language} variable is default.
#'
#' If \code{get_waiting_time_verbose} is called:
#'
#' "Waiting time for <queue name> is x minutes".
#' @examples office <- get_available_offices()[1]
#' queue <- get_available_queues(office)
#'
#' get_waiting_time_verbose(office, queue)
#'
#' get_open_counters_verbose(office, queue)
#'
#' get_current_ticket_number_verbose(office, queue)
#'
#' get_number_of_people_verbose(office, queue)
#' @seealso \code{\link[kolejkeR]{get_waiting_time}} and others to extract data directly.
#' @export
get_waiting_time_verbose <- function(office_name, queue_name, language="en") {
minutes <- get_waiting_time(office_name, queue_name)
apply(rbind(minutes, queue_name), 2, function(x) {
as.character(
glue::glue(texts[[language]][["get_waiting_time"]],
.envir=list(queue_name=x[2],
minutes=x[1],
ending=female_endings[as.numeric(x[1]) %% 10 + 1]))
)
})
}
#' @inheritParams get_waiting_time
#' @describeIn get_waiting_time Returns the number of open counters.
#' @return If \code{get_open_counters} is called - A \code{numeric} vector with the numbers of open counters servicing the queues.
#' @export
get_open_counters <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
counters <- data[data[["nazwaGrupy"]] == queue_name, "liczbaCzynnychStan"]
counters
}
#' @inheritParams get_waiting_time_verbose
#' @describeIn get_waiting_time_verbose Returns the number of open counters.
#' @return If \code{get_open_counters_verbose} is called:
#'
#' "There are x open counters for <queue name>".
#' @export
get_open_counters_verbose <- function(office_name, queue_name, language = "en") {
counters <- get_open_counters(office_name, queue_name)
apply(rbind(counters, queue_name), 2, function(x) {
as.character(
glue::glue(texts[[language]][["get_open_counters"]],
.envir=list(queue_name=x[2],
counters_literal=counters_to_string()[as.numeric(x[1]) %% 10 + 1],
counters=as.character(x[1])))
)
})
}
#' @inheritParams get_waiting_time
#' @describeIn get_waiting_time Returns the identifier of the current ticket.
#' @return If \code{get_current_ticket_number} is called - the current ticket identifiers in the queues.
#' @export
get_current_ticket_number <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
ticket_number <- data[data[["nazwaGrupy"]] == queue_name, "aktualnyNumer"]
if(ticket_number == ""){ticket_number <- 0}
ticket_number
}
#' @inheritParams get_waiting_time_verbose
#' @describeIn get_waiting_time_verbose Returns current ticket number.
#' @return If \code{get_current_ticket_number_verbose} is called:
#'
#' "Current ticket number is x"
#' @export
get_current_ticket_number_verbose <- function(office_name, queue_name, language="en") {
ticket_number <- get_current_ticket_number(office_name, queue_name)
sapply(ticket_number, function(x) {
as.character(
glue::glue(texts[[language]][["get_current_ticket_number"]],
.envir=list(ticket_number=x))
)
})
}
#' @inheritParams get_waiting_time
#' @describeIn get_waiting_time Returns the number of people waiting in the queue.
#' @return If \code{get_number_of_people} is called - A \code{numeric} vector with the numbers of people waiting in the queues.
#' @export
get_number_of_people <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
number_of_people <- data[data[["nazwaGrupy"]] == queue_name, "liczbaKlwKolejce"]
number_of_people
}
#' @inheritParams get_waiting_time_verbose
#' @describeIn get_waiting_time_verbose Returns the number of people waiting in the specified queue.
#' @return If \code{get_number_of_people_verbose} is called:
#'
#' "There are x people in <queue name>"
#' @export
get_number_of_people_verbose <- function(office_name, queue_name, language = 'en') {
number_of_people <- get_number_of_people(office_name, queue_name)
apply(rbind(number_of_people, queue_name), 2, function(x) {
as.character(
glue::glue(texts[[language]][["get_number_of_people"]],
.envir=list(queue_name=x[2],
number_of_people=x[1]))
)
})
}
#' @title Dump data to csv
#' @description Dumps data from Warsaw queue api from all offices to csv file
#' @param filename \code{character} filename for resulting csv, should include file extension
#'
#' @export
append_api_data_to_csv <- function(filename) {
queue_data <- get_all_data_with_time()
utils::write.table(queue_data, filename, sep = ",", col.names = !file.exists(filename), append = T, row.names = FALSE)
}
|
/R/api.R
|
permissive
|
HaZdula/kolejkeR
|
R
| false | false | 9,628 |
r
|
#' @title Get raw data
#' @description A function that returns all unprocessed data concerning one of the offices in Warsaw.
#' @param office_name \code{character} acronym of office in Warsaw.
#'
#' You can get a list of possible values using \code{\link[kolejkeR]{get_available_offices}} function.
#' @return A \code{data.frame} with following columns:
#' \itemize{
#' \item status - either 0 (queue is not operating) or 1 (queue is operating).
#' \item czasObslugi - expected time of waiting in queue, in minutes. See also: \code{\link[kolejkeR]{get_waiting_time}}.
#' \item lp - ordinal number.
#' \item idGrupy - ID of a queue from \code{nazwaGrupy}.
#' \item liczbaCzynnychStan - amount of opened counters. See also: \code{\link[kolejkeR]{get_open_counters}}.
#' \item nazwaGrupy - a name of a queue. See also: \code{\link[kolejkeR]{get_available_queues}}.
#' \item literaGrupy - a single letter symbolizing a queue name from \code{nazwaGrupy}.
#' \item liczbaKlwKolejce - amount of people in queue. See also: \code{\link[kolejkeR]{get_number_of_people}}.
#' \item aktualnyNumer - current ticket number. See also: \code{\link[kolejkeR]{get_current_ticket_number}}.
#' }
#' @examples
#' office <- get_available_offices()[1]
#' get_raw_data(office)
#' @seealso \code{\link[kolejkeR]{get_waiting_time}} and others to extract data directly.
#' @seealso \code{\link[kolejkeR]{get_waiting_time_verbose}} and others for a more verbose output.
#' @export
get_raw_data <- function(office_name) {
get_data(office_name)
}
#' @title Get available offices
#' @family {getters}
#' @description A function that returns available office names to pass down to other methods.
#' @return A \code{character} vector of names of offices in Warsaw
#' @examples offices <- get_available_offices()
#' @export
get_available_offices <- function() {
names(office_ids_list)
}
#' @title Get available queues
#' @family {getters}
#' @description A function that returns available queue names to pass down to other methods.
#' @return A \code{character} vector of names of queues in one of the offices in Warsaw.
#' @inheritParams get_raw_data
#' @examples office <- get_available_offices()[1]
#' get_available_queues(office)
#' @export
get_available_queues <- function(office_name) {
get_data(office_name)[["nazwaGrupy"]]
}
#' @title Get specific data directly
#' @inheritParams get_raw_data
#' @param queue_name A \code{character} vector describing the queues we are interested in.
#' You can get a list of possible values using \code{\link[kolejkeR]{get_available_queues}} function.
#' @description Several functions to get specific data, such as waiting time, open counters, current ticket number and
#' the number of people in a set of specific queues in a specified office.
#' @describeIn get_waiting_time Returns expected time to be served.
#' @return A \code{character} vector (unless specified differently below) of the same length as \code{queue_name}, containing the information dependent on the called function.
#'
#' If \code{get_waiting_time} is called: A \code{numeric} vector with estimated time of waiting in the queues, in minutes.
#' @examples office <- get_available_offices()[1]
#' queue <- get_available_queues(office)
#'
#' get_waiting_time(office, queue)
#'
#' get_open_counters(office, queue)
#'
#' get_current_ticket_number(office, queue)
#'
#' get_number_of_people(office, queue)
#' @seealso \code{\link[kolejkeR]{get_waiting_time_verbose}} and others for a more verbose output.
#' @export
get_waiting_time <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
minutes <- data[data[["nazwaGrupy"]] == queue_name, "czasObslugi"]
as.numeric(minutes)
}
#' @title Get specific data verbosely
#' @inheritParams get_raw_data
#' @param queue_name A \code{character} describing a queue we are interested in.
#' You can get a list of possible values using \code{\link[kolejkeR]{get_available_queues}} function.
#' @param language A \code{character}. Only two languages supported: English (\code{"en"}) and Polish (\code{"pl"}).
#' @description Several functions to get specific data, such as waiting time, open counters, current ticket number and
#' the number of people in a set of specified queues in a specified office.
#' @describeIn get_waiting_time_verbose Returns expected time to be served.
#' @return A \code{character} vector of the same length as \code{queue_name} with each element in format depending on the called function and the variable \code{language}. Below we assume, that \code{language} variable is default.
#'
#' If \code{get_waiting_time_verbose} is called:
#'
#' "Waiting time for <queue name> is x minutes".
#' @examples office <- get_available_offices()[1]
#' queue <- get_available_queues(office)
#'
#' get_waiting_time_verbose(office, queue)
#'
#' get_open_counters_verbose(office, queue)
#'
#' get_current_ticket_number_verbose(office, queue)
#'
#' get_number_of_people_verbose(office, queue)
#' @seealso \code{\link[kolejkeR]{get_waiting_time}} and others to extract data directly.
#' @export
get_waiting_time_verbose <- function(office_name, queue_name, language="en") {
minutes <- get_waiting_time(office_name, queue_name)
apply(rbind(minutes, queue_name), 2, function(x) {
as.character(
glue::glue(texts[[language]][["get_waiting_time"]],
.envir=list(queue_name=x[2],
minutes=x[1],
ending=female_endings[as.numeric(x[1]) %% 10 + 1]))
)
})
}
#' @inheritParams get_waiting_time
#' @describeIn get_waiting_time Returns the number of open counters.
#' @return If \code{get_open_counters} is called - A \code{numeric} vector with the numbers of open counters servicing the queues.
#' @export
get_open_counters <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
counters <- data[data[["nazwaGrupy"]] == queue_name, "liczbaCzynnychStan"]
counters
}
#' @inheritParams get_waiting_time_verbose
#' @describeIn get_waiting_time_verbose Returns the number of open counters.
#' @return If \code{get_open_counters_verbose} is called:
#'
#' "There are x open counters for <queue name>".
#' @export
get_open_counters_verbose <- function(office_name, queue_name, language = "en") {
counters <- get_open_counters(office_name, queue_name)
apply(rbind(counters, queue_name), 2, function(x) {
as.character(
glue::glue(texts[[language]][["get_open_counters"]],
.envir=list(queue_name=x[2],
counters_literal=counters_to_string()[as.numeric(x[1]) %% 10 + 1],
counters=as.character(x[1])))
)
})
}
#' @inheritParams get_waiting_time
#' @describeIn get_waiting_time Returns the identifier of the current ticket.
#' @return If \code{get_current_ticket_number} is called - the current ticket identifiers in the queues.
#' @export
get_current_ticket_number <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
ticket_number <- data[data[["nazwaGrupy"]] == queue_name, "aktualnyNumer"]
  ticket_number[ticket_number == ""] <- 0
ticket_number
}
#' @inheritParams get_waiting_time_verbose
#' @describeIn get_waiting_time_verbose Returns current ticket number.
#' @return If \code{get_current_ticket_number_verbose} is called:
#'
#' "Current ticket number is x"
#' @export
get_current_ticket_number_verbose <- function(office_name, queue_name, language="en") {
ticket_number <- get_current_ticket_number(office_name, queue_name)
sapply(ticket_number, function(x) {
as.character(
glue::glue(texts[[language]][["get_current_ticket_number"]],
.envir=list(ticket_number=x))
)
})
}
#' @inheritParams get_waiting_time
#' @describeIn get_waiting_time Returns the number of people waiting in the queues.
#' @return If \code{get_number_of_people} is called - A \code{numeric} vector with the numbers of people waiting in the queues.
#' @export
get_number_of_people <- function(office_name, queue_name) {
data <- get_data(office_name)
if(any(!queue_name %in% data[["nazwaGrupy"]])) stop("Unrecognized queue name!")
number_of_people <- data[data[["nazwaGrupy"]] == queue_name, "liczbaKlwKolejce"]
number_of_people
}
#' @inheritParams get_waiting_time_verbose
#' @describeIn get_waiting_time_verbose Returns the number of people waiting in the specified queues.
#' @return If \code{get_number_of_people_verbose} is called:
#'
#' "There are x people in <queue name>"
#' @export
get_number_of_people_verbose <- function(office_name, queue_name, language = 'en') {
number_of_people <- get_number_of_people(office_name, queue_name)
apply(rbind(number_of_people, queue_name), 2, function(x) {
as.character(
glue::glue(texts[[language]][["get_number_of_people"]],
.envir=list(queue_name=x[2],
number_of_people=x[1]))
)
})
}
#' @title Dump data to csv
#' @description Dumps data for all offices from the Warsaw queue API and appends it to a csv file.
#' @param filename \code{character}. Filename for the resulting csv; should include the file extension.
#'
#' @export
append_api_data_to_csv <- function(filename) {
queue_data <- get_all_data_with_time()
utils::write.table(queue_data, filename, sep = ",", col.names = !file.exists(filename), append = T, row.names = FALSE)
}
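## A minimal usage sketch of the getters above. The calls hit the live Warsaw
## queue API, so they are wrapped in if (interactive()); "queue_snapshot.csv"
## is just an example file name.
if (interactive()) {
  office <- get_available_offices()[1]
  queues <- get_available_queues(office)
  get_waiting_time_verbose(office, queues[1], language = "en")
  append_api_data_to_csv("queue_snapshot.csv")
}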
|
# penorm: asymmetry (expectile) level of the normal distribution, i.e. the
# level tau for which e is the tau-expectile of N(m, sd^2).
penorm <-
function (e, m = 0, sd = 1)
{
z = (e - m)/sd
p = pnorm(z)
d = dnorm(z)
u = -d - z * p
asy = u/(2 * u + z)
return(asy)
}
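# Quick sanity checks of penorm (illustrative): the mean of a normal
# distribution is its 0.5-expectile, so its asymmetry level is 0.5.
penorm(0)                 # 0.5
penorm(1, m = 0, sd = 1)  # asymmetry level at which e = 1 is the expectile of N(0, 1)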
|
/expectreg/R/penorm.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 155 |
r
|
# penorm: asymmetry (expectile) level of the normal distribution, i.e. the
# level tau for which e is the tau-expectile of N(m, sd^2).
penorm <-
function (e, m = 0, sd = 1)
{
z = (e - m)/sd
p = pnorm(z)
d = dnorm(z)
u = -d - z * p
asy = u/(2 * u + z)
return(asy)
}
|
x=read.csv(file.choose())
x
head(x)
Job=x$JOB
Cons=x$CONSERVATIVE
Soc=x$SOCIAL
Out=x$OUTDOOR
Jcode=ifelse(Job=='Mechanic',1,0)
plot(Out,jitter(Jcode, 0.15),pch=19,
xlab = "Outdoor", ylab = "Job(0 - Customer Service, 1 - Mechanic)")
g=glm(JOB~.,family=binomial, data=x)
g
summary(g)
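# Illustrative follow-up: predicted probabilities from the fitted model and a
# rough confusion table at a 0.5 cutoff (TRUE = predicted 'Mechanic', assuming
# 'Mechanic' is the second factor level of JOB).
p_hat = predict(g, type = "response")
table(observed = Job, predicted = p_hat > 0.5)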
|
/Logistic.R
|
no_license
|
bhupi18/BHUPENDRA-
|
R
| false | false | 303 |
r
|
x=read.csv(file.choose())
x
head(x)
Job=x$JOB
Cons=x$CONSERVATIVE
Soc=x$SOCIAL
Out=x$OUTDOOR
Jcode=ifelse(Job=='Mechanic',1,0)
plot(Out,jitter(Jcode, 0.15),pch=19,
xlab = "Outdoor", ylab = "Job(0 - Customer Service, 1 - Mechanic)")
g=glm(JOB~.,family=binomial, data=x)
g
summary(g)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_data.R
\name{import_actigraph_csv_chunked}
\alias{import_actigraph_csv_chunked}
\title{Import large raw multi-channel accelerometer data stored in Actigraph raw csv
format in chunks}
\usage{
import_actigraph_csv_chunked(
filepath,
in_voltage = FALSE,
header = TRUE,
chunk_samples = 180000
)
}
\arguments{
\item{filepath}{string. The filepath of the input data. The first column of
the input data should always include timestamps.}
\item{in_voltage}{set as TRUE only when the input Actigraph csv file is in
analog quantized format and needs to be converted into g values}
\item{header}{boolean. If TRUE, the input csv file will have column names in
the first row.}
\item{chunk_samples}{number. The number of samples in each chunk. Default is
180000.}
}
\value{
list. The list contains two items. The first item is a generator
function; each time it is called, it returns a data.frame holding the
next imported chunk. The second item is a \code{close} function which you can
call at any moment to stop loading the file.
}
\description{
\code{import_actigraph_csv_chunked} imports the raw multi-channel accelerometer data
stored in Actigraph raw csv format. It supports files from the following
devices: GT3X, GT3X+, GT3X+BT, GT9X, and GT9X-IMU.
}
\details{
For old devices (GT3X) that store accelerometer values as digital voltage,
the function will convert the values to \eqn{g} units using the following
equation.
\deqn{x_g = \frac{x_{voltage}r}{(2 ^ r) - \frac{v}{2}}}
Where \eqn{v} is the max voltage corresponding to the max accelerometer value
that can be found in the meta section in the csv file; \eqn{r} is the
resolution level which is the number of bits used to store the voltage
values. \eqn{r} can also be found in the meta section in the csv file.
}
\section{How is it used in MIMS-unit algorithm?}{
This function is a File IO
function that is used to import data from Actigraph devices during
algorithm validation.
}
\examples{
default_ops = options()
options(digits.secs=3)
# Use the actigraph csv file shipped with the package
filepath = system.file('extdata', 'actigraph_timestamped.csv', package='MIMSunit')
# Check original file format
readLines(filepath)[1:15]
# Example 1: Load chunks every 2000 samples
results = import_actigraph_csv_chunked(filepath, chunk_samples=2000)
next_chunk = results[[1]]
close_connection = results[[2]]
# Check data as chunks, you can see chunks are shifted at each iteration.
n = 1
repeat {
df = next_chunk()
if (nrow(df) > 0) {
print(paste('chunk', n))
print(paste("df:", df[1, 1], '-', df[nrow(df),1]))
n = n + 1
}
else {
break
}
}
# Close connection after reading all the data
close_connection()
# Example 2: Close loading early
results = import_actigraph_csv_chunked(filepath, chunk_samples=2000)
next_chunk = results[[1]]
close_connection = results[[2]]
# Check data as chunks, you can see chunk time is shifting forward at each iteration.
n = 1
repeat {
df = next_chunk()
if (nrow(df) > 0) {
print(paste('chunk', n))
print(paste("df:", df[1, 1], '-', df[nrow(df),1]))
n = n + 1
close_connection()
}
else {
break
}
}
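# Example 3 (illustrative sketch): manual conversion following the equation
# shown in the Details section. v and r are hypothetical here; real values
# come from the meta section of the csv file.
x_voltage = 512   # hypothetical analog reading
v = 3             # hypothetical max voltage
r = 10            # hypothetical resolution in bits
(x_voltage * r) / (2^r - v / 2)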
# Restore default options
options(default_ops)
}
\seealso{
Other File I/O functions:
\code{\link{export_to_actilife}()},
\code{\link{import_actigraph_count_csv}()},
\code{\link{import_actigraph_csv}()},
\code{\link{import_actigraph_meta}()},
\code{\link{import_activpal3_csv}()},
\code{\link{import_enmo_csv}()},
\code{\link{import_mhealth_csv_chunked}()},
\code{\link{import_mhealth_csv}()}
}
\concept{File I/O functions}
|
/man/import_actigraph_csv_chunked.Rd
|
permissive
|
oslerinhealth/MIMSunit
|
R
| false | true | 3,752 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_data.R
\name{import_actigraph_csv_chunked}
\alias{import_actigraph_csv_chunked}
\title{Import large raw multi-channel accelerometer data stored in Actigraph raw csv
format in chunks}
\usage{
import_actigraph_csv_chunked(
filepath,
in_voltage = FALSE,
header = TRUE,
chunk_samples = 180000
)
}
\arguments{
\item{filepath}{string. The filepath of the input data. The first column of
the input data should always include timestamps.}
\item{in_voltage}{set as TRUE only when the input Actigraph csv file is in
analog quantized format and needs to be converted into g values}
\item{header}{boolean. If TRUE, the input csv file will have column names in
the first row.}
\item{chunk_samples}{number. The number of samples in each chunk. Default is
180000.}
}
\value{
list. The list contains two items. The first item is a generator
function; each time it is called, it returns a data.frame holding the
next imported chunk. The second item is a \code{close} function which you can
call at any moment to stop loading the file.
}
\description{
\code{import_actigraph_csv_chunked} imports the raw multi-channel accelerometer data
stored in Actigraph raw csv format. It supports files from the following
devices: GT3X, GT3X+, GT3X+BT, GT9X, and GT9X-IMU.
}
\details{
For old devices (GT3X) that store accelerometer values as digital voltage,
the function will convert the values to \eqn{g} units using the following
equation.
\deqn{x_g = \frac{x_{voltage}r}{(2 ^ r) - \frac{v}{2}}}
Where \eqn{v} is the max voltage corresponding to the max accelerometer value
that can be found in the meta section in the csv file; \eqn{r} is the
resolution level which is the number of bits used to store the voltage
values. \eqn{r} can also be found in the meta section in the csv file.
}
\section{How is it used in MIMS-unit algorithm?}{
This function is a File IO
function that is used to import data from Actigraph devices during
algorithm validation.
}
\examples{
default_ops = options()
options(digits.secs=3)
# Use the actigraph csv file shipped with the package
filepath = system.file('extdata', 'actigraph_timestamped.csv', package='MIMSunit')
# Check original file format
readLines(filepath)[1:15]
# Example 1: Load chunks every 2000 samples
results = import_actigraph_csv_chunked(filepath, chunk_samples=2000)
next_chunk = results[[1]]
close_connection = results[[2]]
# Check data as chunks, you can see chunks are shifted at each iteration.
n = 1
repeat {
df = next_chunk()
if (nrow(df) > 0) {
print(paste('chunk', n))
print(paste("df:", df[1, 1], '-', df[nrow(df),1]))
n = n + 1
}
else {
break
}
}
# Close connection after reading all the data
close_connection()
# Example 2: Close loading early
results = import_actigraph_csv_chunked(filepath, chunk_samples=2000)
next_chunk = results[[1]]
close_connection = results[[2]]
# Check data as chunks, you can see chunk time is shifting forward at each iteration.
n = 1
repeat {
df = next_chunk()
if (nrow(df) > 0) {
print(paste('chunk', n))
print(paste("df:", df[1, 1], '-', df[nrow(df),1]))
n = n + 1
close_connection()
}
else {
break
}
}
# Restore default options
options(default_ops)
}
\seealso{
Other File I/O functions:
\code{\link{export_to_actilife}()},
\code{\link{import_actigraph_count_csv}()},
\code{\link{import_actigraph_csv}()},
\code{\link{import_actigraph_meta}()},
\code{\link{import_activpal3_csv}()},
\code{\link{import_enmo_csv}()},
\code{\link{import_mhealth_csv_chunked}()},
\code{\link{import_mhealth_csv}()}
}
\concept{File I/O functions}
|
# Fit the penalized occupancy models of Hutchinson et al (2015).
computeMPLElambda = function(formula, data, knownOcc = numeric(0), starts, method = "BFGS", engine = c("C", "R")){
designMats <- getDesign(data, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
removed <- designMats$removed.sites
y <- truncateToBinary(y)
  ## convert knownOcc to logical so we can correctly handle NAs.
knownOccLog <- rep(FALSE, numSites(data))
knownOccLog[knownOcc] <- TRUE
if(length(removed)>0)
knownOccLog <- knownOccLog[-removed]
nDP <- ncol(V)
nOP <- ncol(X)
nP <- nDP + nOP
if(!missing(starts) && length(starts) != nP)
stop(paste("The number of starting values should be", nP))
if(missing(starts)) starts <- rep(0, nP)
LRparams = glm.fit(x=X,y=apply(y,1,max),family=binomial(),intercept=F,start=starts[1:nOP])
naiveOcc = mean(LRparams$fitted.values)
occuOutMLE = occu(formula,data,knownOcc = knownOcc, starts = starts,
method = "BFGS", engine = c("C", "R"), se = TRUE)
meanDet = mean((1+exp(-occuOutMLE[2]@estimates%*%t(V)))^-1)
MPLElambda = sqrt(sum(diag(occuOutMLE[2]@covMat)))*(1-(1-meanDet)^(dim(y)[2]))*(1-naiveOcc) # what if there are different numbers of visits to different sites?
return(MPLElambda)
}
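## Illustrative sketch of how the MPLE penalty weight returned above combines
## its pieces, using made-up numbers (not taken from any real fit).
if (interactive()) {
  se_sum    <- 0.8  # sqrt(sum(diag(covMat))) of the detection estimates
  mean_det  <- 0.4  # average per-visit detection probability
  naive_occ <- 0.3  # proportion of sites with at least one detection
  n_visits  <- 5    # visits per site
  se_sum * (1 - (1 - mean_det)^n_visits) * (1 - naive_occ)  # ~0.52
}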
occuPEN_CV <- function(formula, data, knownOcc = numeric(0), starts,
method = "BFGS", engine = c("C", "R"),
lambdaVec = c(0,2^seq(-4,4)),
pen.type = c("Bayes","Ridge"),
k = 5,
foldAssignments = NA,
...)
{
if(!is(data, "unmarkedFrameOccu"))
stop("Data is not an unmarkedFrameOccu object.")
pen.type = pen.type[1]
if (pen.type=="MPLE") stop("MPLE does not require cross-validation.")
if (!(pen.type=="Bayes" | pen.type=="Ridge"))
stop("pen.type not recognized. Choose Bayes or Ridge.")
if (length(lambdaVec)==1) stop("Must provide more than one lambda for cross-validation.")
engine <- match.arg(engine, c("C", "R"))
designMats <- getDesign(data, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
y <- truncateToBinary(y)
J <- ncol(y)
M <- nrow(y)
if (!(length(foldAssignments)==1 & is.na(foldAssignments)[1])) { # user-supplied foldAssignments
if (!(k==length(unique(foldAssignments)))) stop("Value of k does not match number of folds indicated in foldAssignments.")
} else { # create foldAssignments
# attempt to include sites with and without observations in each fold
foldAssignments = c(1:M)
idxsWithObs = which(rowSums(y)>0)
idxsWoObs = which(rowSums(y)==0)
if (length(idxsWithObs)>0 & length(idxsWoObs)>0) {
foldAssignments[idxsWithObs] = sample(rep(1:k,ceiling(length(idxsWithObs)/k))[1:length(idxsWithObs)])
foldAssignments[idxsWoObs] = sample(rep(1:k,ceiling(length(idxsWoObs)/k))[1:length(idxsWoObs)])
} else if (k<=M) {
foldAssignments = sample(rep(1:k,ceiling(M/k)))[1:M]
} else {
stop("k>M. More folds than sites creates folds. Specify a smaller k.")
}
}
#print(foldAssignments)
foldNames = unique(foldAssignments)
if(identical(engine, "C")) {
nll <- function(params) {
beta.psi <- params[1:nOP]
beta.p <- params[(nOP+1):nP]
.Call("nll_occu",
yvec, X, V, beta.psi, beta.p, nd, knownOccLog, navec,
X.offset, V.offset, "logit",
PACKAGE = "unmarked")
}
} else {
nll <- function(params) { # penalize this function
psi <- plogis(X %*% params[1 : nOP] + X.offset)
psi[knownOccLog] <- 1
pvec <- plogis(V %*% params[(nOP + 1) : nP] + V.offset)
cp <- (pvec^yvec) * ((1 - pvec)^(1 - yvec))
cp[navec] <- 1 # so that NA's don't modify likelihood
cpmat <- matrix(cp, M, J, byrow = TRUE) #
loglik <- log(rowProds(cpmat) * psi + nd * (1 - psi))
-sum(loglik)
}
} # end if (engine)
lambdaScores = lambdaVec*0 # score by held-out likelihood
for (f in 1:k) {
fold = foldNames[f]
occuTrain = data[which(foldAssignments!=fold),] # train on NOT this fold
occuTest = data[which(foldAssignments==fold),] # test on this fold
designMats <- getDesign(occuTest, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
removed <- designMats$removed.sites
X.offset <- designMats$X.offset; V.offset <- designMats$V.offset
if(is.null(X.offset)) {
X.offset <- rep(0, nrow(X))
}
if(is.null(V.offset)) {
V.offset <- rep(0, nrow(V))
}
y <- truncateToBinary(y)
J <- ncol(y)
M <- nrow(y)
  ## convert knownOcc to logical so we can correctly handle NAs.
knownOccLog <- rep(FALSE, numSites(data))
knownOccLog[knownOcc] <- TRUE
if(length(removed)>0)
knownOccLog <- knownOccLog[-removed]
occParms <- colnames(X)
detParms <- colnames(V)
nDP <- ncol(V)
nOP <- ncol(X)
nP <- nDP + nOP
if(!missing(starts) && length(starts) != nP)
stop(paste("The number of starting values should be", nP))
if(missing(starts)) starts <- rep(0, nP)
yvec <- as.numeric(t(y))
navec <- is.na(yvec)
nd <- ifelse(rowSums(y,na.rm=TRUE) == 0, 1, 0) # no det at site i
# For each lambda, get parameters on the training set, and use them
# to compute the likelihood on the held-out test fold.
for (la in 1:length(lambdaVec)) {
occuOut = occuPEN(formula, occuTrain, starts, lambda=lambdaVec[la],pen.type=pen.type)
ests = c(as.numeric(occuOut[1]@estimates),as.numeric(occuOut[2]@estimates))
lambdaScores[la] = lambdaScores[la] + nll(ests)
} # la
} # f
bestLambda = lambdaVec[which.min(lambdaScores)]
#print(lambdaScores)
occuOut = occuPEN(formula, data, starts=starts, lambda=bestLambda, pen.type=pen.type)
umfit <- new("unmarkedFitOccuPEN_CV", fitType = "occu", call = match.call(),
formula = formula, data = data,
sitesRemoved = designMats$removed.sites,
estimates = occuOut@estimates, AIC = occuOut@AIC,
opt = occuOut@opt,
negLogLike = occuOut@negLogLike,
nllFun = occuOut@nllFun, knownOcc = knownOccLog,
pen.type = pen.type, lambdaVec = lambdaVec,
k = k, foldAssignments = foldAssignments,
lambdaScores = lambdaScores, chosenLambda = bestLambda)
return(umfit)
} # fn: occuPEN_CV
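## Illustrative, self-contained sketch of the stratified fold assignment used
## inside occuPEN_CV above, run on a toy detection matrix (base R only).
if (interactive()) {
  set.seed(1)
  y_toy <- matrix(rbinom(20 * 3, 1, 0.3), nrow = 20)  # 20 sites, 3 visits
  k_toy <- 5
  folds <- integer(20)
  with_obs <- which(rowSums(y_toy) > 0)
  wo_obs   <- which(rowSums(y_toy) == 0)
  folds[with_obs] <- sample(rep(1:k_toy, length.out = length(with_obs)))
  folds[wo_obs]   <- sample(rep(1:k_toy, length.out = length(wo_obs)))
  table(folds)  # sites with and without detections spread across the folds
}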
occuPEN <- function(formula, data, knownOcc = numeric(0), starts,
method = "BFGS", engine = c("C", "R"),
# se = TRUE,
lambda = 0,
pen.type = c("Bayes","Ridge","MPLE"),
...)
{
if(!is(data, "unmarkedFrameOccu"))
stop("Data is not an unmarkedFrameOccu object.")
pen.type = pen.type[1]
if (!(pen.type=="Bayes" | pen.type=="Ridge" | pen.type=="MPLE"))
stop("pen.type not recognized. Choose Bayes, Ridge, or MPLE.")
engine <- match.arg(engine, c("C", "R"))
designMats <- getDesign(data, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
if (ncol(X)==1 & pen.type=="MPLE") stop("MPLE requires occupancy covariates.")
if (ncol(X)==1 & ncol(V)==1 & pen.type=="Ridge") stop("Ridge requires covariates.")
removed <- designMats$removed.sites
X.offset <- designMats$X.offset; V.offset <- designMats$V.offset
if(is.null(X.offset)) {
X.offset <- rep(0, nrow(X))
}
if(is.null(V.offset)) {
V.offset <- rep(0, nrow(V))
}
y <- truncateToBinary(y)
J <- ncol(y)
M <- nrow(y)
  ## convert knownOcc to logical so we can correctly handle NAs.
knownOccLog <- rep(FALSE, numSites(data))
knownOccLog[knownOcc] <- TRUE
if(length(removed)>0)
knownOccLog <- knownOccLog[-removed]
occParms <- colnames(X)
detParms <- colnames(V)
nDP <- ncol(V)
nOP <- ncol(X)
nP <- nDP + nOP
if(!missing(starts) && length(starts) != nP)
stop(paste("The number of starting values should be", nP))
if(missing(starts)) starts <- rep(0, nP)
yvec <- as.numeric(t(y))
navec <- is.na(yvec)
nd <- ifelse(rowSums(y,na.rm=TRUE) == 0, 1, 0) # no det at site i
## need to add offsets !!!!!!!!!!!!!!
## and fix bug causing crash when NAs are in V
## compute logistic regression MPLE targets and lambda:
if (pen.type=="MPLE") {
LRparams = glm.fit(x=X,y=apply(y,1,max),family=binomial(),intercept=F,start=starts[1:nOP])
MPLElambda = computeMPLElambda(formula, data, knownOcc = numeric(0), starts, method = "BFGS", engine = c("C", "R"))
if (MPLElambda != lambda) warning("Supplied lambda does not match the computed value. Proceeding with the supplied lambda.")
}
if(identical(engine, "C")) {
nll <- function(params) {
beta.psi <- params[1:nOP]
beta.p <- params[(nOP+1):nP]
if (pen.type=="Bayes") {
penalty = sum(params^2)*lambda*0.5
} else if (pen.type=="Ridge") {
penalty = 0
if (nOP>1) { penalty = penalty + sum((params[2:nOP])^2) }
if (nDP>1) { penalty = penalty + sum((params[(nOP+2):nP])^2) }
penalty = penalty*lambda*0.5
} else if (pen.type=="MPLE") {
penalty = abs(params[1:nOP]-LRparams$coefficients)
penalty = sum(penalty)*lambda
} else {
stop("pen.type not found")
}
.Call("nll_occuPEN",
yvec, X, V, beta.psi, beta.p, nd, knownOccLog, navec,
X.offset, V.offset, penalty,
PACKAGE = "unmarked")
}
} else {
nll <- function(params) { # penalize this function
psi <- plogis(X %*% params[1 : nOP] + X.offset)
psi[knownOccLog] <- 1
pvec <- plogis(V %*% params[(nOP + 1) : nP] + V.offset)
cp <- (pvec^yvec) * ((1 - pvec)^(1 - yvec))
cp[navec] <- 1 # so that NA's don't modify likelihood
cpmat <- matrix(cp, M, J, byrow = TRUE) #
loglik <- log(rowProds(cpmat) * psi + nd * (1 - psi))
#-sum(loglik)
if (pen.type=="Bayes") {
penalty = sum(params^2)*lambda*0.5
} else if (pen.type=="Ridge") {
penalty = 0
if (nOP>1) { penalty = penalty + sum((params[2:nOP])^2) }
if (nDP>1) { penalty = penalty + sum((params[(nOP+2):nP])^2) }
penalty = penalty*lambda*0.5
} else if (pen.type=="MPLE") {
penalty = abs(params[1:nOP]-LRparams$coefficients)
penalty = sum(penalty)*lambda
} else {
stop("pen.type not found")
}
penLL = sum(loglik) - penalty
return(-penLL)
}
} # end if (engine)
fm <- optim(starts, nll, method = method, hessian = FALSE, ...)
opt <- fm
covMat <- matrix(NA, nP, nP)
ests <- fm$par
fmAIC <- 2 * fm$value + 2 * nP #+ 2*nP*(nP + 1)/(M - nP - 1)
names(ests) <- c(occParms, detParms)
state <- unmarkedEstimate(name = "Occupancy", short.name = "psi",
estimates = ests[1:nOP],
covMat = as.matrix(covMat[1:nOP,1:nOP]),
invlink = "logistic",
invlinkGrad = "logistic.grad")
det <- unmarkedEstimate(name = "Detection", short.name = "p",
estimates = ests[(nOP + 1) : nP],
covMat = as.matrix(covMat[(nOP + 1) : nP,
(nOP + 1) : nP]),
invlink = "logistic",
invlinkGrad = "logistic.grad")
estimateList <- unmarkedEstimateList(list(state=state, det=det))
umfit <- new("unmarkedFitOccuPEN", fitType = "occu", call = match.call(),
formula = formula, data = data,
sitesRemoved = designMats$removed.sites,
estimates = estimateList, AIC = fmAIC, opt = opt,
negLogLike = fm$value,
nllFun = nll, knownOcc = knownOccLog,
pen.type = pen.type, lambda = c(lambda))
return(umfit)
}
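## Illustrative usage sketch with simulated data. It assumes the unmarked
## package is attached so that unmarkedFrameOccu() is available; the covariate
## effect sizes are arbitrary.
if (interactive()) {
  set.seed(123)
  M <- 50; J <- 4
  x <- rnorm(M)
  z <- rbinom(M, 1, plogis(-0.5 + x))           # latent occupancy state
  y <- matrix(rbinom(M * J, 1, 0.4), M, J) * z  # detections given occupancy
  umf <- unmarkedFrameOccu(y = y, siteCovs = data.frame(x = x))
  occuPEN(~1 ~ x, umf, lambda = 1, pen.type = "Bayes")
}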
|
/fuzzedpackages/unmarked/R/occuPEN.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 11,909 |
r
|
# Fit the penalized occupancy models of Hutchinson et al (2015).
computeMPLElambda = function(formula, data, knownOcc = numeric(0), starts, method = "BFGS", engine = c("C", "R")){
designMats <- getDesign(data, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
removed <- designMats$removed.sites
y <- truncateToBinary(y)
  ## convert knownOcc to logical so we can correctly handle NAs.
knownOccLog <- rep(FALSE, numSites(data))
knownOccLog[knownOcc] <- TRUE
if(length(removed)>0)
knownOccLog <- knownOccLog[-removed]
nDP <- ncol(V)
nOP <- ncol(X)
nP <- nDP + nOP
if(!missing(starts) && length(starts) != nP)
stop(paste("The number of starting values should be", nP))
if(missing(starts)) starts <- rep(0, nP)
LRparams = glm.fit(x=X,y=apply(y,1,max),family=binomial(),intercept=F,start=starts[1:nOP])
naiveOcc = mean(LRparams$fitted.values)
occuOutMLE = occu(formula,data,knownOcc = knownOcc, starts = starts,
method = "BFGS", engine = c("C", "R"), se = TRUE)
meanDet = mean((1+exp(-occuOutMLE[2]@estimates%*%t(V)))^-1)
MPLElambda = sqrt(sum(diag(occuOutMLE[2]@covMat)))*(1-(1-meanDet)^(dim(y)[2]))*(1-naiveOcc) # what if there are different numbers of visits to different sites?
return(MPLElambda)
}
occuPEN_CV <- function(formula, data, knownOcc = numeric(0), starts,
method = "BFGS", engine = c("C", "R"),
lambdaVec = c(0,2^seq(-4,4)),
pen.type = c("Bayes","Ridge"),
k = 5,
foldAssignments = NA,
...)
{
if(!is(data, "unmarkedFrameOccu"))
stop("Data is not an unmarkedFrameOccu object.")
pen.type = pen.type[1]
if (pen.type=="MPLE") stop("MPLE does not require cross-validation.")
if (!(pen.type=="Bayes" | pen.type=="Ridge"))
stop("pen.type not recognized. Choose Bayes or Ridge.")
if (length(lambdaVec)==1) stop("Must provide more than one lambda for cross-validation.")
engine <- match.arg(engine, c("C", "R"))
designMats <- getDesign(data, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
y <- truncateToBinary(y)
J <- ncol(y)
M <- nrow(y)
if (!(length(foldAssignments)==1 & is.na(foldAssignments)[1])) { # user-supplied foldAssignments
if (!(k==length(unique(foldAssignments)))) stop("Value of k does not match number of folds indicated in foldAssignments.")
} else { # create foldAssignments
# attempt to include sites with and without observations in each fold
foldAssignments = c(1:M)
idxsWithObs = which(rowSums(y)>0)
idxsWoObs = which(rowSums(y)==0)
if (length(idxsWithObs)>0 & length(idxsWoObs)>0) {
foldAssignments[idxsWithObs] = sample(rep(1:k,ceiling(length(idxsWithObs)/k))[1:length(idxsWithObs)])
foldAssignments[idxsWoObs] = sample(rep(1:k,ceiling(length(idxsWoObs)/k))[1:length(idxsWoObs)])
} else if (k<=M) {
foldAssignments = sample(rep(1:k,ceiling(M/k)))[1:M]
} else {
stop("k>M. More folds than sites creates folds. Specify a smaller k.")
}
}
#print(foldAssignments)
foldNames = unique(foldAssignments)
if(identical(engine, "C")) {
nll <- function(params) {
beta.psi <- params[1:nOP]
beta.p <- params[(nOP+1):nP]
.Call("nll_occu",
yvec, X, V, beta.psi, beta.p, nd, knownOccLog, navec,
X.offset, V.offset, "logit",
PACKAGE = "unmarked")
}
} else {
nll <- function(params) { # penalize this function
psi <- plogis(X %*% params[1 : nOP] + X.offset)
psi[knownOccLog] <- 1
pvec <- plogis(V %*% params[(nOP + 1) : nP] + V.offset)
cp <- (pvec^yvec) * ((1 - pvec)^(1 - yvec))
cp[navec] <- 1 # so that NA's don't modify likelihood
cpmat <- matrix(cp, M, J, byrow = TRUE) #
loglik <- log(rowProds(cpmat) * psi + nd * (1 - psi))
-sum(loglik)
}
} # end if (engine)
lambdaScores = lambdaVec*0 # score by held-out likelihood
for (f in 1:k) {
fold = foldNames[f]
occuTrain = data[which(foldAssignments!=fold),] # train on NOT this fold
occuTest = data[which(foldAssignments==fold),] # test on this fold
designMats <- getDesign(occuTest, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
removed <- designMats$removed.sites
X.offset <- designMats$X.offset; V.offset <- designMats$V.offset
if(is.null(X.offset)) {
X.offset <- rep(0, nrow(X))
}
if(is.null(V.offset)) {
V.offset <- rep(0, nrow(V))
}
y <- truncateToBinary(y)
J <- ncol(y)
M <- nrow(y)
  ## convert knownOcc to logical so we can correctly handle NAs.
knownOccLog <- rep(FALSE, numSites(data))
knownOccLog[knownOcc] <- TRUE
if(length(removed)>0)
knownOccLog <- knownOccLog[-removed]
occParms <- colnames(X)
detParms <- colnames(V)
nDP <- ncol(V)
nOP <- ncol(X)
nP <- nDP + nOP
if(!missing(starts) && length(starts) != nP)
stop(paste("The number of starting values should be", nP))
if(missing(starts)) starts <- rep(0, nP)
yvec <- as.numeric(t(y))
navec <- is.na(yvec)
nd <- ifelse(rowSums(y,na.rm=TRUE) == 0, 1, 0) # no det at site i
# For each lambda, get parameters on the training set, and use them
# to compute the likelihood on the held-out test fold.
for (la in 1:length(lambdaVec)) {
occuOut = occuPEN(formula, occuTrain, starts, lambda=lambdaVec[la],pen.type=pen.type)
ests = c(as.numeric(occuOut[1]@estimates),as.numeric(occuOut[2]@estimates))
lambdaScores[la] = lambdaScores[la] + nll(ests)
} # la
} # f
bestLambda = lambdaVec[which.min(lambdaScores)]
#print(lambdaScores)
occuOut = occuPEN(formula, data, starts=starts, lambda=bestLambda, pen.type=pen.type)
umfit <- new("unmarkedFitOccuPEN_CV", fitType = "occu", call = match.call(),
formula = formula, data = data,
sitesRemoved = designMats$removed.sites,
estimates = occuOut@estimates, AIC = occuOut@AIC,
opt = occuOut@opt,
negLogLike = occuOut@negLogLike,
nllFun = occuOut@nllFun, knownOcc = knownOccLog,
pen.type = pen.type, lambdaVec = lambdaVec,
k = k, foldAssignments = foldAssignments,
lambdaScores = lambdaScores, chosenLambda = bestLambda)
return(umfit)
} # fn: occuPEN_CV
occuPEN <- function(formula, data, knownOcc = numeric(0), starts,
method = "BFGS", engine = c("C", "R"),
# se = TRUE,
lambda = 0,
pen.type = c("Bayes","Ridge","MPLE"),
...)
{
if(!is(data, "unmarkedFrameOccu"))
stop("Data is not an unmarkedFrameOccu object.")
pen.type = pen.type[1]
if (!(pen.type=="Bayes" | pen.type=="Ridge" | pen.type=="MPLE"))
stop("pen.type not recognized. Choose Bayes, Ridge, or MPLE.")
engine <- match.arg(engine, c("C", "R"))
designMats <- getDesign(data, formula)
X <- designMats$X; V <- designMats$V; y <- designMats$y
if (ncol(X)==1 & pen.type=="MPLE") stop("MPLE requires occupancy covariates.")
if (ncol(X)==1 & ncol(V)==1 & pen.type=="Ridge") stop("Ridge requires covariates.")
removed <- designMats$removed.sites
X.offset <- designMats$X.offset; V.offset <- designMats$V.offset
if(is.null(X.offset)) {
X.offset <- rep(0, nrow(X))
}
if(is.null(V.offset)) {
V.offset <- rep(0, nrow(V))
}
y <- truncateToBinary(y)
J <- ncol(y)
M <- nrow(y)
  ## convert knownOcc to logical so we can correctly handle NAs.
knownOccLog <- rep(FALSE, numSites(data))
knownOccLog[knownOcc] <- TRUE
if(length(removed)>0)
knownOccLog <- knownOccLog[-removed]
occParms <- colnames(X)
detParms <- colnames(V)
nDP <- ncol(V)
nOP <- ncol(X)
nP <- nDP + nOP
if(!missing(starts) && length(starts) != nP)
stop(paste("The number of starting values should be", nP))
if(missing(starts)) starts <- rep(0, nP)
yvec <- as.numeric(t(y))
navec <- is.na(yvec)
nd <- ifelse(rowSums(y,na.rm=TRUE) == 0, 1, 0) # no det at site i
## need to add offsets !!!!!!!!!!!!!!
## and fix bug causing crash when NAs are in V
## compute logistic regression MPLE targets and lambda:
if (pen.type=="MPLE") {
LRparams = glm.fit(x=X,y=apply(y,1,max),family=binomial(),intercept=F,start=starts[1:nOP])
MPLElambda = computeMPLElambda(formula, data, knownOcc = numeric(0), starts, method = "BFGS", engine = c("C", "R"))
if (MPLElambda != lambda) warning("Supplied lambda does not match the computed value. Proceeding with the supplied lambda.")
}
if(identical(engine, "C")) {
nll <- function(params) {
beta.psi <- params[1:nOP]
beta.p <- params[(nOP+1):nP]
if (pen.type=="Bayes") {
penalty = sum(params^2)*lambda*0.5
} else if (pen.type=="Ridge") {
penalty = 0
if (nOP>1) { penalty = penalty + sum((params[2:nOP])^2) }
if (nDP>1) { penalty = penalty + sum((params[(nOP+2):nP])^2) }
penalty = penalty*lambda*0.5
} else if (pen.type=="MPLE") {
penalty = abs(params[1:nOP]-LRparams$coefficients)
penalty = sum(penalty)*lambda
} else {
stop("pen.type not found")
}
.Call("nll_occuPEN",
yvec, X, V, beta.psi, beta.p, nd, knownOccLog, navec,
X.offset, V.offset, penalty,
PACKAGE = "unmarked")
}
} else {
nll <- function(params) { # penalize this function
psi <- plogis(X %*% params[1 : nOP] + X.offset)
psi[knownOccLog] <- 1
pvec <- plogis(V %*% params[(nOP + 1) : nP] + V.offset)
cp <- (pvec^yvec) * ((1 - pvec)^(1 - yvec))
cp[navec] <- 1 # so that NA's don't modify likelihood
cpmat <- matrix(cp, M, J, byrow = TRUE) #
loglik <- log(rowProds(cpmat) * psi + nd * (1 - psi))
#-sum(loglik)
if (pen.type=="Bayes") {
penalty = sum(params^2)*lambda*0.5
} else if (pen.type=="Ridge") {
penalty = 0
if (nOP>1) { penalty = penalty + sum((params[2:nOP])^2) }
if (nDP>1) { penalty = penalty + sum((params[(nOP+2):nP])^2) }
penalty = penalty*lambda*0.5
} else if (pen.type=="MPLE") {
penalty = abs(params[1:nOP]-LRparams$coefficients)
penalty = sum(penalty)*lambda
} else {
stop("pen.type not found")
}
penLL = sum(loglik) - penalty
return(-penLL)
}
} # end if (engine)
fm <- optim(starts, nll, method = method, hessian = FALSE, ...)
opt <- fm
covMat <- matrix(NA, nP, nP)
ests <- fm$par
fmAIC <- 2 * fm$value + 2 * nP #+ 2*nP*(nP + 1)/(M - nP - 1)
names(ests) <- c(occParms, detParms)
state <- unmarkedEstimate(name = "Occupancy", short.name = "psi",
estimates = ests[1:nOP],
covMat = as.matrix(covMat[1:nOP,1:nOP]),
invlink = "logistic",
invlinkGrad = "logistic.grad")
det <- unmarkedEstimate(name = "Detection", short.name = "p",
estimates = ests[(nOP + 1) : nP],
covMat = as.matrix(covMat[(nOP + 1) : nP,
(nOP + 1) : nP]),
invlink = "logistic",
invlinkGrad = "logistic.grad")
estimateList <- unmarkedEstimateList(list(state=state, det=det))
umfit <- new("unmarkedFitOccuPEN", fitType = "occu", call = match.call(),
formula = formula, data = data,
sitesRemoved = designMats$removed.sites,
estimates = estimateList, AIC = fmAIC, opt = opt,
negLogLike = fm$value,
nllFun = nll, knownOcc = knownOccLog,
pen.type = pen.type, lambda = c(lambda))
return(umfit)
}
|
librosa <- NULL # nocov start
np <- NULL
.onLoad <- function(libname, pkgname) {
reticulate::use_condaenv("r-reticulate")
np <<- reticulate::import("numpy", delay_load = FALSE)
librosa <<- reticulate::import("librosa", delay_load = FALSE)
} # nocov end
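# Illustrative usage sketch: once .onLoad has run, the module handles can be
# used directly. Assumes librosa is installed in the r-reticulate conda
# environment; "example.wav" is a hypothetical file path.
if (interactive()) {
  audio <- librosa$load("example.wav", sr = NULL)  # list of samples and sample rate
  samples <- audio[[1]]
  sample_rate <- audio[[2]]
}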
|
/R/zzz.R
|
permissive
|
UBC-MDS/AudioFilters_R
|
R
| false | false | 261 |
r
|
librosa <- NULL # nocov start
np <- NULL
.onLoad <- function(libname, pkgname) {
reticulate::use_condaenv("r-reticulate")
np <<- reticulate::import("numpy", delay_load = FALSE)
librosa <<- reticulate::import("librosa", delay_load = FALSE)
} # nocov end
|
# Question 5
#######################################################################################################################
#
# You can compute the ratio of the number of boys divided by the number of girls born in 1940 manually
# by typing 1211684/1148715.
#
# You can get that same ratio for all years by typing
#
# present$boys/present$girls
# Similarly, you can calculate the proportion of male newborns in 1940 by typing
#
# 1211684/(1211684 + 1148715)
# To get that proportion for all years use present$boys/(present$boys + present$girls).
#
# Note that with R as with your calculator, you need to be conscious of the order of operations.
# Here, we want to divide the number of boys by the total number of newborns, so we have to use parentheses.
# Without them, R will first do the division, then the addition, giving you something that is not a proportion.
#
# Make a plot of the proportion of boys over time, and based on the plot determine if the
# following statement is true or false: The proportion of boys born in the US has decreased over time.
#
#######################################################################################################################
# 1 TRUE
# 2 FALSE
plot(present$year, present$boys/(present$boys + present$girls))
# Answer - 1 TRUE
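# Illustrative check of the order-of-operations point made above:
1211684 / (1211684 + 1148715)  # proportion of boys in 1940, about 0.513
1211684 / 1211684 + 1148715    # without parentheses: 1 + 1148715, not a proportion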
|
/dataCamp/openCourses/dataAnalysisAndStatisticalInference/1_introductionToR/12_question5.R
|
permissive
|
odonnmi/learnNPractice
|
R
| false | false | 1,297 |
r
|
# Question 5
#######################################################################################################################
#
# You can compute the ratio of the number of boys divided by the number of girls born in 1940 manually
# by typing 1211684/1148715.
#
# You can get that same ratio for all years by typing
#
# present$boys/present$girls
# Similarly, you can calculate the proportion of male newborns in 1940 by typing
#
# 1211684/(1211684 + 1148715)
# To get that proportion for all years use present$boys/(present$boys + present$girls).
#
# Note that with R as with your calculator, you need to be conscious of the order of operations.
# Here, we want to divide the number of boys by the total number of newborns, so we have to use parentheses.
# Without them, R will first do the division, then the addition, giving you something that is not a proportion.
#
# Make a plot of the proportion of boys over time, and based on the plot determine if the
# following statement is true or false: The proportion of boys born in the US has decreased over time.
#
#######################################################################################################################
# 1 TRUE
# 2 FALSE
plot(present$year, present$boys/(present$boys + present$girls))
# Answer - 1 TRUE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grouped_box.R
\name{Grouped_BoxVilion}
\alias{Grouped_BoxVilion}
\title{Grouped_BoxVilion}
\usage{
Grouped_BoxVilion(
stat_type = "nonparametric",
data_name = "data.csv",
info_name = "info.csv",
plot.type = "box",
zscore = TRUE,
palette = "nrc_npg",
plot_nrow = 2,
pairwise.display = "significant",
p.adjust.method = "fdr",
ylab = "Relative Abundance (log2)",
levels = c("M1", "M2", "M3")
)
}
\arguments{
\item{stat_type}{parametric}
\item{data_name}{data.csv}
\item{info_name}{info_name}
\item{plot.type}{box}
\item{zscore}{default TRUE}
\item{palette}{palette}
\item{plot_nrow}{plot_nrow}
\item{pairwise.display}{significant}
\item{p.adjust.method}{fdr}
\item{ylab}{name}
\item{levels}{order of group}
}
\value{
All the results can be obtained from other functions and the instructions.
}
\description{
A function to draw a grouped box or violin plot.
}
\author{
Shine Shen
\email{qq951633542@163.com}
}
|
/man/Grouped_BoxVilion.Rd
|
no_license
|
shineshen007/shine
|
R
| false | true | 984 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grouped_box.R
\name{Grouped_BoxVilion}
\alias{Grouped_BoxVilion}
\title{Grouped_BoxVilion}
\usage{
Grouped_BoxVilion(
stat_type = "nonparametric",
data_name = "data.csv",
info_name = "info.csv",
plot.type = "box",
zscore = TRUE,
palette = "nrc_npg",
plot_nrow = 2,
pairwise.display = "significant",
p.adjust.method = "fdr",
ylab = "Relative Abundance (log2)",
levels = c("M1", "M2", "M3")
)
}
\arguments{
\item{stat_type}{parametric}
\item{data_name}{data.csv}
\item{info_name}{info_name}
\item{plot.type}{box}
\item{zscore}{default TRUE}
\item{palette}{palette}
\item{plot_nrow}{plot_nrow}
\item{pairwise.display}{significant}
\item{p.adjust.method}{fdr}
\item{ylab}{name}
\item{levels}{order of group}
}
\value{
All the results can be obtained from other functions and the instructions.
}
\description{
A function to draw a grouped box or violin plot.
}
\author{
Shine Shen
\email{qq951633542@163.com}
}
|
##########################################################
## test-kp.r
##
## unit tests for functions that compute known population
## degree estimates
##
## TODO -- eventually, develop a catalog of simple networks
## that we can hand-compute estimator values for,
## and that can be part of these tests
## (see also the tests for test_estimators.r)
## TODO -- I don't understand why @import plyr,
## which is in the networksampling-help.R file,
## doesn't take care of this...
library(plyr)
## these tests use the toy networks that come
## packaged with the networksampling package
## TODO -- I don't understand why the package
## data aren't available without having to
## specify package=...
## (this could be a devtools thing?)
data(toynetworks,package="networkreporting")
data(toynrnetworks,package="networkreporting")
####################################
## known population estimator
context("estimators - known population")
## TODO
## NOTE that the toy networks used in the estimator tests
## would also work here...
####################################
## total degree estimator
context("estimators - known population total degree")
## TODO
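## Illustrative sketch of the kind of hand-computable toy check the TODOs above
## describe, written with testthat and base R only (it does not call the
## package's estimators).
library(testthat)
test_that("toy network sanity check: total degree equals twice the edge count", {
  edges <- rbind(c(1, 2), c(2, 3), c(3, 1), c(3, 4))  # a 4-node toy network
  degree <- table(factor(c(edges), levels = 1:4))
  expect_equal(sum(degree), 2 * nrow(edges))
})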
|
/inst/tests/test_kp.r
|
no_license
|
msalganik/networkreporting
|
R
| false | false | 1,186 |
r
|
##########################################################
## test-kp.r
##
## unit tests for functions that compute known population
## degree estimates
##
## TODO -- eventually, develop a catalog of simple networks
## that we can hand-compute estimator values for,
## and that can be part of these tests
## (see also the tests for test_estimators.r)
## TODO -- I don't understand why @import plyr,
## which is in the networksampling-help.R file,
## doesn't take care of this...
library(plyr)
## these tests use the toy networks that come
## packaged with the networksampling package
## TODO -- I don't understand why the package
## data aren't available without having to
## specify package=...
## (this could be a devtools thing?)
data(toynetworks,package="networkreporting")
data(toynrnetworks,package="networkreporting")
####################################
## known population estimator
context("estimators - known population")
## TODO
## NOTE that the toy networks used in the estimator tests
## would also work here...
####################################
## total degree estimator
context("estimators - known population total degree")
## TODO
|
library(pifpaf)
### Name: paf.linear
### Title: Population Attributable Fraction with Linear Relative Risk
### Function
### Aliases: paf.linear
### ** Examples
#Example 1: Univariate relative risk
#----------------------------------------
set.seed(18427)
X <- data.frame(Exposure = rnorm(100,3,.5))
thetahat <- c(1, 0.12) #Linear risk given by 1 + 0.12*X
paf.linear(X, thetahat)
#This is the same as doing:
paf(X, thetahat, rr = function(X, theta){X*theta[2] + theta[1]})
#Same example with kernel method
paf.linear(X, thetahat, method = "kernel")
#Same example with approximate method
Xmean <- data.frame(mean(X[,"Exposure"]))
Xvar <- var(X)
paf.linear(Xmean, thetahat, method = "approximate", Xvar = Xvar)
#Example 2: Multivariate relative risk
#----------------------------------------
X <- data.frame(Exposure = rnorm(100,2,.7), Covariate = rnorm(100,4,1))
theta <- c(1, 0.3,0.1)
paf.linear(X, theta) #Linear risk given by 1 + 0.3*X1 + 0.1*X2
#Example 3: Polynomial relative risk
#----------------------------------------
X <- runif(100)
X2 <- X^2
X3 <- X^3
matX <- data.frame(X,X2,X3)
theta <- c(1, 0.3,0.1, 0.4)
paf.linear(matX,theta) #Polynomial risk: 1 + 0.3*X + 0.1*X^2 + 0.4*X^3
|
/data/genthat_extracted_code/pifpaf/examples/paf.linear.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,227 |
r
|
library(pifpaf)
### Name: paf.linear
### Title: Population Attributable Fraction with Linear Relative Risk
### Function
### Aliases: paf.linear
### ** Examples
#Example 1: Univariate relative risk
#----------------------------------------
set.seed(18427)
X <- data.frame(Exposure = rnorm(100,3,.5))
thetahat <- c(1, 0.12) #Linear risk given by 1 + 0.12*X
paf.linear(X, thetahat)
#This is the same as doing:
paf(X, thetahat, rr = function(X, theta){X*theta[2] + theta[1]})
#Same example with kernel method
paf.linear(X, thetahat, method = "kernel")
#Same example with approximate method
Xmean <- data.frame(mean(X[,"Exposure"]))
Xvar <- var(X)
paf.linear(Xmean, thetahat, method = "approximate", Xvar = Xvar)
#Example 2: Multivariate relative risk
#----------------------------------------
X <- data.frame(Exposure = rnorm(100,2,.7), Covariate = rnorm(100,4,1))
theta <- c(1, 0.3,0.1)
paf.linear(X, theta) #Linear risk given by 1 + 0.3*X1 + 0.1*X2
#Example 3: Polynomial relative risk
#----------------------------------------
X <- runif(100)
X2 <- X^2
X3 <- X^3
matX <- data.frame(X,X2,X3)
theta <- c(1, 0.3,0.1, 0.4)
paf.linear(matX,theta) #Polynomial risk: 1 + 0.3*X + 0.1*X^2 + 0.4*X^3
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric_Fbeta.R
\name{metric_F1}
\alias{metric_F1}
\title{metric_F1}
\usage{
metric_F1(
actual,
predicted,
weight = rep(1, length(actual)),
na.rm = FALSE,
threshold = 0.5
)
}
\arguments{
\item{actual}{Array[Numeric] - Values we are aiming to predict.}
\item{predicted}{Array[Numeric] - Values that we have predicted.}
\item{weight}{Optional: Array[Numeric] - Weighting of predictions. If NULL even weighting is used}
\item{na.rm}{Optional: boolean - If \code{FALSE} the function will return NA if any value is NA}
\item{threshold}{Optional: Numeric between 0 and 1. If the predicted probability is below \code{threshold} the predicted value is 0.}
}
\value{
F1 score of the classification: 2 * (precision * recall) / (precision + recall)
}
\description{
Returns the F1 [2 * (precision * recall) / (precision + recall)] of a classification using the confusion matrix
Note: Predictions should be annualized (independent of exposure)
Note: Perfect F1 is 1, poor model is 0
}
\section{Inputs}{
}
\examples{
metric_F1(actual=c(0,1,0,0), predicted=c(0.1,0.9,0.4,0.6))
metric_Fbeta(actual=c(0,1,0,0), predicted=c(0.1,0.9,0.4,0.6), threshold=0.7)
## metric_F1 is a specific value of metric_Fbeta
metric_Fbeta(actual=c(0,1,0,0), predicted=c(0.1,0.9,0.4,0.6), beta=1)
}
\seealso{
\code{\link{metric_precision}}, \code{\link{metric_recall}} and \code{\link{metric_Fbeta}}
}
|
/man/metric_F1.Rd
|
no_license
|
gloverd2/codeBase
|
R
| false | true | 1,415 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric_Fbeta.R
\name{metric_F1}
\alias{metric_F1}
\title{metric_F1}
\usage{
metric_F1(
actual,
predicted,
weight = rep(1, length(actual)),
na.rm = FALSE,
threshold = 0.5
)
}
\arguments{
\item{actual}{Array[Numeric] - Values we are aiming to predict.}
\item{predicted}{Array[Numeric] - Values that we have predicted.}
\item{weight}{Optional: Array[Numeric] - Weighting of predictions. If NULL even weighting is used}
\item{na.rm}{Optional: boolean - If \code{FALSE} the function will return NA if any value is NA}
\item{threshold}{Optional: Numeric between 0 and 1. If the predicted probability is below \code{threshold} the predicted value is 0.}
}
\value{
F1 score of the classification: 2 * (precision * recall) / (precision + recall)
}
\description{
Returns the F1 [2 * (precision * recall) / (precision + recall)] of a classification using the confusion matrix
Note: Predictions should be annualized (independent of exposure)
Note: Perfect F1 is 1, poor model is 0
}
\section{Inputs}{
}
\examples{
metric_F1(actual=c(0,1,0,0), predicted=c(0.1,0.9,0.4,0.6))
metric_Fbeta(actual=c(0,1,0,0), predicted=c(0.1,0.9,0.4,0.6), threshold=0.7)
## metric_F1 is a specific value of metric_Fbeta
metric_Fbeta(actual=c(0,1,0,0), predicted=c(0.1,0.9,0.4,0.6), beta=1)
}
\seealso{
\code{\link{metric_precision}}, \code{\link{metric_recall}} and \code{\link{metric_Fbeta}}
}
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.21.1
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseMunicipalities Class
#'
#' @field municipalities
#' @field next_page
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApiResponseMunicipalities <- R6::R6Class(
'ApiResponseMunicipalities',
public = list(
`municipalities` = NA,
`municipalities_data_frame` = NULL,
`next_page` = NA,
initialize = function(`municipalities`, `next_page`){
if (!missing(`municipalities`)) {
self$`municipalities` <- `municipalities`
}
if (!missing(`next_page`)) {
self$`next_page` <- `next_page`
}
},
toJSON = function() {
ApiResponseMunicipalitiesObject <- list()
if (!is.null(self$`municipalities`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`municipalities`) && ((length(self$`municipalities`) == 0) || ((length(self$`municipalities`) != 0 && R6::is.R6(self$`municipalities`[[1]]))))) {
ApiResponseMunicipalitiesObject[['municipalities']] <- lapply(self$`municipalities`, function(x) x$toJSON())
} else {
ApiResponseMunicipalitiesObject[['municipalities']] <- jsonlite::toJSON(self$`municipalities`, auto_unbox = TRUE)
}
}
if (!is.null(self$`next_page`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
ApiResponseMunicipalitiesObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
} else {
ApiResponseMunicipalitiesObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
}
}
ApiResponseMunicipalitiesObject
},
fromJSON = function(ApiResponseMunicipalitiesJson) {
ApiResponseMunicipalitiesObject <- jsonlite::fromJSON(ApiResponseMunicipalitiesJson)
if (!is.null(ApiResponseMunicipalitiesObject$`municipalities`)) {
self$`municipalities` <- ApiResponseMunicipalitiesObject$`municipalities`
}
if (!is.null(ApiResponseMunicipalitiesObject$`next_page`)) {
self$`next_page` <- ApiResponseMunicipalitiesObject$`next_page`
}
},
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
fromJSONString = function(ApiResponseMunicipalitiesJson) {
ApiResponseMunicipalitiesObject <- jsonlite::fromJSON(ApiResponseMunicipalitiesJson, simplifyDataFrame = FALSE)
self$setFromList(ApiResponseMunicipalitiesObject)
},
setFromList = function(listObject) {
self$`municipalities` <- lapply(listObject$`municipalities`, function(x) {
MunicipalityObject <- Municipality$new()
MunicipalityObject$setFromList(x)
return(MunicipalityObject)
})
municipalities_list <- lapply(self$`municipalities`, function(x) {
return(x$getAsList())
})
self$`municipalities_data_frame` <- do.call(rbind, lapply(municipalities_list, data.frame))
if (!is.null(listObject$`next_page`)) {
self$`next_page` <- listObject$`next_page`
}
else {
self$`next_page` <- NA
}
},
getAsList = function() {
listObject = list()
# listObject[["municipalities"]] <- lapply(self$`municipalities`, function(o) {
# return(o$getAsList())
# })
listObject[["next_page"]] <- self$`next_page`
return(listObject)
}
)
)
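# Illustrative usage sketch (not part of the generated client code): a real
# response would carry Municipality objects; here the list is left empty and
# "abc123" is a hypothetical paging cursor.
if (interactive()) {
  resp <- ApiResponseMunicipalities$new(municipalities = list(), next_page = "abc123")
  resp$toJSONString()
}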
|
/R/ApiResponseMunicipalities.r
|
no_license
|
bucklbj6038/r-sdk
|
R
| false | false | 4,143 |
r
|
# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.21.1
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' ApiResponseMunicipalities Class
#'
#' @field municipalities
#' @field next_page
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ApiResponseMunicipalities <- R6::R6Class(
'ApiResponseMunicipalities',
public = list(
`municipalities` = NA,
`municipalities_data_frame` = NULL,
`next_page` = NA,
initialize = function(`municipalities`, `next_page`){
if (!missing(`municipalities`)) {
self$`municipalities` <- `municipalities`
}
if (!missing(`next_page`)) {
self$`next_page` <- `next_page`
}
},
toJSON = function() {
ApiResponseMunicipalitiesObject <- list()
if (!is.null(self$`municipalities`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`municipalities`) && ((length(self$`municipalities`) == 0) || ((length(self$`municipalities`) != 0 && R6::is.R6(self$`municipalities`[[1]]))))) {
ApiResponseMunicipalitiesObject[['municipalities']] <- lapply(self$`municipalities`, function(x) x$toJSON())
} else {
ApiResponseMunicipalitiesObject[['municipalities']] <- jsonlite::toJSON(self$`municipalities`, auto_unbox = TRUE)
}
}
if (!is.null(self$`next_page`)) {
# If the object is an empty list or a list of R6 Objects
if (is.list(self$`next_page`) && ((length(self$`next_page`) == 0) || ((length(self$`next_page`) != 0 && R6::is.R6(self$`next_page`[[1]]))))) {
ApiResponseMunicipalitiesObject[['next_page']] <- lapply(self$`next_page`, function(x) x$toJSON())
} else {
ApiResponseMunicipalitiesObject[['next_page']] <- jsonlite::toJSON(self$`next_page`, auto_unbox = TRUE)
}
}
ApiResponseMunicipalitiesObject
},
fromJSON = function(ApiResponseMunicipalitiesJson) {
ApiResponseMunicipalitiesObject <- jsonlite::fromJSON(ApiResponseMunicipalitiesJson)
if (!is.null(ApiResponseMunicipalitiesObject$`municipalities`)) {
self$`municipalities` <- ApiResponseMunicipalitiesObject$`municipalities`
}
if (!is.null(ApiResponseMunicipalitiesObject$`next_page`)) {
self$`next_page` <- ApiResponseMunicipalitiesObject$`next_page`
}
},
toJSONString = function() {
jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
},
fromJSONString = function(ApiResponseMunicipalitiesJson) {
ApiResponseMunicipalitiesObject <- jsonlite::fromJSON(ApiResponseMunicipalitiesJson, simplifyDataFrame = FALSE)
self$setFromList(ApiResponseMunicipalitiesObject)
},
setFromList = function(listObject) {
self$`municipalities` <- lapply(listObject$`municipalities`, function(x) {
MunicipalityObject <- Municipality$new()
MunicipalityObject$setFromList(x)
return(MunicipalityObject)
})
municipalities_list <- lapply(self$`municipalities`, function(x) {
return(x$getAsList())
})
self$`municipalities_data_frame` <- do.call(rbind, lapply(municipalities_list, data.frame))
if (!is.null(listObject$`next_page`)) {
self$`next_page` <- listObject$`next_page`
}
else {
self$`next_page` <- NA
}
},
getAsList = function() {
listObject = list()
# listObject[["municipalities"]] <- lapply(self$`municipalities`, function(o) {
# return(o$getAsList())
# })
listObject[["next_page"]] <- self$`next_page`
return(listObject)
}
)
)
|
testlist <- list(alpha = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), cluster = integer(0), i = 0L)
result <- do.call(o2geosocial:::cpp_find_local_cases,testlist)
str(result)
|
/o2geosocial/inst/testfiles/cpp_find_local_cases/libFuzzer_cpp_find_local_cases/cpp_find_local_cases_valgrind_files/1612733142-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 238 |
r
|
testlist <- list(alpha = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), cluster = integer(0), i = 0L)
result <- do.call(o2geosocial:::cpp_find_local_cases,testlist)
str(result)
|
# ------------------------------------------------------------------------------
# Title: Creating vector of gridded population
# Author: Ryan Gan
# Date Created: 2017-12-29
# ------------------------------------------------------------------------------
# library
library(ncdf4)
library(tidyverse)
# extracting population densities from bonne's bluesky grid -----
pop_nc <- ncdf4::nc_open("./data/blueskypopulation.nc")
# bluesky_grid (the BlueSky grid sf object) is assumed to be loaded by an earlier script
cali_id <- bluesky_grid$id
# extract population and population density for california grid cells
pop <- as.vector(ncdf4::ncvar_get(pop_nc, varid = "Population"))
popden <- as.vector(ncdf4::ncvar_get(pop_nc, varid ="PopulationDensity"))
# extract latlon
lat <- ncdf4::ncvar_get(pop_nc, varid ="latitude")
lon <- ncdf4::ncvar_get(pop_nc, varid = "longitude")
# expand grid
lonlat <- as.matrix(expand.grid(lon,lat))
# create population dataframe and add names
population_df <- data.frame(cbind(lonlat, pop, popden))
# assign names
names(population_df) <- c("lon", "lat", "pop", "popden")
# sf label starts top left and goes right, then down one row
# sort by desc(lat) then lon to match how i labeled the sf objects
population_df <- population_df %>%
arrange(desc(lat), lon) %>%
mutate(id = seq(1:94068)) %>%
dplyr::select(id, pop, popden)
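# Quick sanity check (illustrative): the hard-coded id sequence above assumes
# the expanded grid has exactly 94068 cells (length(lon) * length(lat)).
stopifnot(nrow(population_df) == 94068)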
# saving population density and population vector
write_csv(population_df, paste0("./data/2015-bluesky_grid_population.csv"))
|
/support_r_scripts/bluesky_grid_population_vector.R
|
no_license
|
VS-DavidSouth/smoke_forecaster
|
R
| false | false | 1,402 |
r
|
# ------------------------------------------------------------------------------
# Title: Creating vector of gridded population
# Author: Ryan Gan
# Date Created: 2017-12-29
# ------------------------------------------------------------------------------
# library
library(ncdf4)
library(tidyverse)
# extracting population densities from bonne's bluesky grid -----
pop_nc <- ncdf4::nc_open("./data/blueskypopulation.nc")
# bluesky_grid (the BlueSky grid sf object) is assumed to be loaded by an earlier script
cali_id <- bluesky_grid$id
# extract population and population density for california grid cells
pop <- as.vector(ncdf4::ncvar_get(pop_nc, varid = "Population"))
popden <- as.vector(ncdf4::ncvar_get(pop_nc, varid ="PopulationDensity"))
# extract latlon
lat <- ncdf4::ncvar_get(pop_nc, varid ="latitude")
lon <- ncdf4::ncvar_get(pop_nc, varid = "longitude")
# expand grid
lonlat <- as.matrix(expand.grid(lon,lat))
# create population dataframe and add names
population_df <- data.frame(cbind(lonlat, pop, popden))
# assign names
names(population_df) <- c("lon", "lat", "pop", "popden")
# sf label starts top left and goes right, then down one row
# sort by desc(lat) then lon to match how i labeled the sf objects
population_df <- population_df %>%
arrange(desc(lat), lon) %>%
mutate(id = seq(1:94068)) %>%
dplyr::select(id, pop, popden)
# saving population density and population vector
write_csv(population_df, paste0("./data/2015-bluesky_grid_population.csv"))
|
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
##This function computes the inverse of the special "matrix" returned by makeCacheMatrix
##If the inverse has already been calculated (and the matrix has not changed)
##then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x=matrix(), ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data,...)
x$setinverse(m)
m
}
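## Usage sketch (added for illustration; not part of the original assignment file).
## The inverse is computed once by cacheSolve() and then served from the cache.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)   # computes, caches and returns the inverse (a diagonal matrix of 0.5)
cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse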
|
/cachematrix.R
|
no_license
|
tracyzh/ProgrammingAssignment2
|
R
| false | false | 815 |
r
|
# Data structures in R
#Vectors ----
x=1:10 #create seq of nos from 1 to 10
x #Need to press ctrl+enter
X1 <- 1:20 #older way of coding #assigning
X1 #Need to press ctrl+enter #Printing
(x1=1:30) #assigning as well as printing #changing the position by increasing or decreasing the size of the source box
(x2=c(1,2,13,4,5))
class(x2) #telling me the type of vector
(x3=letters[1:10]) #fast way of creating alphabets
class(x3)
LETTERS[1:26]
(x3b = c('a',"Dhiraj","4"))
class(x3b)
(x4=c(T,FALSE,TRUE,T,F)) #logical values can be written as the single letters T/F or spelled out as TRUE/FALSE
class(x4)
x5=c(3L,5L)
class(x5)
x5a = c(3,5)
class(x5a)
(x5b = c(1, 'a', T, 4L))
class(x5b)
#access elements
(x6 = seq(0,100,by=3)) #starting, ending and interval (from,to,by)
methods(class='numeric')
?seq
#[1] 0 3 6 9 ... 99 (steps of 3)
ls() #variables in my environment
x6 #number of elements
length(x6)
x6[20]
x6[3] # access the 3rd element
#[1] 4
x6[c(2,4)] #access 2nd and 4th element
x6[-1] #access all but the 1st element
x6[-c(1,10)]
x6[c(2,-4)] #cannot mix positive and negative integers #error in x6[c(2,-4)] : only 0's may be mixed with the negative subscripts
x6[c(2.4,3.54)] #real numbers are truncated to integers #equivalent to c(2,3)
x6[-c(1,5,20)]
x6
length(x6)
x6[-(length(x6)-1)]
(x7 = c(x6,x2)) #combining 2 vectors
#modify
x6
sort(x6)
sort(x6[-c(1,2)])
sort(x6,decreasing=T)
sort(x6,decreasing=F)
rev(x6)
seq(-3,10,by=.2)
(x= 13:2)
x6[-c(1:12)] #removes the elements from 1 to 12
(x= -3:2)
x[2] <- 0; x #modify the 2nd element
#[1] -3 0 -1 0 1 2
x #element for comparison
x <0 # logical comparison if elements are less than 0
x[x<0] = 5;x #modify elements less than 0
x[x<= 1 & x >= -1]= 100;x
x
x = x[1:4]; x #truncate x to first 4 elements
#delete vector
(x = seq(1,5, length.out = 15))
x = NULL
x
x[4] #null
(x = rnorm(100)) #Standard normal distribution mean =0 sd=1
plot(density(x))
(x1 = rnorm(1000, mean=50, sd=5)) #as the sample size grows, the sample mean and sd converge to the specified parameters
(x2 = rnorm(1000000, mean=50, sd=5))
mean (x2)
plot(density(x1))
abline(v=mean(x1), h=0.04) #abline() adds lines to the plot just drawn, so plot the density first
mean(x1)
# Matrix ----
1:12
100:111
(m1 = matrix(1:12, nrow=4))
(m2 = matrix(1:12, ncol=3, byrow=T)) #bydefault the data is filled by column but if i want to fill by row need to specify byrow
x=101:124
length(x)
matrix(x, ncol=6)
class(m1)
attributes(m1) #row name and col name will also be displayed
dim(m1) # only 4 and 3 will be displayed
m1
#access elements of matrix
m1[1,2:3] # 1st row with 2nd and 3rd column
m1[c(1,3),] #blank means all colns
m1[,-c(1,3)]
m1
paste("c","D", sep="-")
paste("c","D", "-")
paste("c",1:100, sep="-")
(colnames(m1)=paste('C',1:3, sep=''))
#vector to matrix
m3=1:24
dim(m3)=c(6,4)
m3
m2
m2[c(TRUE,F,T,F), c(2,3)] #logical indexing
m2
m2[m2>5] #all elements greater than 5
m1;m2
m1[1:2,1:2]
m1[c('R1'),c('c1','c3')] #will be complete once you assign the names
#modify vector
m2
m2[2,2] =5 #assigning an element as 5
rbind(m2, c(50,60,70)) #returns a new matrix with an extra row; m2 itself is unchanged
cbind(m2, c(3,4,5,6)) #returns a new matrix with an extra column; m2 itself is unchanged
colSums(m1);rowSums(m1)
colMeans(m1);rowMeans(m1)
t(m1) #transpose #changes
m1
sweep(m1,MARGIN = 1, STATS = c(2,3,4,5), FUN="+") #row-wise: one value added to each row
sweep(m1, MARGIN = 2, STATS = c(2,3,4), FUN="*") #column-wise: one value multiplied into each column
#addmargins
addmargins(m1,margin=1,sum) #adds a row of column sums
addmargins(m1,1,sd)
addmargins(m1,2,mean) #adds a column of row means
addmargins(m1,c(1,2), mean) #row and col wise fn
addmargins(m1,c(1,2),list(list(mean,sum,max),list(var,sd)))
#Arrays ----
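# (Sketch added for illustration -- the Arrays section is empty in the original script)
(a1 = array(1:24, dim=c(3,4,2))) #3 rows, 4 columns, 2 layers
a1[2,3,1] #one element: row 2, column 3, layer 1
dim(a1)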
# Data Frames ----
(rollno=1:30)
(sname = paste('student',1:30,sep=''))
(gender = sample(c('m','f'), size=30,replace=T, prob=c(.7,.3)))
(marks = floor(rnorm(30,mean=50,sd=30)))
(marks2 = ceiling(rnorm(30,40,5)))
(course = sample(c('BBA','MBA'), size=30, replace=T, prob=c(.5,.5)))
rollno; sname; gender
marks; marks2;course
#create df
df1=data.frame(rollno, sname, gender, marks, marks2, course, stringsAsFactors = F)
str(df1) #structure of DF
head(df1) #top 6 rows
head(df1,n=3) #top 3 rows
tail(df1,n=6)
summary(df1)
df1$gender = factor(df1$gender)
df1$course = factor(df1$course)
str(df1)
summary(df1)
#list ----
# Factors ----
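# (Sketches added for illustration -- the list and Factors sections are empty in the original script)
(l1 = list(id=1:3, name=c('a','b','c'), passed=TRUE)) #a list can mix element types
l1$name; l1[['id']] #access list elements by name
(f1 = factor(c('m','f','f','m'), levels=c('f','m'))) #categorical variable with fixed levels
levels(f1); table(f1)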
|
/Data Structures.R
|
no_license
|
ishaparasramka/Analytics
|
R
| false | false | 4,195 |
r
|
setClass("board",
representation = representation(x="matrix"),
prototype = list(x=matrix())
)
setValidity("board",
function(object){
x <- object@x
non.nas <- x[!is.na(x)]
if (!is.matrix(x)){
return("not a matrix")
} else if (any(non.nas != round(non.nas))){
return("matrix includes non-integers")
} else if (any(non.nas < 0)){
return("matrix includes negative numbers")
} else if (any(is.nan(x))){
return("matrix includes one or more NaN elements")
} else if (dof(x)<1){
return("less than one (1) degree of freedom")
} else {
return(TRUE)
}
}
)
"is.board" <- function(x){is(x,"board")}
"as.board" <- function(x){
if(is.board(x)){
return(x)
} else {
return(new("board",x=x))
}
}
"marginals" <- function(x){
x <- as.board(x)@x
mode(x) <- "integer"
list(
rs = apply(x,1,sum,na.rm=TRUE),
cs = apply(x,2,sum,na.rm=TRUE),
na = which(is.na(x),arr.ind=TRUE),
x = x
)
}
".Cargs" <- function(x){
jj <- marginals(x)
list(
rs = as.integer( jj$rs ),
nrow = as.integer(length(jj$rs)),
cs = as.integer( jj$cs ),
ncol = as.integer(length(jj$cs)),
na = as.integer( jj$na ),
nna = as.integer(nrow( jj$na))
)
}
"aylmer.test" <-
function (x, alternative = "two.sided", simulate.p.value = FALSE, n = 1e5, B = 2000, burnin=100, use.brob=FALSE)
{
DNAME <- deparse(substitute(x))
if(is.function(alternative)){
return(aylmer.function(x=x, func=alternative, simulate.p.value = simulate.p.value, n = n, B = B, burnin=burnin, use.brob=use.brob, DNAME=DNAME))
}
METHOD <- "Aylmer test for count data"
if(!any(is.na(x))){
warning("supplied matrix has no NAs. Consider using 'stats:::fisher.test()'")
}
x.dof <- dof(x)
stopifnot(x.dof>0)
almost.1 <- 1 + 64 * .Machine$double.eps
if(simulate.p.value){
stopifnot(identical(length(B),1L))
STATISTIC <- prob(x, use.brob=use.brob)
METHOD <-
paste(METHOD, "with simulated p-value\n\t (based on", B, "replicates)")
random_probs <- randomprobs(x, B, burnin=burnin, use.brob=use.brob)
PVAL <-
as.numeric((1+sum(random_probs <= STATISTIC/almost.1))/(B+1))
} else {
STATISTIC <- prob(x, use.brob=use.brob, give.log=FALSE)
a <- allprobs(x, n=n, normalize=FALSE)
PVAL <- sum(a[a <= STATISTIC*almost.1])/sum(a)
if(x.dof == 1){
alternative <-
char.expand(alternative, c("two.sided", "less", "greater"))
PVAL <-
switch(alternative,
two.sided = PVAL,
greater = .pval.1dof(x, greater=TRUE),
less = .pval.1dof(x, greater=FALSE)
)
}
}
RVAL <- list(p.value = PVAL, alternative = alternative, method = METHOD,
data.name = DNAME)
attr(RVAL, "class") <- "htest"
return(RVAL)
}
"aylmer.function" <-
function (x, func, simulate.p.value = FALSE, n = 1e5, B = 2000, burnin=100, use.brob=FALSE, DNAME=NULL)
{
if(is.null(DNAME)){
DNAME <- deparse(substitute(x))
}
METHOD <- "Aylmer functional test for count data"
stopifnot(dof(x)>0)
if(simulate.p.value){ # Monte Carlo ...
stopifnot(identical(length(B),1L))
STATISTIC <- func(x)
METHOD <-
paste(METHOD, "with simulated p-value\n\t (based on", B, "replicates)")
random_probs <- randomprobs(x, B, burnin=burnin, use.brob=use.brob, func=func)
almost.1 <- 1 + 64 * .Machine$double.eps
PVAL <-
## as.numeric((1+sum(random_probs <= STATISTIC/almost.1))/(B+1))
as.numeric((1+sum(random_probs >= STATISTIC*almost.1))/(B+1))
} else { # ... enumeration
STATISTIC <- func(x)
a <- allprobs(x, n=n, normalize=FALSE)
allfuncs <- apply(allboards(x,n=n),3,func)
## PVAL <- sum(a[allfuncs <= STATISTIC])/sum(a)
PVAL <- sum(a[allfuncs >= STATISTIC])/sum(a)
}
RVAL <- list(p.value = PVAL, alternative = "test function exceeds observed",
method = METHOD, data.name = DNAME)
attr(RVAL, "class") <- "htest"
return(RVAL)
}
".pval.1dof" <- function(x,greater){
almost.1 <- 1 + 64 * .Machine$double.eps
jj <- allboards(x)
or <- apply(jj,3,odds.ratio)
p <- allprobs(x)
x.or <- odds.ratio(x) / almost.1
if(greater){
return(sum(p[or > x.or]))
} else {
return(sum(p[or < x.or]))
}
}
"dof" <- function(x){(nrow(x)-1)*(ncol(x)-1)-sum(is.na(x))}
"odds.ratio" <- function(x){
stopifnot(is.1dof(x))
n <- nrow(x)
ind <- cbind(1:n,c(2:n,1))
return(prod(diag(x))/prod(x[ind]))
}
"maxlike" <- function(x){
warning("not coded up in C")
allboards(x)[,,which.max(allprobs(x))]
}
"is.1dof" <- function(x){
n <- nrow(x)
if(!is.matrix(x) | n != ncol(x)){
return(FALSE)
}
ind <- cbind(1:n,c(2:n,1))
if(all(!is.na(diag(x))) & all(!is.na(x[ind])) & sum(is.na(x))==n*(n-2)){
return(TRUE)
} else {
return(FALSE)
}
}
"as.pairwise" <- function(x){
stopifnot(nrow(x)==ncol(x))
n <- nrow(x)
k <- n * (n - 1) / 2
out <- matrix(NA, k, n)
upper.indexes <- which( lower.tri( x ), arr.ind=TRUE )
from.mat <- rbind( upper.indexes, upper.indexes[ , 2:1 ] )
to.mat <-
cbind(rep(1:nrow(upper.indexes),2), as.vector(upper.indexes[, 2:1]))
out[ to.mat ] <- x[ from.mat ]
colnames(out) <- colnames(x)
return(out)
}
"randomprobs" <- function(x, B=2000, n=100, burnin=0, use.brob=FALSE, func=NULL){
x <- as.board(x)@x
out <- rep(0,B)
if(use.brob){
out <- as.brob(out)
}
default <- FALSE
if(is.null(func)){
func <- function(x){prob(x, give.log=TRUE, use.brob=use.brob)}
default <- TRUE
}
old <- x
out[1] <- func(x)
if(out[1] == -Inf){
if(use.brob){
stop("This cannot happen unless the board has astronomically large entries")
} else {
stop("Board has probability of zero (well, less than .Machine$double.xmin). Consider setting use.brob to TRUE")
}
}
for(i in seq_len(B+burnin)[-1]){
proposed <- candidate(old, n=n)
num <- prob(proposed, give.log=TRUE, use.brob=use.brob)
den <- prob(old , give.log=TRUE, use.brob=use.brob)
if ((num == -Inf) & (den == -Inf)) { #zero probability
stop("this cannot happen.")
}
alpha <- min(as.numeric(exp(num-den)),1) #num, den are logs
if (runif(1) < alpha){
if(default){
out[i] <- num
} else {
out[i] <- func(proposed)
}
old <- proposed
} else {
if(default){
out[i] <- den
} else {
out[i] <- func(old)
}
}
}
if(burnin>0){
out <- out[-seq_len(burnin)]
}
return(out)
}
"randomboards" <- function(x, B=2000, n=100, burnin=0){
x <- as.board(x)@x
out <- array(0L,c(nrow(x),ncol(x),B+burnin))
old <- x
out[,,1] <- x
for(i in seq_len(B+burnin)[-1]){
proposed <- candidate(old, n=n)
num <- prob(proposed, give.log=TRUE)
den <- prob(old , give.log=TRUE)
if ((num == -Inf) & (den == -Inf)) { #zero probability
stop("this cannot happen.")
}
alpha <- min(as.numeric(exp(num-den)),1) #num, den are logs
if (runif(1) < alpha){
out[,,i] <- proposed
old <- proposed
} else {
out[,,i] <- old
}
}
if(burnin>0){
out <- out[,,-seq_len(burnin)]
}
dimnames(out) <- dimnames(x)
return(out)
}
"best" <- function(x, func=NULL, n=100, ...){
if(is.null(func)){
func <- function(x){-prob(x)}
}
dims <- dim(x)
ind <- which(is.na(x) , arr.ind=TRUE)
tovec <- function(x){
x[ind] <- -1
as.vector(x)
}
tomat <- function(x){
dim(x) <- dims
x[ind] <- NA
x
}
out <- optim(tovec(x) , fn=function(x){func(tomat(x))} , gr=function(x){tovec(candidate(tomat(x), n=n))} , method="SANN" , ...)
out$par <- tomat(out$par)
rownames(out$par) <- rownames(x)
colnames(out$par) <- colnames(x)
out
}
"good" <- function(x, method = "D", ...){
jj <- marginals(x)
N <- sum(x,na.rm=TRUE)
B <- exp(
sum(lchoose(jj$rs+ncol(x)-1,jj$rs))+
sum(lchoose(jj$cs+nrow(x)-1,jj$cs))-
lchoose(N+nrow(x)*ncol(x)-1,N)
)
if(any(is.na(x)) & !method=="A"){
warning("Good's method is for matrices with no NA entries. Answer supplied is indicative only (but should provide an upper bound)")
}
return(
switch(method,
A = no.of.boards(x, ...),
B = B,
C = 1.3*N^2*B/(nrow(x)*sum(jj$rs^2)),
D = 1.3*N^4*B/(nrow(x)*ncol(x)*sum(outer(jj$rs^2,jj$cs^2))),
"method must be one of A-D"
)
)
}
"candidate" <- function(x, n=100, give=FALSE){
stopifnot(is.matrix(x))
m <- marginals(x)
cx <- .Cargs(x)
x[is.na(x)] <- 0
flash <- c("randpath", cx, list(ans=as.integer(as.vector(x))), n=as.integer(n), PACKAGE="aylmer")
jj <- do.call(".C",flash)
n <- jj$n
if(give){
return(n)
}
if(n==0){
print(x)
stop("no acceptable candidates found. Consider increasing n")
}
out <- jj$ans
dim(out) <- c(cx$nrow,cx$ncol)
out[m$na] <- NA
rownames(out) <- rownames(x)
colnames(out) <- colnames(x)
return(out)
}
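## Illustrative call (added here; not part of the package source). A "board" is an integer
## matrix whose NA entries mark structural gaps; the matrix below is only a made-up example,
## and the test needs the compiled aylmer package (prob(), allprobs(), the C routine) installed.
if (requireNamespace("aylmer", quietly = TRUE)) {
  x <- matrix(c(1, 2, 3,
                4, NA, 6,
                7, 8, 2), nrow = 3, byrow = TRUE)
  print(aylmer::aylmer.test(x, simulate.p.value = TRUE, B = 200))
}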
|
/aylmer/R/aylmer.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 9,364 |
r
|
setClass("board",
representation = representation(x="matrix"),
prototype = list(x=matrix())
)
setValidity("board",
function(object){
x <- object@x
non.nas <- x[!is.na(x)]
if (!is.matrix(x)){
return("not a matrix")
} else if (any(non.nas != round(non.nas))){
return("matrix includes non-integers")
} else if (any(non.nas < 0)){
return("matrix includes negative numbers")
} else if (any(is.nan(x))){
return("matrix includes one or more NaN elements")
} else if (dof(x)<1){
return("less than one (1) degree of freedom")
} else {
return(TRUE)
}
}
)
"is.board" <- function(x){is(x,"board")}
"as.board" <- function(x){
if(is.board(x)){
return(x)
} else {
return(new("board",x=x))
}
}
"marginals" <- function(x){
x <- as.board(x)@x
mode(x) <- "integer"
list(
rs = apply(x,1,sum,na.rm=TRUE),
cs = apply(x,2,sum,na.rm=TRUE),
na = which(is.na(x),arr.ind=TRUE),
x = x
)
}
".Cargs" <- function(x){
jj <- marginals(x)
list(
rs = as.integer( jj$rs ),
nrow = as.integer(length(jj$rs)),
cs = as.integer( jj$cs ),
ncol = as.integer(length(jj$cs)),
na = as.integer( jj$na ),
nna = as.integer(nrow( jj$na))
)
}
"aylmer.test" <-
function (x, alternative = "two.sided", simulate.p.value = FALSE, n = 1e5, B = 2000, burnin=100, use.brob=FALSE)
{
DNAME <- deparse(substitute(x))
if(is.function(alternative)){
return(aylmer.function(x=x, func=alternative, simulate.p.value = simulate.p.value, n = n, B = B, burnin=burnin, use.brob=use.brob, DNAME=DNAME))
}
METHOD <- "Aylmer test for count data"
if(!any(is.na(x))){
warning("supplied matrix has no NAs. Consider using 'stats:::fisher.test()'")
}
x.dof <- dof(x)
stopifnot(x.dof>0)
almost.1 <- 1 + 64 * .Machine$double.eps
if(simulate.p.value){
stopifnot(identical(length(B),1L))
STATISTIC <- prob(x, use.brob=use.brob)
METHOD <-
paste(METHOD, "with simulated p-value\n\t (based on", B, "replicates)")
random_probs <- randomprobs(x, B, burnin=burnin, use.brob=use.brob)
PVAL <-
as.numeric((1+sum(random_probs <= STATISTIC/almost.1))/(B+1))
} else {
STATISTIC <- prob(x, use.brob=use.brob, give.log=FALSE)
a <- allprobs(x, n=n, normalize=FALSE)
PVAL <- sum(a[a <= STATISTIC*almost.1])/sum(a)
if(x.dof == 1){
alternative <-
char.expand(alternative, c("two.sided", "less", "greater"))
PVAL <-
switch(alternative,
two.sided = PVAL,
greater = .pval.1dof(x, greater=TRUE),
less = .pval.1dof(x, greater=FALSE)
)
}
}
RVAL <- list(p.value = PVAL, alternative = alternative, method = METHOD,
data.name = DNAME)
attr(RVAL, "class") <- "htest"
return(RVAL)
}
"aylmer.function" <-
function (x, func, simulate.p.value = FALSE, n = 1e5, B = 2000, burnin=100, use.brob=FALSE, DNAME=NULL)
{
if(is.null(DNAME)){
DNAME <- deparse(substitute(x))
}
METHOD <- "Aylmer functional test for count data"
stopifnot(dof(x)>0)
if(simulate.p.value){ # Monte Carlo ...
stopifnot(identical(length(B),1L))
STATISTIC <- func(x)
METHOD <-
paste(METHOD, "with simulated p-value\n\t (based on", B, "replicates)")
random_probs <- randomprobs(x, B, burnin=burnin, use.brob=use.brob, func=func)
almost.1 <- 1 + 64 * .Machine$double.eps
PVAL <-
## as.numeric((1+sum(random_probs <= STATISTIC/almost.1))/(B+1))
as.numeric((1+sum(random_probs >= STATISTIC*almost.1))/(B+1))
} else { # ... enumeration
STATISTIC <- func(x)
a <- allprobs(x, n=n, normalize=FALSE)
allfuncs <- apply(allboards(x,n=n),3,func)
## PVAL <- sum(a[allfuncs <= STATISTIC])/sum(a)
PVAL <- sum(a[allfuncs >= STATISTIC])/sum(a)
}
RVAL <- list(p.value = PVAL, alternative = "test function exceeds observed",
method = METHOD, data.name = DNAME)
attr(RVAL, "class") <- "htest"
return(RVAL)
}
".pval.1dof" <- function(x,greater){
almost.1 <- 1 + 64 * .Machine$double.eps
jj <- allboards(x)
or <- apply(jj,3,odds.ratio)
p <- allprobs(x)
x.or <- odds.ratio(x) / almost.1
if(greater){
return(sum(p[or > x.or]))
} else {
return(sum(p[or < x.or]))
}
}
"dof" <- function(x){(nrow(x)-1)*(ncol(x)-1)-sum(is.na(x))}
"odds.ratio" <- function(x){
stopifnot(is.1dof(x))
n <- nrow(x)
ind <- cbind(1:n,c(2:n,1))
return(prod(diag(x))/prod(x[ind]))
}
"maxlike" <- function(x){
warning("not coded up in C")
allboards(x)[,,which.max(allprobs(x))]
}
"is.1dof" <- function(x){
n <- nrow(x)
if(!is.matrix(x) | n != ncol(x)){
return(FALSE)
}
ind <- cbind(1:n,c(2:n,1))
if(all(!is.na(diag(x))) & all(!is.na(x[ind])) & sum(is.na(x))==n*(n-2)){
return(TRUE)
} else {
return(FALSE)
}
}
"as.pairwise" <- function(x){
stopifnot(nrow(x)==ncol(x))
n <- nrow(x)
k <- n * (n - 1) / 2
out <- matrix(NA, k, n)
upper.indexes <- which( lower.tri( x ), arr.ind=TRUE )
from.mat <- rbind( upper.indexes, upper.indexes[ , 2:1 ] )
to.mat <-
cbind(rep(1:nrow(upper.indexes),2), as.vector(upper.indexes[, 2:1]))
out[ to.mat ] <- x[ from.mat ]
colnames(out) <- colnames(x)
return(out)
}
"randomprobs" <- function(x, B=2000, n=100, burnin=0, use.brob=FALSE, func=NULL){
x <- as.board(x)@x
out <- rep(0,B)
if(use.brob){
out <- as.brob(out)
}
default <- FALSE
if(is.null(func)){
func <- function(x){prob(x, give.log=TRUE, use.brob=use.brob)}
default <- TRUE
}
old <- x
out[1] <- func(x)
if(out[1] == -Inf){
if(use.brob){
stop("This cannot happen unless the board has astronomically large entries")
} else {
stop("Board has probability of zero (well, less than .Machine$double.xmin). Consider setting use.brob to TRUE")
}
}
for(i in seq_len(B+burnin)[-1]){
proposed <- candidate(old, n=n)
num <- prob(proposed, give.log=TRUE, use.brob=use.brob)
den <- prob(old , give.log=TRUE, use.brob=use.brob)
if ((num == -Inf) & (den == -Inf)) { #zero probability
stop("this cannot happen.")
}
alpha <- min(as.numeric(exp(num-den)),1) #num, den are logs
if (runif(1) < alpha){
if(default){
out[i] <- num
} else {
out[i] <- func(proposed)
}
old <- proposed
} else {
if(default){
out[i] <- den
} else {
out[i] <- func(old)
}
}
}
if(burnin>0){
out <- out[-seq_len(burnin)]
}
return(out)
}
"randomboards" <- function(x, B=2000, n=100, burnin=0){
x <- as.board(x)@x
out <- array(0L,c(nrow(x),ncol(x),B+burnin))
old <- x
out[,,1] <- x
for(i in seq_len(B+burnin)[-1]){
proposed <- candidate(old, n=n)
num <- prob(proposed, give.log=TRUE)
den <- prob(old , give.log=TRUE)
if ((num == -Inf) & (den == -Inf)) { #zero probability
stop("this cannot happen.")
}
alpha <- min(as.numeric(exp(num-den)),1) #num, den are logs
if (runif(1) < alpha){
out[,,i] <- proposed
old <- proposed
} else {
out[,,i] <- old
}
}
if(burnin>0){
out <- out[,,-seq_len(burnin)]
}
dimnames(out) <- dimnames(x)
return(out)
}
"best" <- function(x, func=NULL, n=100, ...){
if(is.null(func)){
func <- function(x){-prob(x)}
}
dims <- dim(x)
ind <- which(is.na(x) , arr.ind=TRUE)
tovec <- function(x){
x[ind] <- -1
as.vector(x)
}
tomat <- function(x){
dim(x) <- dims
x[ind] <- NA
x
}
out <- optim(tovec(x) , fn=function(x){func(tomat(x))} , gr=function(x){tovec(candidate(tomat(x), n=n))} , method="SANN" , ...)
out$par <- tomat(out$par)
rownames(out$par) <- rownames(x)
colnames(out$par) <- colnames(x)
out
}
"good" <- function(x, method = "D", ...){
jj <- marginals(x)
N <- sum(x,na.rm=TRUE)
B <- exp(
sum(lchoose(jj$rs+ncol(x)-1,jj$rs))+
sum(lchoose(jj$cs+nrow(x)-1,jj$cs))-
lchoose(N+nrow(x)*ncol(x)-1,N)
)
if(any(is.na(x)) & !method=="A"){
warning("Good's method is for matrices with no NA entries. Answer supplied is indicative only (but should provide an upper bound)")
}
return(
switch(method,
A = no.of.boards(x, ...),
B = B,
C = 1.3*N^2*B/(nrow(x)*sum(jj$rs^2)),
D = 1.3*N^4*B/(nrow(x)*ncol(x)*sum(outer(jj$rs^2,jj$cs^2))),
"method must be one of A-D"
)
)
}
"candidate" <- function(x, n=100, give=FALSE){
stopifnot(is.matrix(x))
m <- marginals(x)
cx <- .Cargs(x)
x[is.na(x)] <- 0
flash <- c("randpath", cx, list(ans=as.integer(as.vector(x))), n=as.integer(n), PACKAGE="aylmer")
jj <- do.call(".C",flash)
n <- jj$n
if(give){
return(n)
}
if(n==0){
print(x)
stop("no acceptable candidates found. Consider increasing n")
}
out <- jj$ans
dim(out) <- c(cx$nrow,cx$ncol)
out[m$na] <- NA
rownames(out) <- rownames(x)
colnames(out) <- colnames(x)
return(out)
}
|
context("subset")
test_that("subset.mcmcr", {
expect_identical(pars(subset(mcmcr_example, pars = rev(pars(mcmcr_example)))), rev(pars(mcmcr_example)))
expect_identical(nchains(subset(mcmcr_example, 1L)), 1L)
expect_identical(nsims(subset(mcmcr_example, rep(1L, 5), 2:3)), 10L)
expect_identical(nterms(subset(mcmcr_example, pars = "beta")), 4L)
})
test_that("subset.mcmcrs", {
mcmcrs <- mcmcrs(mcmcr::mcmcr_example, mcmcr::mcmcr_example)
expect_identical(pars(subset(mcmcrs, pars = rev(pars(mcmcrs)))), rev(pars(mcmcrs)))
expect_identical(nchains(subset(mcmcrs, 1L)), 1L)
expect_identical(nsims(subset(mcmcrs, rep(1L, 5), 2:3)), 10L)
expect_identical(nterms(subset(mcmcrs, pars = "beta")), 4L)
})
test_that("subset.mcmc.list", {
expect_identical(pars(subset(as.mcmc.list(mcmcr_example), pars = "beta")), "beta")
expect_identical(niters(subset(as.mcmc.list(mcmcr_example), iters = 10L)), 1L)
expect_identical(nchains(subset(as.mcmc.list(mcmcr_example), chains = 2L)), 1L)
})
|
/tests/testthat/test-subset.R
|
permissive
|
krlmlr/mcmcr
|
R
| false | false | 1,000 |
r
|
context("subset")
test_that("subset.mcmcr", {
expect_identical(pars(subset(mcmcr_example, pars = rev(pars(mcmcr_example)))), rev(pars(mcmcr_example)))
expect_identical(nchains(subset(mcmcr_example, 1L)), 1L)
expect_identical(nsims(subset(mcmcr_example, rep(1L, 5), 2:3)), 10L)
expect_identical(nterms(subset(mcmcr_example, pars = "beta")), 4L)
})
test_that("subset.mcmcrs", {
mcmcrs <- mcmcrs(mcmcr::mcmcr_example, mcmcr::mcmcr_example)
expect_identical(pars(subset(mcmcrs, pars = rev(pars(mcmcrs)))), rev(pars(mcmcrs)))
expect_identical(nchains(subset(mcmcrs, 1L)), 1L)
expect_identical(nsims(subset(mcmcrs, rep(1L, 5), 2:3)), 10L)
expect_identical(nterms(subset(mcmcrs, pars = "beta")), 4L)
})
test_that("subset.mcmc.list", {
expect_identical(pars(subset(as.mcmc.list(mcmcr_example), pars = "beta")), "beta")
expect_identical(niters(subset(as.mcmc.list(mcmcr_example), iters = 10L)), 1L)
expect_identical(nchains(subset(as.mcmc.list(mcmcr_example), chains = 2L)), 1L)
})
|
library(surveillance)
### Name: calibrationTest
### Title: Calibration Tests for Poisson or Negative Binomial Predictions
### Aliases: calibrationTest calibrationTest.default
### Keywords: htest
### ** Examples
mu <- c(0.1, 1, 3, 6, pi, 100)
size <- 0.1
set.seed(1)
y <- rnbinom(length(mu), mu = mu, size = size)
calibrationTest(y, mu = mu, size = size) # p = 0.99
calibrationTest(y, mu = mu, size = 1) # p = 4.3e-05
calibrationTest(y, mu = 1, size = size) # p = 0.6959
calibrationTest(y, mu = 1, size = size, which = "rps") # p = 0.1286
|
/data/genthat_extracted_code/surveillance/examples/calibration.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 545 |
r
|
### Data
## Description
# Social Media Volume
## Libraries (ggplot2 and ggthemes supply ggplot(), theme_wsj() and scale_colour_wsj() used below):
library(ggplot2)
library(ggthemes)
## Set working directory:
getwd()
setwd('/Users/Xyz27/Dropbox/Thesis/DATA')
## Load the cand_full file:
cand_full<-read.csv("cand_full_class.csv")
cand_full$X<-NULL
## Summarize by volume:
sm_volume<-aggregate(text ~ name+period+platform, data = cand_full, FUN=length) # count posts per name/period/platform
names(sm_volume)<-c("name", "period", "platform", "post_volume")
## Save the social media volume table:
write.csv(sm_volume, 'smvolume.csv')
## Load the SM volume data:
setwd('/Users/Xyz27/Dropbox/Thesis/DATA')
smv<-read.csv("smvolume.csv")
smv$X<-NULL
## DISTRIBUTION OF VOLUME DATA - OVERALL
summary(smv$post_volume)
# Normal
hist<-ggplot(data=smv, aes(post_volume)) + geom_histogram(bins=80,colour = "red", fill="red") +
theme_wsj(base_size=10, base_family="Verdana", title_family="Verdana")+scale_colour_wsj() +
theme(axis.title = element_text()) + labs(x = 'Post Volume (Per Period)', y = 'Count')
hist
# Log
hist<-ggplot(data=smv, aes(log(post_volume))) + geom_histogram(bins=15,colour = "red", fill="red") +
theme_wsj(base_size=10, base_family="Verdana", title_family="Verdana")+scale_colour_wsj() +
theme(axis.title = element_text()) + labs(x = 'Post Volume (Per Period)', y = 'Count')
hist
## DISTRIBUTION OF VOLUME DATA - TWITTER
t_v<-smv[smv$platform=="twitter",]
summary(t_v$post_volume)
#View(t_v)
hist<-ggplot(data=t_v, aes(post_volume)) + geom_histogram(bins=80,colour = "light blue", fill="light blue") +
theme_wsj(base_size=10, base_family="Verdana", title_family="Verdana")+scale_colour_wsj() +
theme(axis.title = element_text()) + labs(x = 'Post Volume (Per Period)', y = 'Count')
hist
# Log
hist<-ggplot(data=t_v, aes(log(post_volume))) + geom_histogram(bins=15,colour = "light blue", fill="light blue") +
theme_wsj(base_size=10, base_family="Verdana", title_family="Verdana")+scale_colour_wsj() +
theme(axis.title = element_text()) + labs(x = 'Post Volume (Per Period)', y = 'Count')
hist
## DISTRIBUTION OF VOLUME DATA - FACEBOOK
f_v<-smv[smv$platform=="facebook",]
summary(f_v$post_volume)
#View(f_v)
hist<-ggplot(data=f_v, aes(post_volume)) + geom_histogram(bins=30,colour = "dark blue", fill="dark blue") +
theme_wsj(base_size=10, base_family="Verdana", title_family="Verdana")+scale_colour_wsj() +
theme(axis.title = element_text()) + labs(x = 'Post Volume (Per Period)', y = 'Count')
hist
# Log
hist<-ggplot(data=f_v, aes(log(post_volume))) + geom_histogram(bins=5,colour = "dark blue", fill="dark blue") +
theme_wsj(base_size=10, base_family="Verdana", title_family="Verdana")+scale_colour_wsj() +
theme(axis.title = element_text()) + labs(x = 'Post Volume (Per Period)', y = 'Count')
hist
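## The histogram blocks above differ only in the data, fill colour and bin count, and re-using
## the name `hist` masks base::hist(). A small helper (illustrative refactor, not in the
## original script) removes the repetition:
plot_volume_hist <- function(dat, fill, bins, title, log_scale = FALSE) {
  vol <- if (log_scale) log(dat$post_volume) else dat$post_volume
  ggplot(data.frame(vol = vol), aes(vol)) +
    geom_histogram(bins = bins, colour = fill, fill = fill) +
    theme_wsj(base_size = 10, base_family = "Verdana", title_family = "Verdana") + scale_colour_wsj() +
    theme(axis.title = element_text()) +
    labs(x = 'Post Volume (Per Period)', y = 'Count', title = title)
}
plot_volume_hist(t_v, "light blue", 80, "Twitter Post Volume")
plot_volume_hist(f_v, "dark blue", 30, "Facebook Post Volume")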
|
/T -- SM_Volume.R
|
no_license
|
lilymcelwee/Social-Media-and-Campaign-Finance
|
R
| false | false | 2,695 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{coxprocess_gibbsvelocity}
\alias{coxprocess_gibbsvelocity}
\title{Compute Gibbs velocity for Cox process model}
\usage{
coxprocess_gibbsvelocity(time, xparticles, exponent, counts)
}
\arguments{
\item{time}{time}
\item{xparticles}{particle positions}
\item{exponent}{exponent of tempering schedule}
\item{counts}{dataset}
}
\value{
\code{gibbs_velocity}, the Gibbs velocity field
}
\description{
Compute Gibbs velocity for Cox process model
}
|
/man/coxprocess_gibbsvelocity.Rd
|
no_license
|
jeremyhengjm/GibbsFlow
|
R
| false | true | 532 |
rd
|
\name{begins}
\alias{begins}
\title{
Check Start of Character String
}
\description{
Checks whether a character string begins with a particular prefix.
}
\usage{
begins(x, firstbit)
}
\arguments{
\item{x}{
Character string, or vector of character strings, to be tested.
}
\item{firstbit}{
A single character string.
}
}
\details{
This simple wrapper function checks whether (each entry in) \code{x}
begins with the string \code{firstbit}, and returns a logical value
or logical vector with one entry for each entry of \code{x}.
This function is useful mainly for reducing complexity in model formulae.
}
\value{
Logical vector of the same length as \code{x}.
}
\author{
Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
Rolf Turner
\email{r.turner@auckland.ac.nz}
and Ege Rubak
\email{rubak@math.aau.dk}
}
\examples{
begins(c("Hello", "Goodbye"), "Hell")
begins("anything", "")
}
\keyword{character}
|
/man/begins.Rd
|
no_license
|
cuulee/spatstat
|
R
| false | false | 979 |
rd
|
# Week 5
# September 25-29
# Estimate of Pr(use | 40-49)
exp(-1.5072 + 1.4246) / (1 + exp(-1.5072 + 1.4246))
(93) / (101 + 93)
exp(1.4246)
72*101 / (93*325)
# Actually
(93*325) / (72*101)
# vs. 30-39
exp(-1.50 + 1.42) / exp(-1.5 + 1.04)
exp(1.42 -1.04)
######################################
Freq=c(6,4,52,10,14,10,54,27,33,80,46,78,6,48,8,31,
53,10,212,50,60,19,155,65,112,77,118,68,35,46,8,12)
use=c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
morekids=c(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0)
uppered=c(0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0,1,1)
age2529=c(0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0)
age3039=c(0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0)
age4049=c(0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1)
fiji=data.frame(use,Freq,morekids,uppered,age2529,age3039,age4049)
library(vcdExtra)
fiji.ind=expand.dft(fiji)
fiji.ind[1:8,]
########################
fiji.glm = glm(use~ morekids +uppered + age2529 + age3039 + age4049 + morekids:age2529 +
morekids:age3039 + morekids:age4049, family = binomial,data = fiji.ind)
library(pROC)
prob = predict(fiji.glm,type= "response")
roccurve = roc(fiji.ind$use~prob)
plot(roccurve)
coords(roccurve,"best",ret = c("threshold","specificity","1-npv"))
auc(roccurve)
library(caret)
modcv = train(factor(use)~ morekids +uppered + age2529 + age3039 + age4049 + morekids:age2529 +
morekids:age3039 + morekids:age4049, family = binomial,data = fiji.ind,
method = "glm",
trControl = trainControl(method = "cv", number = 10,verboseIter = TRUE))
summary(modcv)
modcv
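# Side note (added for illustration): the hand-computed probabilities at the top of this
# script are inverse-logit transforms, which plogis() performs directly.
plogis(-1.5072 + 1.4246)  # same as exp(-1.5072 + 1.4246) / (1 + exp(-1.5072 + 1.4246))
exp(coef(fiji.glm))       # odds ratios for the fitted logistic model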
|
/week_5.R
|
no_license
|
zachmwhite/STA_841_cat
|
R
| false | false | 1,715 |
r
|
col_means_std <- function(){
source("merge_test.R")
source("merge_train.R")
all_data <- merge(merge_test(),merge_train(),all=TRUE)
means <- colMeans(all_data,na.rm=TRUE)
sds <- apply(all_data,2,sd,na.rm=TRUE)
means_and_sds <- data.frame(means,sds)
means_and_sds
}
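## Example call (added for illustration); it assumes merge_test.R and merge_train.R are in the
## working directory, as the source() calls above require.
if (file.exists("merge_test.R") && file.exists("merge_train.R")) {
  stats <- col_means_std()
  head(stats)   # one row per column of the merged data: its mean and standard deviation
}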
|
/col_means_std.R
|
no_license
|
mdavebt/My-Projects
|
R
| false | false | 291 |
r
|
##Server Code ####
# Define server logic for the Twitter sentiment app
# (shiny, twitteR, dplyr, purrr, syuzhet and DT are assumed to be loaded in ui.R/global.R)
server <- function(input, output) {
df <- eventReactive(input$do, {
withProgress(message = 'Running',
value = 0, {
for (i in 1:3) {
incProgress(1/2)
Sys.sleep(0.25)
}
},env = parent.frame(n=1))
tweets <- userTimeline(input$name, n = input$num)
tweets_df <- tbl_df(map_df(tweets, as.data.frame))
tweets.text = tweets_df$text
tweets.text
})
observeEvent(input$get,{output$plot <- renderPlot({
withProgress(message = 'Calculating Emotional Sentiment',
value = 0, {
for (i in 1:10) {
incProgress(1/10)
Sys.sleep(0.25)
}
},env = parent.frame(n=1))
tweets <- userTimeline(input$name, n = input$num)
tweets_df <- tbl_df(map_df(tweets, as.data.frame))
tweets.text = tweets_df$text
clean.text <- function(some_txt)
{
some_txt = gsub("&", "", some_txt)
# some_txt<-gsub("[[:cntrl:]]","",some_txt)
some_txt = gsub("(RT|via)((?:\b\\W*@\\w+)+)", "", some_txt)
some_txt = gsub("@\\w+", "", some_txt)
some_txt = gsub("[[:punct:]]", "", some_txt)
some_txt = gsub("[[:digit:]]", "", some_txt)
some_txt = gsub("http\\w+", "", some_txt)
some_txt = gsub("[ ]{2,}", "", some_txt)
some_txt = gsub("^\\s+|\\s+$...", "", some_txt)
# define "tolower error handling" function
try.tolower = function(x)
{
y = NA
try_error = tryCatch(tolower(x), error=function(e) e)
if (!inherits(try_error, "error"))
y = tolower(x)
return(y)
}
some_txt = sapply(some_txt, try.tolower)
some_txt = some_txt[some_txt != ""]
names(some_txt) = NULL
return(some_txt)
}
##Cleans the twitter data
clean_text = clean.text(tweets.text)
value <- get_nrc_sentiment(clean_text)
barplot(
sort(colSums(prop.table(value[, 1:10]))),
horiz = input$horizontal,
cex.names = 0.7,
las = 1,
main = paste(input$name," Emotional Sentiment")
,col = "blue"
)
})})
## Positive vs. Negative
observeEvent(input$gettwo,{output$plot2 <- renderPlot({
withProgress(message = 'Calculating Positive vs Negative Sentiment',
value = 0, {
for (i in 1:10) {
incProgress(1/10)
Sys.sleep(0.25)
}
},env = parent.frame(n=1))
tweets <- userTimeline(input$name, n = input$num)
tweets_df <- tbl_df(map_df(tweets, as.data.frame))
tweets.text = tweets_df$text
clean.text <- function(some_txt)
{
some_txt = gsub("&", "", some_txt)
# some_txt<-gsub("[[:cntrl:]]","",some_txt)
some_txt = gsub("(RT|via)((?:\b\\W*@\\w+)+)", "", some_txt)
some_txt = gsub("@\\w+", "", some_txt)
some_txt = gsub("[[:punct:]]", "", some_txt)
some_txt = gsub("[[:digit:]]", "", some_txt)
some_txt = gsub("http\\w+", "", some_txt)
some_txt = gsub("[ ]{2,}", "", some_txt)
some_txt = gsub("^\\s+|\\s+$...", "", some_txt)
# define "tolower error handling" function
try.tolower = function(x)
{
y = NA
try_error = tryCatch(tolower(x), error=function(e) e)
if (!inherits(try_error, "error"))
y = tolower(x)
return(y)
}
some_txt = sapply(some_txt, try.tolower)
some_txt = some_txt[some_txt != ""]
names(some_txt) = NULL
return(some_txt)
}
##Cleans the twitter data
clean_text = clean.text(tweets.text)
value <- get_nrc_sentiment(clean_text)
barplot(
sort(colSums(prop.table(value[, 9:10]))),
horiz = input$horizontal,
cex.names = 0.7,
las = 1,
main = paste(input$name," Positive vs Negative Sentiment")
,col = "blue"
)
})})
observeEvent(input$get2,{texterdf<- reactive({
texter<-userTimeline(searchString = input$name, n=input$num)
texter <- tbl_df(map_df(texter, as.data.frame))
texter
return(df)
})})
###Creates Twitter Data Frame
tweetdf <- reactive({
withProgress(message = 'Creating Twitter Data Table',
value = 0, {
for (i in 1:10) {
incProgress(1/10)
Sys.sleep(0.25)
}
},env = parent.frame(n=1))
# generate bins based on input$bins from ui.R
tweets <- userTimeline(user = input$name,n = input$num)
tweets<-tbl_df(map_df(tweets,as.data.frame))
tweets
})
#
observeEvent(input$display,{output$table<-DT::renderDataTable(tweetdf(), options = list(lengthChange = TRUE,autoWidth = TRUE,scrollX = TRUE),filter='top',
class = "cell-border stripe")})
output$download <- downloadHandler(
filename = function() { paste("Twitter Data Frame",input$name, sep='',".csv") },
content = function(file) {
withProgress(message = 'Downloading Twitter Data',
value = 0, {
for (i in 1:10) {
incProgress(1/10)
Sys.sleep(0.25)
}
},env = parent.frame(n=1))
texter<-userTimeline(input$name, n = input$num)
texterdf <- tbl_df(map_df(texter, as.data.frame))
write.csv(texterdf, file)
})
}
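## Illustrative refactor (not part of the original app): the text-cleaning logic is written out
## twice above, once in each renderPlot(). A single helper defined before server() keeps both
## plots in sync; this is a condensed sketch of the same regex steps, not a drop-in replacement.
clean_tweets <- function(txt) {
  txt <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)|@\\w+|http\\w+", "", txt, perl = TRUE)  # retweets, handles, links
  txt <- gsub("[[:punct:]]|[[:digit:]]", "", txt)                                  # punctuation and digits
  txt <- trimws(tolower(txt))
  txt[txt != ""]
}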
|
/server.R
|
no_license
|
gonzalezben81/twitter
|
R
| false | false | 6,140 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/browse.study.R
\name{browse.study}
\alias{browse.study}
\title{Open the study webpage in a web browser}
\usage{
browse.study(phs, jupyter = FALSE)
}
\arguments{
\item{phs}{dbGap study ID (phs00xxxx, or 00xxxx, or xxx)}
\item{jupyter}{set to TRUE if you are in a JupyterHub environment}
}
\value{
Open the study webpage in a web browser
}
\description{
Open the study webpage in a web browser
}
\author{
Gregoire Versmee, Laura Versmee
}
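% Example section added for illustration; the accession numbers below are placeholders only.
\examples{
\dontrun{
browse.study("phs000007")
browse.study(974, jupyter = TRUE)
}
}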
|
/man/browse.study.Rd
|
permissive
|
hms-dbmi/dbGaP2x
|
R
| false | true | 516 |
rd
|
# AUTO GENERATED FILE - DO NOT EDIT
daqPrecisionInput <- function(id=NULL, value=NULL, size=NULL, min=NULL, max=NULL, precision=NULL, disabled=NULL, theme=NULL, label=NULL, labelPosition=NULL, className=NULL, style=NULL) {
component <- list(
props = list(id=id, value=value, size=size, min=min, max=max, precision=precision, disabled=disabled, theme=theme, label=label, labelPosition=labelPosition, className=className, style=style),
type = 'PrecisionInput',
namespace = 'dash_daq',
propNames = c('id', 'value', 'size', 'min', 'max', 'precision', 'disabled', 'theme', 'label', 'labelPosition', 'className', 'style'),
package = 'dashDaq'
)
component$props <- filter_null(component$props)
structure(component, class = c('dash_component', 'list'))
}
|
/R/daqPrecisionInput.R
|
permissive
|
lgianca/dash-daq
|
R
| false | false | 814 |
r
|
# using file protiens.csv
data<-read.csv(file.choose(),header = T)
#ScatterPlot
library(ggvis)
data %>% ggvis(~RedMeat,~WhiteMeat,fill=~Country) %>% layer_points()
data %>% ggvis(~Eggs,~Milk,fill=~Country) %>% layer_points()
#K-Means
set.seed(1)
grpProtien<-kmeans(data[,-1],centers=7)
o=order(grpProtien$cluster)
data.frame(data$Country[o],grpProtien$cluster[o])
set.seed(1)
grpProtien<-kmeans(data[,-1],centers=3)
o=order(grpProtien$cluster)
data.frame(data$Country[o],grpProtien$cluster[o])
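# Choosing k (illustrative addition, not in the original script): plot the total
# within-cluster sum of squares for k = 1..10 and look for the "elbow".
set.seed(1)
wss <- sapply(1:10, function(k) kmeans(data[,-1], centers = k, nstart = 25)$tot.withinss)
plot(1:10, wss, type = "b", xlab = "Number of clusters k", ylab = "Total within-cluster SS")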
|
/K MEANS Clustering/R/K Means clustering on Protein dataset/kmeans protein.r
|
no_license
|
dattatrayshinde/Machine-Learning-Algorithms-in-R
|
R
| false | false | 521 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nba-player-per-game-stats.R
\name{NBAPlayerPerGameStats}
\alias{NBAPlayerPerGameStats}
\title{NBA Player Career Statistics}
\usage{
NBAPlayerPerGameStats(player_link)
}
\arguments{
\item{player_link}{A link suffix, e.g. "/players/d/davisan02.html"}
}
\value{
An object of class tbl_df
}
\description{
This function gets a player's career stats from basketball-reference.com
}
\examples{
NBAPlayerPerGameStats("/players/d/davisan02.html") # Anthony Davis
NBAPlayerPerGameStats("/players/j/jamesle01.html") # Lebron James
}
|
/man/NBAPlayerPerGameStats.Rd
|
no_license
|
tomiaJO/ballr-1
|
R
| false | true | 600 |
rd
|
install.packages("rvest")
library(rvest)
car_link <- "https://en.wikipedia.org/wiki/Comma-separated_values"
car_html <- read_html(car_link)
table <- html_nodes(car_html, ".wikitable") %>%
  html_table()
# html_table() returns a list of data frames, so write out the first matched table
write.csv(table[[1]], file = "Cars")
read.csv("Cars")
|
/michelle mau upload file disini/r_csv/practice assg 2.R
|
no_license
|
Jason-Joseph/st2195_assignment_2
|
R
| false | false | 269 |
r
|
install.packages("rvest")
library(rvest)
car_link <- "https://en.wikipedia.org/wiki/Comma-separated_values"
car_html <- read_html(car_link)
table <- html_nodes(car_html, ".wikitable") %>%
html_table()
csv <- write.csv(table, file = "Cars")
read.csv("Cars")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-fishing-mortality.R
\name{plot_fishing_mortality}
\alias{plot_fishing_mortality}
\title{Plot fishing mortality (F)}
\usage{
plot_fishing_mortality(M)
}
\arguments{
\item{M}{list of object(s) created by read_admb function}
\item{xlab}{the x-axis label for the plot}
\item{ylab}{the y-axis label for the plot}
}
\value{
plot of fishing mortality (F)
}
\description{
Plot fishing mortality (F)
}
\author{
JN Ianelli, SJD Martell, DN Webber
}
|
/gmr/man/plot_fishing_mortality.Rd
|
no_license
|
seacode/gmacs
|
R
| false | true | 548 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-fishing-mortality.R
\name{plot_fishing_mortality}
\alias{plot_fishing_mortality}
\title{Plot fishing mortality (F)}
\usage{
plot_fishing_mortality(M)
}
\arguments{
\item{M}{list of object(s) created by read_admb function}
\item{xlab}{the x-axis label for the plot}
\item{ylab}{the y-axis label for the plot}
}
\value{
plot of fishing mortality (F)
}
\description{
Plot fishing mortality (F)
}
\author{
JN Ianelli, SJD Martell, DN Webber
}
|
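The Rd above states that the argument is a list of objects created by read_admb. A hedged usage sketch follows; the report file prefix "gmacs" is an illustrative assumption, not a value prescribed by the package.

library(gmr)
# Usage sketch: wrap the read_admb() output in a list, as the Rd requires.
M <- list(read_admb("gmacs"))   # "gmacs" is an assumed ADMB report prefix
plot_fishing_mortality(M)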
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runACF.R
\name{runACF}
\alias{runACF}
\title{run functions to create acf matrix and plot the results}
\usage{
runACF(block, model, store = FALSE, save = F,
suppress.printout = FALSE)
}
\arguments{
\item{block}{Vector of blocks that identify data points that are correlated}
\item{model}{Fitted model object (glm or gam)}
\item{store}{(\code{default=F}). Logical stating whether a list of the matrix of correlations is stored (output from \code{acffunc}.)}
\item{save}{(\code{default=FALSE}). Logical stating whether plot should be saved into working directory.}
\item{suppress.printout}{(Default: \code{FALSE}). Logical stating whether to suppress the printout of block numbers used to assess progress; \code{FALSE} shows the printout.}
}
\value{
Plot of lag vs correlation. Each grey line is the correlation for each individual block in \code{block}. The red line is the mean values for each lag.
If \code{store=TRUE} then the matrix of correlations (nblocks x length_max_block) is returned and \code{plotacf} may be used to plot the acf.
}
\description{
run functions to create acf matrix and plot the results
}
\examples{
# load data
data(ns.data.re)
model<-gamMRSea(birds ~ observationhour + as.factor(floodebb) + as.factor(impact),
family='quasipoisson', data=ns.data.re)
ns.data.re$blockid<-paste(ns.data.re$GridCode, ns.data.re$Year, ns.data.re$MonthOfYear,
ns.data.re$DayOfMonth, sep='')
ns.data.re$blockid<-as.factor(ns.data.re$blockid)
runACF(ns.data.re$blockid, model, suppress.printout=TRUE)
}
\author{
LAS Scott-Hayward, University of St Andrews
}
|
/man/runACF.Rd
|
no_license
|
CMFell/MRSeaCF
|
R
| false | true | 1,668 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runACF.R
\name{runACF}
\alias{runACF}
\title{run functions to create acf matrix and plot the results}
\usage{
runACF(block, model, store = FALSE, save = F,
suppress.printout = FALSE)
}
\arguments{
\item{block}{Vector of blocks that identify data points that are correlated}
\item{model}{Fitted model object (glm or gam)}
\item{store}{(\code{default=F}). Logical stating whether a list of the matrix of correlations is stored (output from \code{acffunc}.)}
\item{save}{(\code{default=FALSE}). Logical stating whether plot should be saved into working directory.}
\item{suppress.printout}{(Default: \code{FALSE}). Logical stating whether to suppress the printout of block numbers used to assess progress; \code{FALSE} shows the printout.}
}
\value{
Plot of lag vs correlation. Each grey line is the correlation for each individual block in \code{block}. The red line is the mean values for each lag.
If \code{store=TRUE} then the matrix of correlations (nblocks x length_max_block) is returned and \code{plotacf} may be used to plot the acf.
}
\description{
run functions to create acf matrix and plot the results
}
\examples{
# load data
data(ns.data.re)
model<-gamMRSea(birds ~ observationhour + as.factor(floodebb) + as.factor(impact),
family='quasipoisson', data=ns.data.re)
ns.data.re$blockid<-paste(ns.data.re$GridCode, ns.data.re$Year, ns.data.re$MonthOfYear,
ns.data.re$DayOfMonth, sep='')
ns.data.re$blockid<-as.factor(ns.data.re$blockid)
runACF(ns.data.re$blockid, model, suppress.printout=TRUE)
}
\author{
LAS Scott-Hayward, University of St Andrews
}
|
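runACF, as documented above, computes lag correlations separately for each block of correlated observations and then averages them across blocks. A minimal base-R sketch of such a per-block ACF matrix is shown below; it is illustrative only and is not the MRSea implementation.

# Per-block ACF matrix: one row per block, one column per lag (illustrative).
# Assumes every block contains at least two observations.
block_acf <- function(resids, block) {
  blocks  <- split(resids, block)
  max_lag <- max(lengths(blocks)) - 1
  t(vapply(blocks, function(r) {
    a <- acf(r, lag.max = max_lag, plot = FALSE)$acf[, 1, 1]
    c(a, rep(NA, max_lag + 1 - length(a)))   # pad shorter blocks with NA
  }, numeric(max_lag + 1)))
}

# mean correlation at each lag across blocks (the red line in the runACF plot):
# colMeans(block_acf(residuals(model), ns.data.re$blockid), na.rm = TRUE)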
# -------------------------------------
# Coursera Data Science
# 04 Exploratory Data Analysis
# Project 1
# -------------------------------------
# Plot 4
# this code assumes that the cleaned dataset (filename = EPCdata.RData) is in the working directory
load("./EPCdata.RData")
png(file = "./plot4.png", width = 480, height = 480) # init file
par(mfrow = c(2, 2)) # 4 panels filled by rows
## 1st Plot = Plot 2
with(data, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power"))
## 2nd Plot
with(data, plot(datetime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))
# 3rd Plot = Plot 3
with(data, plot(datetime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering"))
with(data, lines(datetime, Sub_metering_1, col = "black"))
with(data, lines(datetime, Sub_metering_2, col = "red"))
with(data, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# 4th Plot
with(data, plot(datetime, Global_reactive_power, type = "l"))
dev.off()
|
/plot4.R
|
no_license
|
lukasstammler/ExploratoryDataAnalysis
|
R
| false | false | 1,119 |
r
|
# -------------------------------------
# Coursera Data Science
# 04 Exploratory Data Analysis
# Project 1
# -------------------------------------
# Plot 4
# this code assumes that the cleaned dataset (filename = EPCdata.RData) is in the working directory
load("./EPCdata.RData")
png(file = "./plot4.png", width = 480, height = 480) # init file
par(mfrow = c(2, 2)) # 4 panels filled by rows
## 1st Plot = Plot 2
with(data, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power"))
## 2nd Plot
with(data, plot(datetime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))
# 3rd Plot = Plot 3
with(data, plot(datetime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering"))
with(data, lines(datetime, Sub_metering_1, col = "black"))
with(data, lines(datetime, Sub_metering_2, col = "red"))
with(data, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# 4th Plot
with(data, plot(datetime, Global_reactive_power, type = "l"))
dev.off()
|
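The plot script above relies on a pre-cleaned EPCdata.RData. A hedged sketch of how such a file could be prepared from the raw UCI household power consumption data is given below; the file name, separator, two-day subset, and column list are assumptions typical of this assignment, not code taken from the repository.

# Assumed preparation of EPCdata.RData from the raw UCI file (illustrative).
raw <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                  na.strings = "?", stringsAsFactors = FALSE)
data <- subset(raw, Date %in% c("1/2/2007", "2/2/2007"))   # assumed two-day window
data$datetime <- as.POSIXct(strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S"))
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
data[num_cols] <- lapply(data[num_cols], as.numeric)
save(data, file = "EPCdata.RData")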
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_description.R
\docType{data}
\name{celllineDEP_lib}
\alias{celllineDEP_lib}
\title{Cancer cell lines in CCLE database}
\description{
Available cancer cell lines in the CCLE database. Used to check whether a
user-supplied cell line name is included in CCLE, so that the Level 2 model
can be called without an extra drug-induced differentially activated pathway
table being provided.
}
|
/man/celllineDEP_lib.Rd
|
no_license
|
VeronicaFung/DComboNet
|
R
| false | true | 429 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_description.R
\docType{data}
\name{celllineDEP_lib}
\alias{celllineDEP_lib}
\title{Cancer cell lines in CCLE database}
\description{
Available cancer cell lines in the CCLE database. Used to check whether a
user-supplied cell line name is included in CCLE, so that the Level 2 model
can be called without an extra drug-induced differentially activated pathway
table being provided.
}
|
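The description above says the object is used to check whether a user-supplied cell line is covered by CCLE. A minimal sketch of such a membership check is shown below; it assumes celllineDEP_lib can be coerced to a character vector of cell line names, which is not guaranteed by the documentation shown here.

# Hypothetical membership check against celllineDEP_lib (object structure assumed).
cellline_in_ccle <- function(cellline, lib = celllineDEP_lib) {
  toupper(cellline) %in% toupper(as.character(unlist(lib)))
}
# cellline_in_ccle("MCF7")   # TRUE if the cell line is covered by CCLE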